diff --git a/Kinde.Api.Test/Integration/BaseIntegrationTest.cs b/Kinde.Api.Test/Integration/BaseIntegrationTest.cs new file mode 100644 index 0000000..975a963 --- /dev/null +++ b/Kinde.Api.Test/Integration/BaseIntegrationTest.cs @@ -0,0 +1,200 @@ +using System; +using System.IO; +using System.Net.Http; +using Kinde.Api.Client; +using Microsoft.Extensions.Configuration; +using Xunit; + +namespace Kinde.Api.Test.Integration +{ + /// + /// Base class for integration tests that support both real and mock modes + /// + public abstract class BaseIntegrationTest : IClassFixture, IDisposable + { + protected readonly IntegrationTestFixture Fixture; + protected readonly Configuration ApiConfiguration; + protected readonly bool IsConfigured; + protected readonly bool UseMockMode; + protected readonly HttpClient? MockHttpClient; + + protected BaseIntegrationTest(IntegrationTestFixture fixture) + { + Fixture = fixture; + IsConfigured = fixture.IsConfigured; + UseMockMode = fixture.UseMockMode; + + if (UseMockMode) + { + // Create mock HTTP client + // Don't set BaseAddress - let ApiClient handle the full URL construction + var mockHandler = new MockHttpHandler(); + MockHttpClient = new HttpClient(mockHandler); + + ApiConfiguration = new Configuration + { + BasePath = "https://mock.kinde.com", + AccessToken = "mock_token" + }; + } + else if (IsConfigured) + { + ApiConfiguration = new Configuration + { + BasePath = fixture.Domain, + AccessToken = fixture.AccessToken + }; + } + else + { + ApiConfiguration = new Configuration(); + } + } + + /// + /// Skips the test if neither mock mode nor real credentials are configured + /// + protected void SkipIfNotConfigured() + { + if (!UseMockMode && !IsConfigured) + { + // Fail the test with a clear message about missing configuration + Assert.True(false, + "Test mode not configured. " + + "Either set USE_MOCK_MODE=true for CI/CD testing, " + + "or configure real credentials: KINDE_DOMAIN, KINDE_CLIENT_ID, KINDE_CLIENT_SECRET, and KINDE_AUDIENCE environment variables, " + + "or configure appsettings.json with KindeManagementApi section."); + } + } + + /// + /// Creates an API client instance, using mock HTTP client if in mock mode + /// + protected T CreateApiClient(Func factory) where T : class + { + if (UseMockMode && MockHttpClient != null) + { + return factory(ApiConfiguration, MockHttpClient); + } + return factory(ApiConfiguration, null); + } + + public virtual void Dispose() + { + // Override in derived classes if cleanup is needed + } + } + + /// + /// Test fixture that handles M2M authentication once per test run + /// Supports both real API mode and mock mode for CI/CD + /// + public class IntegrationTestFixture : IDisposable + { + public string? Domain { get; private set; } + public string? AccessToken { get; private set; } + public bool IsConfigured { get; private set; } + public bool UseMockMode { get; private set; } + + public IntegrationTestFixture() + { + LoadConfiguration(); + } + + private void LoadConfiguration() + { + // Check for mock mode first (for CI/CD) + var useMockMode = Environment.GetEnvironmentVariable("USE_MOCK_MODE"); + if (!string.IsNullOrWhiteSpace(useMockMode) && + (useMockMode.Equals("true", StringComparison.OrdinalIgnoreCase) || + useMockMode == "1")) + { + UseMockMode = true; + IsConfigured = true; // Mock mode is always "configured" + Console.WriteLine("✓ Using MOCK mode for integration tests (CI/CD mode)"); + return; + } + + // Try to load from appsettings.json first + var configuration = new ConfigurationBuilder() + .SetBasePath(Directory.GetCurrentDirectory()) + .AddJsonFile("appsettings.json", optional: true) + .AddJsonFile("appsettings.Development.json", optional: true) + .AddEnvironmentVariables() + .Build(); + + // Check config file for mock mode + var configMockMode = configuration["KindeManagementApi:UseMockMode"]; + if (!string.IsNullOrWhiteSpace(configMockMode) && + (configMockMode.Equals("true", StringComparison.OrdinalIgnoreCase) || + configMockMode == "1")) + { + UseMockMode = true; + IsConfigured = true; + Console.WriteLine("✓ Using MOCK mode for integration tests (from config)"); + return; + } + + // Real API mode - load credentials + var domain = configuration["KindeManagementApi:Domain"] + ?? Environment.GetEnvironmentVariable("KINDE_DOMAIN"); + var clientId = configuration["KindeManagementApi:ClientId"] + ?? Environment.GetEnvironmentVariable("KINDE_CLIENT_ID"); + var clientSecret = configuration["KindeManagementApi:ClientSecret"] + ?? Environment.GetEnvironmentVariable("KINDE_CLIENT_SECRET"); + var audience = configuration["KindeManagementApi:Audience"] + ?? Environment.GetEnvironmentVariable("KINDE_AUDIENCE"); + var scope = configuration["KindeManagementApi:Scope"] + ?? Environment.GetEnvironmentVariable("KINDE_SCOPE"); + + if (string.IsNullOrWhiteSpace(domain) || + string.IsNullOrWhiteSpace(clientId) || + string.IsNullOrWhiteSpace(clientSecret) || + string.IsNullOrWhiteSpace(audience)) + { + IsConfigured = false; + Console.WriteLine("WARNING: M2M credentials not configured. Integration tests will be skipped."); + Console.WriteLine("Configure via appsettings.json or environment variables:"); + Console.WriteLine(" - KINDE_DOMAIN"); + Console.WriteLine(" - KINDE_CLIENT_ID"); + Console.WriteLine(" - KINDE_CLIENT_SECRET"); + Console.WriteLine(" - KINDE_AUDIENCE"); + Console.WriteLine("Or set USE_MOCK_MODE=true for CI/CD testing"); + return; + } + + Domain = domain; + IsConfigured = true; + UseMockMode = false; + + // Get access token synchronously (xUnit doesn't support async in constructor) + try + { + var task = M2MAuthenticationHelper.GetAccessTokenAsync( + domain, clientId, clientSecret, audience, scope); + AccessToken = task.GetAwaiter().GetResult(); + + if (string.IsNullOrWhiteSpace(AccessToken)) + { + IsConfigured = false; + Console.WriteLine("WARNING: Failed to obtain access token. Integration tests will be skipped."); + } + else + { + Console.WriteLine("✓ M2M authentication successful for integration tests (REAL API mode)"); + } + } + catch (Exception ex) + { + IsConfigured = false; + Console.WriteLine($"WARNING: Failed to authenticate: {ex.Message}. Integration tests will be skipped."); + } + } + + public void Dispose() + { + // Cleanup if needed + } + } +} + diff --git a/Kinde.Api.Test/Integration/ConverterIntegrationTests.cs b/Kinde.Api.Test/Integration/ConverterIntegrationTests.cs new file mode 100644 index 0000000..06c8f92 --- /dev/null +++ b/Kinde.Api.Test/Integration/ConverterIntegrationTests.cs @@ -0,0 +1,667 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using Kinde.Api.Api; +using Kinde.Api.Client; +using Kinde.Api.Model; +using Newtonsoft.Json; +using Xunit; +using Xunit.Abstractions; + +namespace Kinde.Api.Test.Integration +{ + /// + /// Comprehensive integration tests for all Newtonsoft.Json converters + /// Tests serialization/deserialization round-trips for all API responses + /// + [Collection("Integration Tests")] + public class ConverterIntegrationTests : BaseIntegrationTest + { + private readonly ITestOutputHelper _output; + + public ConverterIntegrationTests(IntegrationTestFixture fixture, ITestOutputHelper output) + : base(fixture) + { + _output = output; + } + + #region Read-Only Endpoint Tests + + [Fact] + public async Task TestGetAPIs_Converter() + { + SkipIfNotConfigured(); + + try + { + var api = UseMockMode && MockHttpClient != null + ? new APIsApi(MockHttpClient, ApiConfiguration) + : new APIsApi(ApiConfiguration); + + var result = await api.GetAPIsAsync(); + + Assert.NotNull(result); + + // Show detailed output + TestOutputHelper.WriteResponseDetails(_output, "GetAPIs", result); + + // Test serialization round-trip + TestSerializationRoundTrip(result, "GetAPIs"); + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetAPIs", ex); + throw; + } + } + + [Fact] + public async Task TestGetApplications_Converter() + { + SkipIfNotConfigured(); + + try + { + var api = UseMockMode && MockHttpClient != null + ? new ApplicationsApi(MockHttpClient, ApiConfiguration) + : new ApplicationsApi(ApiConfiguration); + + var result = await api.GetApplicationsAsync(); + + Assert.NotNull(result); + + // Show detailed output + TestOutputHelper.WriteResponseDetails(_output, "GetApplications", result); + + // Test serialization round-trip + TestSerializationRoundTrip(result, "GetApplications"); + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetApplications", ex); + throw; + } + } + + [Fact] + public async Task TestGetRoles_Converter() + { + SkipIfNotConfigured(); + + try + { + var api = UseMockMode && MockHttpClient != null + ? new RolesApi(MockHttpClient, ApiConfiguration) + : new RolesApi(ApiConfiguration); + + var result = await api.GetRolesAsync(); + + Assert.NotNull(result); + + // Show detailed output + TestOutputHelper.WriteResponseDetails(_output, "GetRoles", result); + + // Test serialization round-trip + TestSerializationRoundTrip(result, "GetRoles"); + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetRoles", ex); + throw; + } + } + + [Fact] + public async Task TestGetPermissions_Converter() + { + SkipIfNotConfigured(); + + try + { + var api = UseMockMode && MockHttpClient != null + ? new PermissionsApi(MockHttpClient, ApiConfiguration) + : new PermissionsApi(ApiConfiguration); + + var result = await api.GetPermissionsAsync(); + + Assert.NotNull(result); + + // Show detailed output + TestOutputHelper.WriteResponseDetails(_output, "GetPermissions", result); + + // Test serialization round-trip + TestSerializationRoundTrip(result, "GetPermissions"); + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetPermissions", ex); + throw; + } + } + + [Fact] + public async Task TestGetProperties_Converter() + { + SkipIfNotConfigured(); + + try + { + var api = UseMockMode && MockHttpClient != null + ? new PropertiesApi(MockHttpClient, ApiConfiguration) + : new PropertiesApi(ApiConfiguration); + + var result = await api.GetPropertiesAsync(); + + Assert.NotNull(result); + + // Show detailed output + TestOutputHelper.WriteResponseDetails(_output, "GetProperties", result); + + // Test serialization round-trip + TestSerializationRoundTrip(result, "GetProperties"); + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetProperties", ex); + throw; + } + } + + [Fact] + public async Task TestGetOrganizations_Converter() + { + SkipIfNotConfigured(); + + try + { + var api = UseMockMode && MockHttpClient != null + ? new OrganizationsApi(MockHttpClient, ApiConfiguration) + : new OrganizationsApi(ApiConfiguration); + + var result = await api.GetOrganizationsAsync(); + + Assert.NotNull(result); + + // Show detailed output + TestOutputHelper.WriteResponseDetails(_output, "GetOrganizations", result); + + // Test serialization round-trip + TestSerializationRoundTrip(result, "GetOrganizations"); + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetOrganizations", ex); + throw; + } + } + + [Fact] + public async Task TestGetConnections_Converter() + { + SkipIfNotConfigured(); + + try + { + var api = UseMockMode && MockHttpClient != null + ? new ConnectionsApi(MockHttpClient, ApiConfiguration) + : new ConnectionsApi(ApiConfiguration); + + var result = await api.GetConnectionsAsync(); + + Assert.NotNull(result); + + // Show detailed output + TestOutputHelper.WriteResponseDetails(_output, "GetConnections", result); + + // Test serialization round-trip + TestSerializationRoundTrip(result, "GetConnections"); + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetConnections", ex); + throw; + } + } + + [Fact] + public async Task TestGetEnvironment_Converter() + { + SkipIfNotConfigured(); + + try + { + var api = UseMockMode && MockHttpClient != null + ? new EnvironmentsApi(MockHttpClient, ApiConfiguration) + : new EnvironmentsApi(ApiConfiguration); + + var result = await api.GetEnvironmentAsync(); + + Assert.NotNull(result); + + // Show detailed output + TestOutputHelper.WriteResponseDetails(_output, "GetEnvironment", result); + + // Test serialization round-trip + TestSerializationRoundTrip(result, "GetEnvironment"); + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetEnvironment", ex); + throw; + } + } + + [Fact] + public async Task TestGetEnvironmentVariables_Converter() + { + SkipIfNotConfigured(); + + try + { + var api = UseMockMode && MockHttpClient != null + ? new EnvironmentVariablesApi(MockHttpClient, ApiConfiguration) + : new EnvironmentVariablesApi(ApiConfiguration); + + var result = await api.GetEnvironmentVariablesAsync(); + + Assert.NotNull(result); + + // Show detailed output + TestOutputHelper.WriteResponseDetails(_output, "GetEnvironmentVariables", result); + + // Test serialization round-trip + TestSerializationRoundTrip(result, "GetEnvironmentVariables"); + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetEnvironmentVariables", ex); + throw; + } + } + + [Fact] + public async Task TestGetBusiness_Converter() + { + SkipIfNotConfigured(); + + try + { + var api = UseMockMode && MockHttpClient != null + ? new BusinessApi(MockHttpClient, ApiConfiguration) + : new BusinessApi(ApiConfiguration); + + var result = await api.GetBusinessAsync(); + + Assert.NotNull(result); + + // Show detailed output + TestOutputHelper.WriteResponseDetails(_output, "GetBusiness", result); + + // Test serialization round-trip + TestSerializationRoundTrip(result, "GetBusiness"); + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetBusiness", ex); + throw; + } + } + + [Fact] + public async Task TestGetIndustries_Converter() + { + SkipIfNotConfigured(); + + try + { + var api = UseMockMode && MockHttpClient != null + ? new IndustriesApi(MockHttpClient, ApiConfiguration) + : new IndustriesApi(ApiConfiguration); + + var result = await api.GetIndustriesAsync(); + + Assert.NotNull(result); + + // Show detailed output + TestOutputHelper.WriteResponseDetails(_output, "GetIndustries", result); + + // Test serialization round-trip + TestSerializationRoundTrip(result, "GetIndustries"); + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetIndustries", ex); + throw; + } + } + + [Fact] + public async Task TestGetTimezones_Converter() + { + SkipIfNotConfigured(); + + try + { + var api = UseMockMode && MockHttpClient != null + ? new TimezonesApi(MockHttpClient, ApiConfiguration) + : new TimezonesApi(ApiConfiguration); + + var result = await api.GetTimezonesAsync(); + + Assert.NotNull(result); + + // Show detailed output + TestOutputHelper.WriteResponseDetails(_output, "GetTimezones", result); + + // Test serialization round-trip + TestSerializationRoundTrip(result, "GetTimezones"); + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetTimezones", ex); + throw; + } + } + + [Fact] + public async Task TestGetCategories_Converter() + { + SkipIfNotConfigured(); + + try + { + var api = UseMockMode && MockHttpClient != null + ? new PropertyCategoriesApi(MockHttpClient, ApiConfiguration) + : new PropertyCategoriesApi(ApiConfiguration); + + var result = await api.GetCategoriesAsync(); + + Assert.NotNull(result); + + // Show detailed output + TestOutputHelper.WriteResponseDetails(_output, "GetCategories", result); + + // Test serialization round-trip + TestSerializationRoundTrip(result, "GetCategories"); + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetCategories", ex); + throw; + } + } + + [Fact] + public async Task TestGetSubscribers_Converter() + { + SkipIfNotConfigured(); + + try + { + var api = UseMockMode && MockHttpClient != null + ? new SubscribersApi(MockHttpClient, ApiConfiguration) + : new SubscribersApi(ApiConfiguration); + + var result = await api.GetSubscribersAsync(); + + Assert.NotNull(result); + + // Show detailed output + TestOutputHelper.WriteResponseDetails(_output, "GetSubscribers", result); + + // Test serialization round-trip + TestSerializationRoundTrip(result, "GetSubscribers"); + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetSubscribers", ex); + throw; + } + } + + #endregion + + #region Parameterized Endpoint Tests + + [Fact] + public async Task TestGetAPI_WithId_Converter() + { + SkipIfNotConfigured(); + + try + { + var apisApi = UseMockMode && MockHttpClient != null + ? new APIsApi(MockHttpClient, ApiConfiguration) + : new APIsApi(ApiConfiguration); + + var apis = await apisApi.GetAPIsAsync(); + + if (apis?.Apis != null && apis.Apis.Count > 0) + { + var apiId = apis.Apis[0].Id; + var result = await apisApi.GetAPIAsync(apiId); + + Assert.NotNull(result); + TestSerializationRoundTrip(result, $"GetAPI-{apiId}"); + } + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetAPI_WithId", ex); + throw; + } + } + + [Fact] + public async Task TestGetAPIScopes_WithId_Converter() + { + SkipIfNotConfigured(); + + try + { + var apisApi = UseMockMode && MockHttpClient != null + ? new APIsApi(MockHttpClient, ApiConfiguration) + : new APIsApi(ApiConfiguration); + + var apis = await apisApi.GetAPIsAsync(); + + if (apis?.Apis != null && apis.Apis.Count > 0) + { + var apiId = apis.Apis[0].Id; + var result = await apisApi.GetAPIScopesAsync(apiId); + + Assert.NotNull(result); + TestSerializationRoundTrip(result, $"GetAPIScopes-{apiId}"); + } + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetAPIScopes_WithId", ex); + throw; + } + } + + [Fact] + public async Task TestGetApplication_WithId_Converter() + { + SkipIfNotConfigured(); + + try + { + var appsApi = UseMockMode && MockHttpClient != null + ? new ApplicationsApi(MockHttpClient, ApiConfiguration) + : new ApplicationsApi(ApiConfiguration); + + var applications = await appsApi.GetApplicationsAsync(); + + if (applications?.Applications != null && applications.Applications.Count > 0) + { + var appId = applications.Applications[0].Id; + var result = await appsApi.GetApplicationAsync(appId); + + Assert.NotNull(result); + TestSerializationRoundTrip(result, $"GetApplication-{appId}"); + } + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetApplication_WithId", ex); + throw; + } + } + + [Fact] + public async Task TestGetRole_WithId_Converter() + { + SkipIfNotConfigured(); + + try + { + var rolesApi = UseMockMode && MockHttpClient != null + ? new RolesApi(MockHttpClient, ApiConfiguration) + : new RolesApi(ApiConfiguration); + + var roles = await rolesApi.GetRolesAsync(); + + if (roles?.Roles != null && roles.Roles.Count > 0) + { + var roleId = roles.Roles[0].Id; + var result = await rolesApi.GetRoleAsync(roleId); + + Assert.NotNull(result); + TestSerializationRoundTrip(result, $"GetRole-{roleId}"); + } + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetRole_WithId", ex); + throw; + } + } + + [Fact] + public async Task TestGetRoleScopes_WithId_Converter() + { + SkipIfNotConfigured(); + + try + { + var rolesApi = UseMockMode && MockHttpClient != null + ? new RolesApi(MockHttpClient, ApiConfiguration) + : new RolesApi(ApiConfiguration); + + var roles = await rolesApi.GetRolesAsync(); + + if (roles?.Roles != null && roles.Roles.Count > 0) + { + var roleId = roles.Roles[0].Id; + var result = await rolesApi.GetRoleScopesAsync(roleId); + + Assert.NotNull(result); + TestSerializationRoundTrip(result, $"GetRoleScopes-{roleId}"); + } + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetRoleScopes_WithId", ex); + throw; + } + } + + [Fact] + public async Task TestGetRolePermissions_WithId_Converter() + { + SkipIfNotConfigured(); + + try + { + var rolesApi = UseMockMode && MockHttpClient != null + ? new RolesApi(MockHttpClient, ApiConfiguration) + : new RolesApi(ApiConfiguration); + + var roles = await rolesApi.GetRolesAsync(); + + if (roles?.Roles != null && roles.Roles.Count > 0) + { + var roleId = roles.Roles[0].Id; + var result = await rolesApi.GetRolePermissionsAsync(roleId); + + Assert.NotNull(result); + TestSerializationRoundTrip(result, $"GetRolePermissions-{roleId}"); + } + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetRolePermissions_WithId", ex); + throw; + } + } + + #endregion + + #region Helper Methods + + /// + /// Tests serialization/deserialization round-trip for a response object + /// + private void TestSerializationRoundTrip(T original, string testName) where T : class + { + try + { + // Get the standard converters from ApiClient using reflection + var apiClientType = typeof(Kinde.Api.Client.ApiClient); + var helperType = apiClientType.Assembly.GetType("Kinde.Api.Client.JsonConverterHelper"); + IList converters; + if (helperType != null) + { + var method = helperType.GetMethod("CreateStandardConverters", + System.Reflection.BindingFlags.Public | System.Reflection.BindingFlags.Static); + if (method != null) + { + converters = (IList)method.Invoke(null, null)!; + } + else + { + throw new InvalidOperationException("Could not find CreateStandardConverters method"); + } + } + else + { + throw new InvalidOperationException("Could not find JsonConverterHelper type"); + } + var settings = new JsonSerializerSettings + { + Converters = converters, + NullValueHandling = NullValueHandling.Ignore + }; + + // Serialize + var json = JsonConvert.SerializeObject(original, settings); + Assert.False(string.IsNullOrEmpty(json), + $"{testName}: Serialization produced empty JSON"); + + _output.WriteLine($"{testName}: Serialized to {json.Length} characters"); + + // Deserialize + var deserialized = JsonConvert.DeserializeObject(json, settings); + Assert.NotNull(deserialized); + + // Round-trip comparison + var originalJson = JsonConvert.SerializeObject(original, settings); + var deserializedJson = JsonConvert.SerializeObject(deserialized, settings); + + Assert.Equal(originalJson, deserializedJson); + + _output.WriteLine($"✓ {testName}: Converter test passed"); + } + catch (Exception ex) + { + _output.WriteLine($"✗ {testName}: Converter test failed - {ex.Message}"); + throw; + } + } + + #endregion + } +} + diff --git a/Kinde.Api.Test/Integration/GeneratedConverterIntegrationTests.cs b/Kinde.Api.Test/Integration/GeneratedConverterIntegrationTests.cs new file mode 100644 index 0000000..d7c0c1a --- /dev/null +++ b/Kinde.Api.Test/Integration/GeneratedConverterIntegrationTests.cs @@ -0,0 +1,3146 @@ +// +// This file is automatically generated by the test generator. +// DO NOT EDIT THIS FILE MANUALLY - your changes will be overwritten. +// To regenerate this file, run: python generate_integration_tests.py --spec --output +// + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Reflection; +using System.Threading.Tasks; +using Kinde.Api.Api; +using Kinde.Api.Client; +using Kinde.Api.Model; +using Newtonsoft.Json; +using Xunit; +using Xunit.Abstractions; + +namespace Kinde.Api.Test.Integration +{ + /// + /// Auto-generated integration tests for all API endpoints + /// Tests serialization/deserialization round-trips for all converters + /// + [Collection("Integration Tests")] + public class GeneratedConverterIntegrationTests : BaseIntegrationTest + { + private readonly ITestOutputHelper _output; + + public GeneratedConverterIntegrationTests(IntegrationTestFixture fixture, ITestOutputHelper output) + : base(fixture) + { + _output = output; + } + + + #region APIsApi Tests + + + [Fact] + public async Task TestGetAPIs_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetAPIs - requires parameters: expand"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetAPIs", ex); + throw; + } + } + + + [Fact] + public async Task TestAddAPIs_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping AddAPIs - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "AddAPIs", ex); + throw; + } + } + + + [Fact] + public async Task TestDeleteAPI_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteAPI - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteAPI", ex); + throw; + } + } + + + [Fact] + public async Task TestGetAPI_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetAPI - requires parameters: api_id"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetAPI", ex); + throw; + } + } + + + [Fact] + public async Task TestGetAPIScopes_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetAPIScopes - requires parameters: api_id"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetAPIScopes", ex); + throw; + } + } + + + [Fact] + public async Task TestAddAPIScope_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping AddAPIScope - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "AddAPIScope", ex); + throw; + } + } + + + [Fact] + public async Task TestDeleteAPIScope_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteAPIScope - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteAPIScope", ex); + throw; + } + } + + + [Fact] + public async Task TestGetAPIScope_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetAPIScope - requires parameters: api_id, scope_id"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetAPIScope", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateAPIScope_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdateAPIScope - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateAPIScope", ex); + throw; + } + } + + + [Fact] + public async Task TestDeleteAPIAppliationScope_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteAPIAppliationScope - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteAPIAppliationScope", ex); + throw; + } + } + + + #endregion + + + #region ApplicationsApi Tests + + + [Fact] + public async Task TestUpdateAPIApplications_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdateAPIApplications - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateAPIApplications", ex); + throw; + } + } + + + [Fact] + public async Task TestAddAPIApplicationScope_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping AddAPIApplicationScope - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "AddAPIApplicationScope", ex); + throw; + } + } + + + [Fact] + public async Task TestGetApplications_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetApplications - requires parameters: sort, page_size, next_token"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetApplications", ex); + throw; + } + } + + + [Fact] + public async Task TestCreateApplication_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping CreateApplication - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "CreateApplication", ex); + throw; + } + } + + + [Fact] + public async Task TestDeleteApplication_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteApplication - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteApplication", ex); + throw; + } + } + + + [Fact] + public async Task TestGetApplication_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetApplication - requires parameters: application_id"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetApplication", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateApplication_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdateApplication - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateApplication", ex); + throw; + } + } + + + [Fact] + public async Task TestGetApplicationConnections_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetApplicationConnections - requires parameters: application_id"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetApplicationConnections", ex); + throw; + } + } + + + [Fact] + public async Task TestGetApplicationPropertyValues_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetApplicationPropertyValues - requires parameters: application_id"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetApplicationPropertyValues", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateApplicationsProperty_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdateApplicationsProperty - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateApplicationsProperty", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateApplicationTokens_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdateApplicationTokens - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateApplicationTokens", ex); + throw; + } + } + + + #endregion + + + #region ConnectionsApi Tests + + + [Fact] + public async Task TestRemoveConnection_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping RemoveConnection - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "RemoveConnection", ex); + throw; + } + } + + + [Fact] + public async Task TestEnableConnection_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping EnableConnection - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "EnableConnection", ex); + throw; + } + } + + + [Fact] + public async Task TestGetConnections_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetConnections - requires parameters: page_size, home_realm_domain, starting_after, ending_before"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetConnections", ex); + throw; + } + } + + + [Fact] + public async Task TestCreateConnection_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping CreateConnection - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "CreateConnection", ex); + throw; + } + } + + + [Fact] + public async Task TestDeleteConnection_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteConnection - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteConnection", ex); + throw; + } + } + + + [Fact] + public async Task TestGetConnection_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetConnection - requires parameters: connection_id"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetConnection", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateConnection_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdateConnection - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateConnection", ex); + throw; + } + } + + + [Fact] + public async Task TestReplaceConnection_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping ReplaceConnection - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "ReplaceConnection", ex); + throw; + } + } + + + [Fact] + public async Task TestRemoveOrgConnection_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping RemoveOrgConnection - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "RemoveOrgConnection", ex); + throw; + } + } + + + [Fact] + public async Task TestEnableOrgConnection_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping EnableOrgConnection - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "EnableOrgConnection", ex); + throw; + } + } + + + #endregion + + + #region BillingEntitlementsApi Tests + + + [Fact] + public async Task TestGetBillingEntitlements_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetBillingEntitlements - requires parameters: page_size, starting_after, ending_before, customer_id, max_value, expand"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetBillingEntitlements", ex); + throw; + } + } + + + #endregion + + + #region BillingAgreementsApi Tests + + + [Fact] + public async Task TestGetBillingAgreements_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetBillingAgreements - requires parameters: page_size, starting_after, ending_before, customer_id, feature_code"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetBillingAgreements", ex); + throw; + } + } + + + [Fact] + public async Task TestCreateBillingAgreement_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping CreateBillingAgreement - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "CreateBillingAgreement", ex); + throw; + } + } + + + #endregion + + + #region BillingMeterUsageApi Tests + + + [Fact] + public async Task TestCreateMeterUsageRecord_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping CreateMeterUsageRecord - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "CreateMeterUsageRecord", ex); + throw; + } + } + + + #endregion + + + #region BusinessApi Tests + + + [Fact] + public async Task TestGetBusiness_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Create API instance with mock HTTP client if in mock mode + var api = UseMockMode && MockHttpClient != null + ? new BusinessApi(MockHttpClient, ApiConfiguration) + : new BusinessApi(ApiConfiguration); + + var result = await api.GetBusinessAsync(); + + Assert.NotNull(result); + + // Show detailed output + TestOutputHelper.WriteResponseDetails(_output, "GetBusiness", result); + + // Test serialization round-trip + TestSerializationRoundTrip(result, "GetBusiness"); + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetBusiness", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateBusiness_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdateBusiness - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateBusiness", ex); + throw; + } + } + + + #endregion + + + #region TimezonesApi Tests + + + [Fact] + public async Task TestGetTimezones_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Create API instance with mock HTTP client if in mock mode + var api = UseMockMode && MockHttpClient != null + ? new TimezonesApi(MockHttpClient, ApiConfiguration) + : new TimezonesApi(ApiConfiguration); + + var result = await api.GetTimezonesAsync(); + + Assert.NotNull(result); + + // Show detailed output + TestOutputHelper.WriteResponseDetails(_output, "GetTimezones", result); + + // Test serialization round-trip + TestSerializationRoundTrip(result, "GetTimezones"); + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetTimezones", ex); + throw; + } + } + + + #endregion + + + #region CallbacksApi Tests + + + [Fact] + public async Task TestDeleteCallbackURLs_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteCallbackURLs - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteCallbackURLs", ex); + throw; + } + } + + + [Fact] + public async Task TestGetCallbackURLs_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetCallbackURLs - requires parameters: app_id"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetCallbackURLs", ex); + throw; + } + } + + + [Fact] + public async Task TestAddRedirectCallbackURLs_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping AddRedirectCallbackURLs - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "AddRedirectCallbackURLs", ex); + throw; + } + } + + + [Fact] + public async Task TestReplaceRedirectCallbackURLs_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping ReplaceRedirectCallbackURLs - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "ReplaceRedirectCallbackURLs", ex); + throw; + } + } + + + #endregion + + + #region EnvironmentsApi Tests + + + [Fact] + public async Task TestGetEnvironment_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Create API instance with mock HTTP client if in mock mode + var api = UseMockMode && MockHttpClient != null + ? new EnvironmentsApi(MockHttpClient, ApiConfiguration) + : new EnvironmentsApi(ApiConfiguration); + + var result = await api.GetEnvironmentAsync(); + + Assert.NotNull(result); + + // Show detailed output + TestOutputHelper.WriteResponseDetails(_output, "GetEnvironment", result); + + // Test serialization round-trip + TestSerializationRoundTrip(result, "GetEnvironment"); + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetEnvironment", ex); + throw; + } + } + + + #endregion + + + #region EnvironmentVariablesApi Tests + + + [Fact] + public async Task TestGetEnvironmentVariables_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Create API instance with mock HTTP client if in mock mode + var api = UseMockMode && MockHttpClient != null + ? new EnvironmentVariablesApi(MockHttpClient, ApiConfiguration) + : new EnvironmentVariablesApi(ApiConfiguration); + + var result = await api.GetEnvironmentVariablesAsync(); + + Assert.NotNull(result); + + // Show detailed output + TestOutputHelper.WriteResponseDetails(_output, "GetEnvironmentVariables", result); + + // Test serialization round-trip + TestSerializationRoundTrip(result, "GetEnvironmentVariables"); + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetEnvironmentVariables", ex); + throw; + } + } + + + [Fact] + public async Task TestCreateEnvironmentVariable_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping CreateEnvironmentVariable - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "CreateEnvironmentVariable", ex); + throw; + } + } + + + [Fact] + public async Task TestDeleteEnvironmentVariable_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteEnvironmentVariable - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteEnvironmentVariable", ex); + throw; + } + } + + + [Fact] + public async Task TestGetEnvironmentVariable_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetEnvironmentVariable - requires parameters: variable_id"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetEnvironmentVariable", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateEnvironmentVariable_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdateEnvironmentVariable - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateEnvironmentVariable", ex); + throw; + } + } + + + #endregion + + + #region IdentitiesApi Tests + + + [Fact] + public async Task TestDeleteIdentity_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteIdentity - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteIdentity", ex); + throw; + } + } + + + [Fact] + public async Task TestGetIdentity_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetIdentity - requires parameters: identity_id"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetIdentity", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateIdentity_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdateIdentity - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateIdentity", ex); + throw; + } + } + + + [Fact] + public async Task TestCreateUserIdentity_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping CreateUserIdentity - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "CreateUserIdentity", ex); + throw; + } + } + + + #endregion + + + #region MFAApi Tests + + + [Fact] + public async Task TestReplaceMFA_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping ReplaceMFA - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "ReplaceMFA", ex); + throw; + } + } + + + #endregion + + + #region OrganizationsApi Tests + + + [Fact] + public async Task TestGetOrganization_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetOrganization - requires parameters: code"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetOrganization", ex); + throw; + } + } + + + [Fact] + public async Task TestCreateOrganization_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping CreateOrganization - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "CreateOrganization", ex); + throw; + } + } + + + [Fact] + public async Task TestGetOrganizations_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetOrganizations - requires parameters: sort, page_size, next_token"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetOrganizations", ex); + throw; + } + } + + + [Fact] + public async Task TestDeleteOrganization_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteOrganization - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteOrganization", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateOrganization_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdateOrganization - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateOrganization", ex); + throw; + } + } + + + [Fact] + public async Task TestGetOrganizationUsers_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetOrganizationUsers - requires parameters: sort, page_size, next_token, org_code, permissions, roles"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetOrganizationUsers", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateOrganizationUsers_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdateOrganizationUsers - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateOrganizationUsers", ex); + throw; + } + } + + + [Fact] + public async Task TestAddOrganizationUsers_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping AddOrganizationUsers - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "AddOrganizationUsers", ex); + throw; + } + } + + + [Fact] + public async Task TestGetOrganizationUserRoles_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetOrganizationUserRoles - requires parameters: org_code, user_id"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetOrganizationUserRoles", ex); + throw; + } + } + + + [Fact] + public async Task TestCreateOrganizationUserRole_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping CreateOrganizationUserRole - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "CreateOrganizationUserRole", ex); + throw; + } + } + + + [Fact] + public async Task TestDeleteOrganizationUserRole_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteOrganizationUserRole - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteOrganizationUserRole", ex); + throw; + } + } + + + [Fact] + public async Task TestGetOrganizationUserPermissions_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetOrganizationUserPermissions - requires parameters: org_code, user_id, expand"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetOrganizationUserPermissions", ex); + throw; + } + } + + + [Fact] + public async Task TestCreateOrganizationUserPermission_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping CreateOrganizationUserPermission - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "CreateOrganizationUserPermission", ex); + throw; + } + } + + + [Fact] + public async Task TestDeleteOrganizationUserPermission_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteOrganizationUserPermission - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteOrganizationUserPermission", ex); + throw; + } + } + + + [Fact] + public async Task TestRemoveOrganizationUser_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping RemoveOrganizationUser - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "RemoveOrganizationUser", ex); + throw; + } + } + + + [Fact] + public async Task TestDeleteOrganizationUserAPIScope_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteOrganizationUserAPIScope - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteOrganizationUserAPIScope", ex); + throw; + } + } + + + [Fact] + public async Task TestAddOrganizationUserAPIScope_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping AddOrganizationUserAPIScope - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "AddOrganizationUserAPIScope", ex); + throw; + } + } + + + [Fact] + public async Task TestDeleteOrganizationFeatureFlagOverrides_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteOrganizationFeatureFlagOverrides - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteOrganizationFeatureFlagOverrides", ex); + throw; + } + } + + + [Fact] + public async Task TestGetOrganizationFeatureFlags_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetOrganizationFeatureFlags - requires parameters: org_code"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetOrganizationFeatureFlags", ex); + throw; + } + } + + + [Fact] + public async Task TestDeleteOrganizationFeatureFlagOverride_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteOrganizationFeatureFlagOverride - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteOrganizationFeatureFlagOverride", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateOrganizationFeatureFlagOverride_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping UpdateOrganizationFeatureFlagOverride - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateOrganizationFeatureFlagOverride", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateOrganizationProperty_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping UpdateOrganizationProperty - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateOrganizationProperty", ex); + throw; + } + } + + + [Fact] + public async Task TestGetOrganizationPropertyValues_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetOrganizationPropertyValues - requires parameters: org_code"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetOrganizationPropertyValues", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateOrganizationProperties_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdateOrganizationProperties - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateOrganizationProperties", ex); + throw; + } + } + + + [Fact] + public async Task TestReplaceOrganizationMFA_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping ReplaceOrganizationMFA - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "ReplaceOrganizationMFA", ex); + throw; + } + } + + + [Fact] + public async Task TestDeleteOrganizationHandle_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteOrganizationHandle - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteOrganizationHandle", ex); + throw; + } + } + + + [Fact] + public async Task TestReadOrganizationLogo_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping ReadOrganizationLogo - requires parameters: org_code"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "ReadOrganizationLogo", ex); + throw; + } + } + + + [Fact] + public async Task TestDeleteOrganizationLogo_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteOrganizationLogo - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteOrganizationLogo", ex); + throw; + } + } + + + [Fact] + public async Task TestAddOrganizationLogo_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping AddOrganizationLogo - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "AddOrganizationLogo", ex); + throw; + } + } + + + [Fact] + public async Task TestGetOrganizationConnections_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetOrganizationConnections - requires parameters: organization_code"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetOrganizationConnections", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateOrganizationSessions_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdateOrganizationSessions - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateOrganizationSessions", ex); + throw; + } + } + + + #endregion + + + #region UsersApi Tests + + + [Fact] + public async Task TestResetOrgUserMFAAll_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping ResetOrgUserMFAAll - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "ResetOrgUserMFAAll", ex); + throw; + } + } + + + [Fact] + public async Task TestGetOrgUserMFA_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetOrgUserMFA - requires parameters: org_code, user_id"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetOrgUserMFA", ex); + throw; + } + } + + + [Fact] + public async Task TestResetOrgUserMFA_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping ResetOrgUserMFA - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "ResetOrgUserMFA", ex); + throw; + } + } + + + [Fact] + public async Task TestGetUsers_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetUsers - requires parameters: page_size, user_id, next_token, email, username, expand, has_organization"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetUsers", ex); + throw; + } + } + + + [Fact] + public async Task TestRefreshUserClaims_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping RefreshUserClaims - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "RefreshUserClaims", ex); + throw; + } + } + + + [Fact] + public async Task TestDeleteUser_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteUser - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteUser", ex); + throw; + } + } + + + [Fact] + public async Task TestGetUserData_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetUserData - requires parameters: id, expand"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetUserData", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateUser_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdateUser - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateUser", ex); + throw; + } + } + + + [Fact] + public async Task TestCreateUser_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping CreateUser - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "CreateUser", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateUserFeatureFlagOverride_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping UpdateUserFeatureFlagOverride - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateUserFeatureFlagOverride", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateUserProperties_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdateUserProperties - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateUserProperties", ex); + throw; + } + } + + + [Fact] + public async Task TestSetUserPassword_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping SetUserPassword - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "SetUserPassword", ex); + throw; + } + } + + + [Fact] + public async Task TestGetUserIdentities_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetUserIdentities - requires parameters: user_id, starting_after, ending_before"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetUserIdentities", ex); + throw; + } + } + + + [Fact] + public async Task TestDeleteUserSessions_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteUserSessions - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteUserSessions", ex); + throw; + } + } + + + [Fact] + public async Task TestGetUserSessions_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetUserSessions - requires parameters: user_id"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetUserSessions", ex); + throw; + } + } + + + [Fact] + public async Task TestResetUsersMFAAll_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping ResetUsersMFAAll - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "ResetUsersMFAAll", ex); + throw; + } + } + + + [Fact] + public async Task TestGetUsersMFA_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetUsersMFA - requires parameters: user_id"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetUsersMFA", ex); + throw; + } + } + + + [Fact] + public async Task TestResetUsersMFA_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping ResetUsersMFA - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "ResetUsersMFA", ex); + throw; + } + } + + + #endregion + + + #region PermissionsApi Tests + + + [Fact] + public async Task TestGetPermissions_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetPermissions - requires parameters: sort, page_size, next_token"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetPermissions", ex); + throw; + } + } + + + [Fact] + public async Task TestCreatePermission_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping CreatePermission - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "CreatePermission", ex); + throw; + } + } + + + [Fact] + public async Task TestDeletePermission_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeletePermission - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeletePermission", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdatePermissions_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdatePermissions - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdatePermissions", ex); + throw; + } + } + + + [Fact] + public async Task TestGetRolePermissions_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetRolePermissions - requires parameters: role_id, sort, page_size, next_token"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetRolePermissions", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateRolePermissions_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdateRolePermissions - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateRolePermissions", ex); + throw; + } + } + + + [Fact] + public async Task TestRemoveRolePermission_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping RemoveRolePermission - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "RemoveRolePermission", ex); + throw; + } + } + + + #endregion + + + #region PropertiesApi Tests + + + [Fact] + public async Task TestCreateProperty_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping CreateProperty - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "CreateProperty", ex); + throw; + } + } + + + [Fact] + public async Task TestDeleteProperty_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteProperty - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteProperty", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateProperty_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdateProperty - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateProperty", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateUserProperty_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping UpdateUserProperty - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateUserProperty", ex); + throw; + } + } + + + [Fact] + public async Task TestGetUserPropertyValues_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetUserPropertyValues - requires parameters: user_id"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetUserPropertyValues", ex); + throw; + } + } + + + #endregion + + + #region PropertyCategoriesApi Tests + + + [Fact] + public async Task TestCreateCategory_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping CreateCategory - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "CreateCategory", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateCategory_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdateCategory - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateCategory", ex); + throw; + } + } + + + #endregion + + + #region RolesApi Tests + + + [Fact] + public async Task TestGetRoles_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetRoles - requires parameters: sort, page_size, next_token"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetRoles", ex); + throw; + } + } + + + [Fact] + public async Task TestCreateRole_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping CreateRole - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "CreateRole", ex); + throw; + } + } + + + [Fact] + public async Task TestDeleteRole_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteRole - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteRole", ex); + throw; + } + } + + + [Fact] + public async Task TestGetRole_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetRole - requires parameters: role_id"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetRole", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateRoles_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdateRoles - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateRoles", ex); + throw; + } + } + + + [Fact] + public async Task TestGetRoleScopes_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetRoleScopes - requires parameters: role_id"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetRoleScopes", ex); + throw; + } + } + + + [Fact] + public async Task TestAddRoleScope_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping AddRoleScope - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "AddRoleScope", ex); + throw; + } + } + + + [Fact] + public async Task TestDeleteRoleScope_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteRoleScope - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteRoleScope", ex); + throw; + } + } + + + #endregion + + + #region SearchApi Tests + + + [Fact] + public async Task TestSearchUsers_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping SearchUsers - requires parameters: page_size, query, properties, starting_after, ending_before, expand"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "SearchUsers", ex); + throw; + } + } + + + #endregion + + + #region SubscribersApi Tests + + + [Fact] + public async Task TestGetSubscribers_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetSubscribers - requires parameters: sort, page_size, next_token"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetSubscribers", ex); + throw; + } + } + + + [Fact] + public async Task TestCreateSubscriber_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping CreateSubscriber - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "CreateSubscriber", ex); + throw; + } + } + + + [Fact] + public async Task TestGetSubscriber_Converter() + { + SkipIfNotConfigured(); + + try + { + + // This endpoint requires parameters - skipping automatic test + // TODO: Add manual test with appropriate parameters + _output.WriteLine($"Skipping GetSubscriber - requires parameters: subscriber_id"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetSubscriber", ex); + throw; + } + } + + + #endregion + + + #region WebhooksApi Tests + + + [Fact] + public async Task TestDeleteWebHook_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Unknown operation type + _output.WriteLine($"Skipping DeleteWebHook - unknown operation type"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "DeleteWebHook", ex); + throw; + } + } + + + [Fact] + public async Task TestUpdateWebHook_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping UpdateWebHook - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "UpdateWebHook", ex); + throw; + } + } + + + [Fact] + public async Task TestGetWebHooks_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Create API instance with mock HTTP client if in mock mode + var api = UseMockMode && MockHttpClient != null + ? new WebhooksApi(MockHttpClient, ApiConfiguration) + : new WebhooksApi(ApiConfiguration); + + var result = await api.GetWebHooksAsync(); + + Assert.NotNull(result); + + // Show detailed output + TestOutputHelper.WriteResponseDetails(_output, "GetWebHooks", result); + + // Test serialization round-trip + TestSerializationRoundTrip(result, "GetWebHooks"); + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "GetWebHooks", ex); + throw; + } + } + + + [Fact] + public async Task TestCreateWebHook_Converter() + { + SkipIfNotConfigured(); + + try + { + + // Write operation - skipping to avoid modifying data + // TODO: Add manual test with appropriate request object + _output.WriteLine($"Skipping CreateWebHook - write operation"); + return; + + } + catch (Exception ex) + { + TestOutputHelper.WriteError(_output, "CreateWebHook", ex); + throw; + } + } + + + #endregion + + + #region Helper Methods + + /// + /// Tests serialization/deserialization round-trip for a response object + /// + private void TestSerializationRoundTrip(T original, string testName) where T : class + { + try + { + // Get the standard converters from ApiClient using reflection + var apiClientType = typeof(Kinde.Api.Client.ApiClient); + var helperType = apiClientType.Assembly.GetType("Kinde.Api.Client.JsonConverterHelper"); + IList converters; + if (helperType != null) + { + var method = helperType.GetMethod("CreateStandardConverters", + System.Reflection.BindingFlags.Public | System.Reflection.BindingFlags.Static); + if (method != null) + { + converters = (IList)method.Invoke(null, null)!; + } + else + { + throw new InvalidOperationException("Could not find CreateStandardConverters method"); + } + } + else + { + throw new InvalidOperationException("Could not find JsonConverterHelper type"); + } + + var settings = new JsonSerializerSettings + { + Converters = converters, + NullValueHandling = NullValueHandling.Ignore + }; + + // Serialize + var json = JsonConvert.SerializeObject(original, settings); + Assert.False(string.IsNullOrEmpty(json), + $"{testName}: Serialization produced empty JSON"); + + // Deserialize + var deserialized = JsonConvert.DeserializeObject(json, settings); + Assert.NotNull(deserialized); + + // Round-trip comparison + var originalJson = JsonConvert.SerializeObject(original, settings); + var deserializedJson = JsonConvert.SerializeObject(deserialized, settings); + + Assert.Equal(originalJson, deserializedJson); + + // Use enhanced output helper + TestOutputHelper.WriteSerializationTest(_output, testName, json.Length, true); + } + catch (Exception ex) + { + TestOutputHelper.WriteSerializationTest(_output, testName, 0, false); + _output.WriteLine($"Serialization error: {ex.Message}"); + throw; + } + } + + #endregion + } +} diff --git a/Kinde.Api.Test/Integration/GeneratedMockResponses.cs b/Kinde.Api.Test/Integration/GeneratedMockResponses.cs new file mode 100644 index 0000000..776a863 --- /dev/null +++ b/Kinde.Api.Test/Integration/GeneratedMockResponses.cs @@ -0,0 +1,32 @@ +// +// This file is automatically generated by the mock response generator. +// DO NOT EDIT THIS FILE MANUALLY - your changes will be overwritten. +// To regenerate this file, run: python generate_mock_responses.py --spec --output +// + +using System; +using System.Net; + +namespace Kinde.Api.Test.Integration +{ + /// + /// Auto-generated mock response data for integration tests. + /// This class is automatically generated - do not edit manually. + /// + public static class GeneratedMockResponses + { + /// + /// Sets up all mock responses in the provided MockHttpHandler. + /// This method is automatically generated - do not edit manually. + /// + public static void SetupResponses(MockHttpHandler handler) + { + if (handler == null) + throw new ArgumentNullException(nameof(handler)); + + // This file will be populated when generate-all-apis.sh is run + // Mock responses are generated from the OpenAPI specification + } + } +} + diff --git a/Kinde.Api.Test/Integration/HOW_TO_RUN_TESTS.md b/Kinde.Api.Test/Integration/HOW_TO_RUN_TESTS.md new file mode 100644 index 0000000..e05fe3e --- /dev/null +++ b/Kinde.Api.Test/Integration/HOW_TO_RUN_TESTS.md @@ -0,0 +1,258 @@ +# How to Run Integration Tests + +This guide explains how to run integration tests in both **Mock Mode** (for CI/CD) and **Real API Mode** (for local development). + +## Quick Reference + +### Mock Mode (CI/CD - No Credentials Required) +```bash +USE_MOCK_MODE=true dotnet test Kinde.Api.Test/Kinde.Api.Test.csproj --filter "FullyQualifiedName~Integration" +``` + +### Real API Mode (Local Development - Requires Credentials) +```bash +dotnet test Kinde.Api.Test/Kinde.Api.Test.csproj --filter "FullyQualifiedName~Integration" +``` + +--- + +## Mock Mode (CI/CD Testing) + +Mock mode uses predefined mock HTTP responses. **No Kinde credentials required.** Perfect for: +- GitHub Actions +- CI/CD pipelines +- Quick local testing without API access +- Testing converter logic without network calls + +### Option 1: Environment Variable (Recommended for CI/CD) + +```bash +# Linux/macOS +export USE_MOCK_MODE=true +dotnet test Kinde.Api.Test/Kinde.Api.Test.csproj --filter "FullyQualifiedName~Integration" + +# Windows PowerShell +$env:USE_MOCK_MODE="true" +dotnet test Kinde.Api.Test/Kinde.Api.Test.csproj --filter "FullyQualifiedName~Integration" + +# Windows CMD +set USE_MOCK_MODE=true +dotnet test Kinde.Api.Test/Kinde.Api.Test.csproj --filter "FullyQualifiedName~Integration" + +# Inline (all platforms) +USE_MOCK_MODE=true dotnet test Kinde.Api.Test/Kinde.Api.Test.csproj --filter "FullyQualifiedName~Integration" +``` + +### Option 2: Configuration File + +Create or update `Kinde.Api.Test/appsettings.json`: + +```json +{ + "KindeManagementApi": { + "UseMockMode": true + } +} +``` + +Then run: +```bash +dotnet test Kinde.Api.Test/Kinde.Api.Test.csproj --filter "FullyQualifiedName~Integration" +``` + +### GitHub Actions Example + +```yaml +- name: Run Integration Tests (Mock Mode) + env: + USE_MOCK_MODE: "true" + run: | + dotnet test Kinde.Api.Test/Kinde.Api.Test.csproj --filter "FullyQualifiedName~Integration" +``` + +--- + +## Real API Mode (Local Development) + +Real API mode makes actual HTTP requests to Kinde servers. **Requires M2M credentials.** + +### Step 1: Configure Credentials + +Choose one method: + +#### Method A: Environment Variables + +```bash +# Linux/macOS +export KINDE_DOMAIN="https://your-business.kinde.com" +export KINDE_CLIENT_ID="your_m2m_client_id" +export KINDE_CLIENT_SECRET="your_m2m_client_secret" +export KINDE_AUDIENCE="https://your-business.kinde.com/api" +export KINDE_SCOPE="read:users read:organizations read:applications read:roles read:permissions read:properties" + +# Windows PowerShell +$env:KINDE_DOMAIN="https://your-business.kinde.com" +$env:KINDE_CLIENT_ID="your_m2m_client_id" +$env:KINDE_CLIENT_SECRET="your_m2m_client_secret" +$env:KINDE_AUDIENCE="https://your-business.kinde.com/api" +$env:KINDE_SCOPE="read:users read:organizations read:applications read:roles read:permissions read:properties" +``` + +#### Method B: Configuration File + +Create `Kinde.Api.Test/appsettings.json`: + +```json +{ + "KindeManagementApi": { + "UseMockMode": false, + "Domain": "https://your-business.kinde.com", + "ClientId": "your_m2m_client_id_here", + "ClientSecret": "your_m2m_client_secret_here", + "Audience": "https://your-business.kinde.com/api", + "Scope": "read:users read:organizations read:applications read:roles read:permissions read:properties" + } +} +``` + +**Note:** Make sure `UseMockMode` is `false` or not set. + +### Step 2: Run Tests + +```bash +dotnet test Kinde.Api.Test/Kinde.Api.Test.csproj --filter "FullyQualifiedName~Integration" +``` + +--- + +## Test Output + +Both modes provide enhanced output showing: +- Response type information +- Key properties from API responses +- Full JSON response (truncated if > 2000 chars) +- Serialization round-trip test results +- Error details with stack traces + +### Example Output (Mock Mode) + +``` +═══════════════════════════════════════════════════════════════ +Test: GetBusiness +═══════════════════════════════════════════════════════════════ +Response Type: GetBusinessResponse + +Key Properties: + code: OK + message: Success + business: {...} + +Full Response JSON: +{ + "code": "OK", + "message": "Success", + "business": { + "code": "bus_test123", + "name": "Test Business", + ... + } +} +═══════════════════════════════════════════════════════════════ +✓ GetBusiness: Success +``` + +--- + +## Verifying Mode + +The test output will indicate which mode is active: + +### Mock Mode +``` +✓ Using MOCK mode for integration tests (CI/CD mode) +``` + +### Real API Mode +``` +✓ M2M authentication successful for integration tests (REAL API mode) +``` + +--- + +## Troubleshooting + +### Tests Skipped / Not Configured + +If you see: +``` +WARNING: M2M credentials not configured. Integration tests will be skipped. +``` + +**Solutions:** +1. For mock mode: Set `USE_MOCK_MODE=true` or `UseMockMode: true` in config +2. For real mode: Configure credentials (see "Real API Mode" above) + +### Authentication Failed (Real Mode) + +If you see: +``` +WARNING: Failed to obtain access token. Integration tests will be skipped. +``` + +**Check:** +- Credentials are correct +- Domain URL is correct (include `https://`) +- M2M application is properly configured in Kinde +- Scopes are correct for your M2M application + +### Mock Mode Not Working + +If mock mode isn't activating: + +1. **Check environment variable:** + ```bash + echo $USE_MOCK_MODE # Should output "true" + ``` + +2. **Check config file:** + ```bash + cat Kinde.Api.Test/appsettings.json | grep UseMockMode + ``` + +3. **Ensure it's not overridden:** + - Real credentials will take precedence if configured + - Remove real credentials if you want mock mode + +--- + +## Running Specific Tests + +### All Integration Tests +```bash +dotnet test --filter "FullyQualifiedName~Integration" +``` + +### Only Generated Tests +```bash +dotnet test --filter "FullyQualifiedName~GeneratedConverterIntegrationTests" +``` + +### Only Manual Tests +```bash +dotnet test --filter "FullyQualifiedName~ConverterIntegrationTests" +``` + +### Specific Test Method +```bash +dotnet test --filter "FullyQualifiedName~TestGetBusiness_Converter" +``` + +--- + +## Summary + +| Mode | Command | Credentials Required | Use Case | +|------|---------|---------------------|----------| +| **Mock** | `USE_MOCK_MODE=true dotnet test ...` | ❌ No | CI/CD, quick testing | +| **Real** | `dotnet test ...` | ✅ Yes | Local development, full validation | + diff --git a/Kinde.Api.Test/Integration/M2MAuthenticationHelper.cs b/Kinde.Api.Test/Integration/M2MAuthenticationHelper.cs new file mode 100644 index 0000000..d0b0331 --- /dev/null +++ b/Kinde.Api.Test/Integration/M2MAuthenticationHelper.cs @@ -0,0 +1,76 @@ +using System; +using System.Collections.Generic; +using System.Net.Http; +using System.Threading.Tasks; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; + +namespace Kinde.Api.Test.Integration +{ + /// + /// Helper class for M2M (Machine-to-Machine) authentication with Kinde Management API + /// + public static class M2MAuthenticationHelper + { + /// + /// Gets an access token using client credentials flow + /// + /// Kinde business domain (e.g., https://your-business.kinde.com) + /// M2M application client ID + /// M2M application client secret + /// API audience (typically https://your-business.kinde.com/api) + /// Optional scope string + /// Access token or null if authentication failed + public static async Task GetAccessTokenAsync( + string domain, + string clientId, + string clientSecret, + string audience, + string? scope = null) + { + if (string.IsNullOrWhiteSpace(domain)) + throw new ArgumentException("Domain cannot be null or empty", nameof(domain)); + if (string.IsNullOrWhiteSpace(clientId)) + throw new ArgumentException("ClientId cannot be null or empty", nameof(clientId)); + if (string.IsNullOrWhiteSpace(clientSecret)) + throw new ArgumentException("ClientSecret cannot be null or empty", nameof(clientSecret)); + if (string.IsNullOrWhiteSpace(audience)) + throw new ArgumentException("Audience cannot be null or empty", nameof(audience)); + + try + { + using var httpClient = new HttpClient(); + var tokenUrl = $"{domain.TrimEnd('/')}/oauth2/token"; + + var requestParams = new List> + { + new("grant_type", "client_credentials"), + new("client_id", clientId), + new("client_secret", clientSecret), + new("audience", audience) + }; + + if (!string.IsNullOrWhiteSpace(scope)) + { + requestParams.Add(new KeyValuePair("scope", scope)); + } + + var requestContent = new FormUrlEncodedContent(requestParams); + var response = await httpClient.PostAsync(tokenUrl, requestContent); + + response.EnsureSuccessStatusCode(); + + var jsonResponse = await response.Content.ReadAsStringAsync(); + var tokenData = JsonConvert.DeserializeObject(jsonResponse); + + return tokenData?["access_token"]?.ToString(); + } + catch (Exception ex) + { + throw new InvalidOperationException( + $"Failed to obtain access token: {ex.Message}", ex); + } + } + } +} + diff --git a/Kinde.Api.Test/Integration/MockHttpHandler.cs b/Kinde.Api.Test/Integration/MockHttpHandler.cs new file mode 100644 index 0000000..753f0ec --- /dev/null +++ b/Kinde.Api.Test/Integration/MockHttpHandler.cs @@ -0,0 +1,428 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Net; +using System.Net.Http; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using Newtonsoft.Json; + +namespace Kinde.Api.Test.Integration +{ + /// + /// Mock HTTP message handler for integration tests + /// Returns predefined responses based on request patterns + /// + public class MockHttpHandler : HttpMessageHandler + { + // Store JSON strings instead of HttpResponseMessage to avoid content reuse issues + private readonly Dictionary _responses = new(); + private readonly Dictionary> _responseFactories = new(); + + public MockHttpHandler() + { + SetupDefaultResponses(); + // Load auto-generated mock responses + GeneratedMockResponses.SetupResponses(this); + } + + /// + /// Sets up default mock responses for common endpoints + /// + private void SetupDefaultResponses() + { + // GetBusiness endpoint + AddResponse("GET", "/api/v1/business", new + { + code = "OK", + message = "Success", + business = new + { + code = "bus_test123", + name = "Test Business", + phone = "+1234567890", + email = "test@example.com", + industry = "Technology", + timezone = "America/Los_Angeles", + privacy_url = "https://example.com/privacy", + terms_url = "https://example.com/terms", + has_clickwrap = true, + has_kinde_branding = false, + created_on = "2024-01-01T00:00:00Z" + } + }); + + // GetEnvironment endpoint + AddResponse("GET", "/api/v1/environment", new + { + code = "OK", + message = "Success", + environment = new + { + code = "production", + name = "Production", + is_default = true, + is_live = true, + kinde_domain = "test.kinde.com", + created_on = "2024-01-01T00:00:00Z" + } + }); + + // GetEnvironmentVariables endpoint + AddResponse("GET", "/api/v1/environment-variables", new + { + code = "OK", + message = "Success", + environment_variables = new[] + { + new { key = "API_KEY", value = "secret_value", is_secret = true }, + new { key = "DEBUG_MODE", value = "false", is_secret = false } + } + }); + + // GetOrganizations endpoint + AddResponse("GET", "/api/v1/organizations", new + { + code = "OK", + message = "Success", + organizations = new[] + { + new { code = "org_001", name = "Organization 1", is_default = true }, + new { code = "org_002", name = "Organization 2", is_default = false } + }, + next_token = (string?)null + }); + + // GetAPIs endpoint + AddResponse("GET", "/api/v1/apis", new + { + code = "OK", + message = "Success", + apis = new[] + { + new + { + id = "api_001", + name = "Test API", + audience = "https://api.example.com", + is_management_api = true + } + } + }); + + // GetApplications endpoint + AddResponse("GET", "/api/v1/applications", new + { + code = "OK", + message = "Success", + applications = new[] + { + new + { + id = "app_001", + name = "Test Application", + type = "reg", + client_id = "client_123" + } + } + }); + + // GetRoles endpoint + AddResponse("GET", "/api/v1/roles", new + { + code = "OK", + message = "Success", + roles = new[] + { + new { id = "role_001", name = "Admin", key = "admin" }, + new { id = "role_002", name = "User", key = "user" } + } + }); + + // GetPermissions endpoint + AddResponse("GET", "/api/v1/permissions", new + { + code = "OK", + message = "Success", + permissions = new[] + { + new { id = "perm_001", name = "read:users", key = "read:users" }, + new { id = "perm_002", name = "write:users", key = "write:users" } + } + }); + + // GetProperties endpoint + AddResponse("GET", "/api/v1/properties", new + { + code = "OK", + message = "Success", + properties = new[] + { + new { id = "prop_001", name = "theme", key = "theme", type = "str" } + } + }); + + // GetTimezones endpoint + AddResponse("GET", "/api/v1/timezones", new + { + code = "OK", + message = "Success", + timezones = new[] + { + new { key = "America/Los_Angeles", name = "Pacific Time" }, + new { key = "America/New_York", name = "Eastern Time" } + } + }); + + // GetIndustries endpoint + AddResponse("GET", "/api/v1/industries", new + { + code = "OK", + message = "Success", + industries = new[] + { + new { name = "Technology" }, + new { name = "Healthcare" } + } + }); + + // GetConnections endpoint + AddResponse("GET", "/api/v1/connections", new + { + code = "OK", + message = "Success", + connections = new[] + { + new + { + id = "conn_001", + name = "Test Connection", + type = "saml" + } + } + }); + + // GetWebHooks endpoint + AddResponse("GET", "/api/v1/webhooks", new + { + code = "OK", + message = "Success", + webhooks = new[] + { + new + { + id = "webhook_001", + endpoint = "https://example.com/webhook", + events = new[] { "user.created", "user.updated" } + } + } + }); + + // GetCategories endpoint (Property Categories) + AddResponse("GET", "/api/v1/property_categories", new + { + code = "OK", + message = "Success", + property_categories = new[] + { + new { id = "cat_001", name = "User Properties" }, + new { id = "cat_002", name = "Organization Properties" } + } + }); + + // GetSubscribers endpoint + AddResponse("GET", "/api/v1/subscribers", new + { + code = "OK", + message = "Success", + subscribers = new[] + { + new + { + id = "sub_001", + email = "subscriber@example.com", + full_name = "Test Subscriber" + } + } + }); + + // Parameterized endpoints - use more specific path patterns + // GetAPIScopes by ID - /api/v1/apis/{api_id}/scopes (more specific, check first) + AddResponse("GET", "/api/v1/apis/", new + { + code = "OK", + message = "Success", + scopes = new[] + { + new { id = "scope_001", name = "read:users" }, + new { id = "scope_002", name = "write:users" } + } + }, pathContains: "/scopes"); + + // GetAPI by ID - /api/v1/apis/{api_id} (less specific, check after scopes) + AddResponse("GET", "/api/v1/apis/", new + { + code = "OK", + message = "Success", + api = new + { + id = "api_001", + name = "Test API", + audience = "https://api.example.com", + is_management_api = true + } + }); + + // GetRolePermissions by ID - /api/v1/roles/{role_id}/permissions (most specific) + AddResponse("GET", "/api/v1/roles/", new + { + code = "OK", + message = "Success", + permissions = new[] + { + new { id = "perm_001", name = "read:users", key = "read:users" } + } + }, pathContains: "/permissions"); + + // GetRoleScopes by ID - /api/v1/roles/{role_id}/scopes + AddResponse("GET", "/api/v1/roles/", new + { + code = "OK", + message = "Success", + scopes = new[] + { + new { id = "scope_001", name = "read:users" } + } + }, pathContains: "/scopes"); + + // GetRole by ID - /api/v1/roles/{role_id} + AddResponse("GET", "/api/v1/roles/", new + { + code = "OK", + message = "Success", + role = new + { + id = "role_001", + name = "Admin", + key = "admin" + } + }); + + // GetApplication by ID - /api/v1/applications/{application_id} + AddResponse("GET", "/api/v1/applications/", new + { + code = "OK", + message = "Success", + application = new + { + id = "app_001", + name = "Test Application", + type = "reg", + client_id = "client_123" + } + }); + } + + /// + /// Adds a mock response for a specific HTTP method and path pattern + /// + public void AddResponse(string method, string pathPattern, object responseData, HttpStatusCode statusCode = HttpStatusCode.OK, string? pathContains = null) + { + var key = $"{method.ToUpper()}:{pathPattern}"; + if (!string.IsNullOrEmpty(pathContains)) + { + key += $":{pathContains}"; + } + var json = JsonConvert.SerializeObject(responseData); + // Store JSON string to avoid content reuse issues + _responses[key] = (statusCode, json); + } + + /// + /// Adds a mock response from a JSON string (used by generated mock responses) + /// + public void AddResponseFromJson(string method, string pathPattern, string jsonContent, HttpStatusCode statusCode = HttpStatusCode.OK) + { + var key = $"{method.ToUpper()}:{pathPattern}"; + _responses[key] = (statusCode, jsonContent); + } + + /// + /// Adds a dynamic response factory for a specific HTTP method and path pattern + /// + public void AddResponseFactory(string method, string pathPattern, Func factory) + { + var key = $"{method.ToUpper()}:{pathPattern}"; + _responseFactories[key] = factory; + } + + protected override Task SendAsync(HttpRequestMessage request, CancellationToken cancellationToken) + { + var method = request.Method.Method; + var path = request.RequestUri?.AbsolutePath ?? ""; + var fullUrl = request.RequestUri?.ToString() ?? ""; + + // Debug: Log the request (can be removed later) + System.Diagnostics.Debug.WriteLine($"MockHttpHandler: {method} {path} (full: {fullUrl})"); + + // Try to find exact match first + var exactKey = $"{method}:{path}"; + if (_responses.TryGetValue(exactKey, out var exactResponse)) + { + System.Diagnostics.Debug.WriteLine($"MockHttpHandler: Found exact match for {exactKey}"); + var response = new HttpResponseMessage(exactResponse.StatusCode) + { + Content = new StringContent(exactResponse.JsonContent, Encoding.UTF8, "application/json") + }; + return Task.FromResult(response); + } + + // Try to find path pattern match (check most specific patterns first) + // Sort by key length descending to check more specific patterns first + var sortedResponses = _responses.OrderByDescending(kvp => kvp.Key.Length); + foreach (var kvp in sortedResponses) + { + var keyParts = kvp.Key.Split(':'); + if (keyParts.Length >= 2 && keyParts[0] == method && path.StartsWith(keyParts[1])) + { + // If there's a pathContains requirement (keyParts[2]), check it + if (keyParts.Length >= 3) + { + var pathContains = keyParts[2]; + if (!path.Contains(pathContains)) + { + continue; // Skip this response, path doesn't contain required substring + } + } + + System.Diagnostics.Debug.WriteLine($"MockHttpHandler: Found pattern match for {kvp.Key}"); + var response = new HttpResponseMessage(kvp.Value.StatusCode) + { + Content = new StringContent(kvp.Value.JsonContent, Encoding.UTF8, "application/json") + }; + return Task.FromResult(response); + } + } + + // Try response factories + foreach (var kvp in _responseFactories) + { + var keyParts = kvp.Key.Split(':', 2); + if (keyParts.Length == 2 && keyParts[0] == method && path.StartsWith(keyParts[1])) + { + System.Diagnostics.Debug.WriteLine($"MockHttpHandler: Using factory for {kvp.Key}"); + return Task.FromResult(kvp.Value(request)); + } + } + + // Default: return 404 with JSON (not HTML) + System.Diagnostics.Debug.WriteLine($"MockHttpHandler: No match found for {method} {path}, returning 404"); + return Task.FromResult(new HttpResponseMessage(HttpStatusCode.NotFound) + { + Content = new StringContent(JsonConvert.SerializeObject(new { error = "Not found", path, method }), Encoding.UTF8, "application/json") + }); + } + } +} + diff --git a/Kinde.Api.Test/Integration/README.md b/Kinde.Api.Test/Integration/README.md new file mode 100644 index 0000000..50627c9 --- /dev/null +++ b/Kinde.Api.Test/Integration/README.md @@ -0,0 +1,94 @@ +# Integration Tests + +This directory contains integration tests for the Kinde .NET SDK. The tests support two modes: + +## Test Modes + +### 1. Real API Mode (Local Development) + +Tests run against actual Kinde API servers. Requires M2M credentials. + +**Configuration:** + +Set environment variables or configure `appsettings.json`: + +```json +{ + "KindeManagementApi": { + "UseMockMode": false, + "Domain": "https://your-business.kinde.com", + "ClientId": "your_m2m_client_id_here", + "ClientSecret": "your_m2m_client_secret_here", + "Audience": "https://your-business.kinde.com/api", + "Scope": "read:users read:organizations read:applications read:roles read:permissions read:properties" + } +} +``` + +Or set environment variables: +- `KINDE_DOMAIN` +- `KINDE_CLIENT_ID` +- `KINDE_CLIENT_SECRET` +- `KINDE_AUDIENCE` +- `KINDE_SCOPE` (optional) + +**Running:** + +```bash +dotnet test Kinde.Api.Test/Kinde.Api.Test.csproj --filter "FullyQualifiedName~Integration" +``` + +### 2. Mock Mode (CI/CD) + +Tests use mock HTTP responses. No credentials required. Perfect for GitHub Actions and CI/CD pipelines. + +**Configuration:** + +Set environment variable: +```bash +export USE_MOCK_MODE=true +``` + +Or in `appsettings.json`: +```json +{ + "KindeManagementApi": { + "UseMockMode": true + } +} +``` + +**Running:** + +```bash +USE_MOCK_MODE=true dotnet test Kinde.Api.Test/Kinde.Api.Test.csproj --filter "FullyQualifiedName~Integration" +``` + +## Test Output + +Tests provide detailed output including: +- Response type information +- Key properties from API responses +- Full JSON response (truncated if > 2000 chars) +- Serialization round-trip test results +- Error details with stack traces + +## Files + +- `BaseIntegrationTest.cs` - Base class supporting both test modes +- `MockHttpHandler.cs` - Mock HTTP handler for CI/CD testing +- `TestOutputHelper.cs` - Enhanced test output formatting +- `ConverterIntegrationTests.cs` - Manual integration tests +- `GeneratedConverterIntegrationTests.cs` - Auto-generated integration tests +- `M2MAuthenticationHelper.cs` - M2M authentication helper + +## GitHub Actions Example + +```yaml +- name: Run Integration Tests (Mock Mode) + env: + USE_MOCK_MODE: "true" + run: | + dotnet test Kinde.Api.Test/Kinde.Api.Test.csproj --filter "FullyQualifiedName~Integration" +``` + diff --git a/Kinde.Api.Test/Integration/TEST_GENERATION_IMPACT.md b/Kinde.Api.Test/Integration/TEST_GENERATION_IMPACT.md new file mode 100644 index 0000000..5f97ca9 --- /dev/null +++ b/Kinde.Api.Test/Integration/TEST_GENERATION_IMPACT.md @@ -0,0 +1,134 @@ +# Impact of Auto-Generation on Integration Tests + +## Overview + +The integration test infrastructure supports both **manually written tests** and **auto-generated tests**. This document explains how the auto-generation process affects the test infrastructure. + +## Test Files + +### Manually Maintained (NOT affected by regeneration) + +These files are **never overwritten** by the generation process: + +- ✅ `BaseIntegrationTest.cs` - Base class with mock mode support +- ✅ `MockHttpHandler.cs` - Mock HTTP handler for CI/CD +- ✅ `TestOutputHelper.cs` - Enhanced output formatting +- ✅ `ConverterIntegrationTests.cs` - Manually written integration tests +- ✅ `M2MAuthenticationHelper.cs` - M2M authentication helper +- ✅ `IntegrationTestFixture.cs` - Test fixture (part of BaseIntegrationTest.cs) + +### Auto-Generated (OVERWRITTEN on regeneration) + +This file is **completely regenerated** each time you run the test generator: + +- ⚠️ `GeneratedConverterIntegrationTests.cs` - Auto-generated from OpenAPI spec + +## What Happens When Tests Are Regenerated? + +### ✅ Safe - Won't Break + +1. **Base Class Compatibility**: The generated tests inherit from `BaseIntegrationTest`, which we've enhanced with: + - Mock mode support (`UseMockMode`, `MockHttpClient`) + - Enhanced configuration handling + - Both real and mock mode support + +2. **Template Updates**: The test generator template (`tools/test-generator/templates/integration_test.cs.j2`) has been updated to: + - Use mock HTTP client when in mock mode + - Use `TestOutputHelper` for enhanced output + - Follow the same patterns as manually written tests + +### ⚠️ What Gets Regenerated + +When you run `generate-all-apis.sh` or manually run the test generator: + +1. **`GeneratedConverterIntegrationTests.cs`** is completely rewritten +2. All test methods are regenerated based on the current OpenAPI spec +3. New endpoints get new test methods automatically +4. Removed endpoints have their test methods removed + +### ✅ What Stays the Same + +1. **Manually written tests** in `ConverterIntegrationTests.cs` are never touched +2. **Test infrastructure** (BaseIntegrationTest, MockHttpHandler, etc.) remains unchanged +3. **Configuration** (appsettings.json) is not modified + +## Regeneration Process + +### Current State (After Updates) + +The test generator template now generates tests that: + +```csharp +// ✅ Uses mock mode when available +var api = UseMockMode && MockHttpClient != null + ? new BusinessApi(MockHttpClient, ApiConfiguration) + : new BusinessApi(ApiConfiguration); + +// ✅ Uses enhanced output +TestOutputHelper.WriteResponseDetails(_output, "GetBusiness", result); + +// ✅ Uses enhanced serialization test output +TestSerializationRoundTrip(result, "GetBusiness"); +``` + +### Before Template Updates + +The old template generated: + +```csharp +// ❌ Didn't use mock mode +var api = new BusinessApi(ApiConfiguration); + +// ❌ Basic output only +_output.WriteLine($"✓ {testName}: Converter test passed"); +``` + +## Best Practices + +### 1. Don't Edit Generated Tests + +**Never manually edit** `GeneratedConverterIntegrationTests.cs`. Your changes will be lost on regeneration. + +### 2. Add Custom Tests to Manual File + +If you need custom test logic, add it to `ConverterIntegrationTests.cs` instead. + +### 3. Update Template for Changes + +If you want to change how **all** generated tests work, update: +- `tools/test-generator/templates/integration_test.cs.j2` + +### 4. Regenerate After API Changes + +After updating the OpenAPI spec or regenerating API clients: +```bash +./generate-all-apis.sh +``` + +This will: +- Regenerate converters +- Regenerate API clients +- **Regenerate integration tests** (with latest template) + +## Verification + +After regeneration, verify: + +1. ✅ Tests compile: `dotnet build Kinde.Api.Test/Kinde.Api.Test.csproj` +2. ✅ Mock mode works: `USE_MOCK_MODE=true dotnet test --filter "FullyQualifiedName~Generated"` +3. ✅ Real mode works: `dotnet test --filter "FullyQualifiedName~Generated"` +4. ✅ Enhanced output appears in test results + +## Summary + +| Component | Regenerated? | Impact | +|-----------|--------------|--------| +| `BaseIntegrationTest.cs` | ❌ No | Safe - manually maintained | +| `MockHttpHandler.cs` | ❌ No | Safe - manually maintained | +| `TestOutputHelper.cs` | ❌ No | Safe - manually maintained | +| `ConverterIntegrationTests.cs` | ❌ No | Safe - manually maintained | +| `GeneratedConverterIntegrationTests.cs` | ✅ Yes | Regenerated with latest template | +| Test Generator Template | ❌ No | Update manually when needed | + +**Bottom Line**: The auto-generation process is now **fully compatible** with the enhanced test infrastructure. Regenerating tests will use mock mode and enhanced output automatically. + diff --git a/Kinde.Api.Test/Integration/TestOutputHelper.cs b/Kinde.Api.Test/Integration/TestOutputHelper.cs new file mode 100644 index 0000000..772e330 --- /dev/null +++ b/Kinde.Api.Test/Integration/TestOutputHelper.cs @@ -0,0 +1,176 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Xunit.Abstractions; + +namespace Kinde.Api.Test.Integration +{ + /// + /// Helper class for formatting test output with detailed API response information + /// + public static class TestOutputHelper + { + /// + /// Formats and outputs detailed information about an API response + /// + public static void WriteResponseDetails(ITestOutputHelper output, string testName, T result) where T : class + { + output.WriteLine(""); + output.WriteLine($"═══════════════════════════════════════════════════════════════"); + output.WriteLine($"Test: {testName}"); + output.WriteLine($"═══════════════════════════════════════════════════════════════"); + + try + { + // Serialize to JSON for display + // Get converters using reflection (JsonConverterRegistry is internal) + var apiClientType = typeof(Kinde.Api.Client.ApiClient); + var helperType = apiClientType.Assembly.GetType("Kinde.Api.Client.JsonConverterHelper"); + IList converters; + if (helperType != null) + { + var method = helperType.GetMethod("CreateStandardConverters", + System.Reflection.BindingFlags.Public | System.Reflection.BindingFlags.Static); + if (method != null) + { + converters = (IList)method.Invoke(null, null)!; + } + else + { + converters = new List(); + } + } + else + { + converters = new List(); + } + + var settings = new JsonSerializerSettings + { + Formatting = Formatting.Indented, + NullValueHandling = NullValueHandling.Include, + Converters = converters + }; + + var json = JsonConvert.SerializeObject(result, settings); + var jsonObj = JObject.Parse(json); + + // Display summary + output.WriteLine($"Response Type: {typeof(T).Name}"); + output.WriteLine($""); + + // Display key properties + output.WriteLine("Key Properties:"); + DisplayProperties(output, jsonObj, 0, maxDepth: 2); + + // Display full JSON (truncated if too long) + output.WriteLine(""); + output.WriteLine("Full Response JSON:"); + if (json.Length > 2000) + { + output.WriteLine(json.Substring(0, 2000) + "... (truncated)"); + output.WriteLine($"Total length: {json.Length} characters"); + } + else + { + output.WriteLine(json); + } + + output.WriteLine($"═══════════════════════════════════════════════════════════════"); + output.WriteLine($"✓ {testName}: Success"); + output.WriteLine(""); + } + catch (Exception ex) + { + output.WriteLine($"Error formatting response: {ex.Message}"); + output.WriteLine($"Response Type: {typeof(T).Name}"); + output.WriteLine($"Response: {result?.ToString() ?? "null"}"); + } + } + + private static void DisplayProperties(ITestOutputHelper output, JToken token, int depth, int maxDepth) + { + if (depth > maxDepth) return; + + var indent = new string(' ', depth * 2); + + if (token is JObject obj) + { + foreach (var prop in obj.Properties().Take(10)) // Limit to first 10 properties + { + if (prop.Value is JObject || prop.Value is JArray) + { + output.WriteLine($"{indent}{prop.Name}: {{...}}"); + if (depth < maxDepth) + { + DisplayProperties(output, prop.Value, depth + 1, maxDepth); + } + } + else + { + var value = prop.Value?.ToString() ?? "null"; + if (value.Length > 100) + { + value = value.Substring(0, 100) + "..."; + } + output.WriteLine($"{indent}{prop.Name}: {value}"); + } + } + + if (obj.Properties().Count() > 10) + { + output.WriteLine($"{indent}... ({obj.Properties().Count() - 10} more properties)"); + } + } + else if (token is JArray array) + { + output.WriteLine($"{indent}[Array with {array.Count} items]"); + if (array.Count > 0 && depth < maxDepth) + { + output.WriteLine($"{indent} First item:"); + DisplayProperties(output, array[0], depth + 1, maxDepth); + } + } + } + + /// + /// Formats error output + /// + public static void WriteError(ITestOutputHelper output, string testName, Exception ex) + { + output.WriteLine(""); + output.WriteLine($"═══════════════════════════════════════════════════════════════"); + output.WriteLine($"✗ {testName}: FAILED"); + output.WriteLine($"═══════════════════════════════════════════════════════════════"); + output.WriteLine($"Error: {ex.Message}"); + output.WriteLine($"Type: {ex.GetType().Name}"); + if (ex.InnerException != null) + { + output.WriteLine($"Inner Exception: {ex.InnerException.Message}"); + } + output.WriteLine($"Stack Trace:"); + output.WriteLine(ex.StackTrace); + output.WriteLine($"═══════════════════════════════════════════════════════════════"); + output.WriteLine(""); + } + + /// + /// Formats serialization round-trip test results + /// + public static void WriteSerializationTest(ITestOutputHelper output, string testName, int jsonLength, bool success) + { + if (success) + { + output.WriteLine($"✓ {testName}: Serialization round-trip successful ({jsonLength} bytes)"); + } + else + { + output.WriteLine($"✗ {testName}: Serialization round-trip failed"); + } + } + } +} + diff --git a/Kinde.Api.Test/Kinde.Api.Test.csproj b/Kinde.Api.Test/Kinde.Api.Test.csproj index a753ace..182bd4f 100644 --- a/Kinde.Api.Test/Kinde.Api.Test.csproj +++ b/Kinde.Api.Test/Kinde.Api.Test.csproj @@ -9,6 +9,9 @@ + + + all @@ -20,6 +23,15 @@ + + + PreserveNewest + + + PreserveNewest + + + enable diff --git a/Kinde.Api.Test/appsettings.json b/Kinde.Api.Test/appsettings.json new file mode 100644 index 0000000..a333a3b --- /dev/null +++ b/Kinde.Api.Test/appsettings.json @@ -0,0 +1,9 @@ +{ + "KindeManagementApi": { + "Domain": "https://burntjam.kinde.com", + "ClientId": "9891928d4e20445e8d42e92831e0d89a", + "ClientSecret": "1U6leJtMm1lMRc3b7ZGXeIGKdOnCQJkkPKWrlgw3oajBvTQ5OJI2", + "Audience": "https://burntjam.kinde.com/api" + } +} + diff --git a/Kinde.Api.Test/appsettings.json.example b/Kinde.Api.Test/appsettings.json.example new file mode 100644 index 0000000..363c12f --- /dev/null +++ b/Kinde.Api.Test/appsettings.json.example @@ -0,0 +1,11 @@ +{ + "KindeManagementApi": { + "UseMockMode": false, + "Domain": "https://your-business.kinde.com", + "ClientId": "your_m2m_client_id_here", + "ClientSecret": "your_m2m_client_secret_here", + "Audience": "https://your-business.kinde.com/api", + "Scope": "read:users read:organizations read:applications read:roles read:permissions read:properties" + } +} + diff --git a/Kinde.Api.Test/packages.lock.json b/Kinde.Api.Test/packages.lock.json index d638e4f..24110d3 100644 --- a/Kinde.Api.Test/packages.lock.json +++ b/Kinde.Api.Test/packages.lock.json @@ -2,6 +2,39 @@ "version": 1, "dependencies": { "net8.0": { + "Microsoft.Extensions.Configuration": { + "type": "Direct", + "requested": "[9.0.4, )", + "resolved": "9.0.4", + "contentHash": "KIVBrMbItnCJDd1RF4KEaE8jZwDJcDUJW5zXpbwQ05HNYTK1GveHxHK0B3SjgDJuR48GRACXAO+BLhL8h34S7g==", + "dependencies": { + "Microsoft.Extensions.Configuration.Abstractions": "9.0.4", + "Microsoft.Extensions.Primitives": "9.0.4" + } + }, + "Microsoft.Extensions.Configuration.EnvironmentVariables": { + "type": "Direct", + "requested": "[9.0.4, )", + "resolved": "9.0.4", + "contentHash": "2IGiG3FtVnD83IA6HYGuNei8dOw455C09yEhGl8bjcY6aGZgoC6yhYvDnozw8wlTowfoG9bxVrdTsr2ACZOYHg==", + "dependencies": { + "Microsoft.Extensions.Configuration": "9.0.4", + "Microsoft.Extensions.Configuration.Abstractions": "9.0.4" + } + }, + "Microsoft.Extensions.Configuration.Json": { + "type": "Direct", + "requested": "[9.0.4, )", + "resolved": "9.0.4", + "contentHash": "vVXI70CgT/dmXV3MM+n/BR2rLXEoAyoK0hQT+8MrbCMuJBiLRxnTtSrksNiASWCwOtxo/Tyy7CO8AGthbsYxnw==", + "dependencies": { + "Microsoft.Extensions.Configuration": "9.0.4", + "Microsoft.Extensions.Configuration.Abstractions": "9.0.4", + "Microsoft.Extensions.Configuration.FileExtensions": "9.0.4", + "Microsoft.Extensions.FileProviders.Abstractions": "9.0.4", + "System.Text.Json": "9.0.4" + } + }, "Microsoft.NET.Test.Sdk": { "type": "Direct", "requested": "[17.14.1, )", @@ -66,15 +99,6 @@ "Microsoft.Extensions.ObjectPool": "8.0.19" } }, - "Microsoft.Extensions.Configuration": { - "type": "Transitive", - "resolved": "9.0.4", - "contentHash": "KIVBrMbItnCJDd1RF4KEaE8jZwDJcDUJW5zXpbwQ05HNYTK1GveHxHK0B3SjgDJuR48GRACXAO+BLhL8h34S7g==", - "dependencies": { - "Microsoft.Extensions.Configuration.Abstractions": "9.0.4", - "Microsoft.Extensions.Primitives": "9.0.4" - } - }, "Microsoft.Extensions.Configuration.Abstractions": { "type": "Transitive", "resolved": "9.0.4", @@ -91,6 +115,18 @@ "Microsoft.Extensions.Configuration.Abstractions": "9.0.4" } }, + "Microsoft.Extensions.Configuration.FileExtensions": { + "type": "Transitive", + "resolved": "9.0.4", + "contentHash": "UY864WQ3AS2Fkc8fYLombWnjrXwYt+BEHHps0hY4sxlgqaVW06AxbpgRZjfYf8PyRbplJqruzZDB/nSLT+7RLQ==", + "dependencies": { + "Microsoft.Extensions.Configuration": "9.0.4", + "Microsoft.Extensions.Configuration.Abstractions": "9.0.4", + "Microsoft.Extensions.FileProviders.Abstractions": "9.0.4", + "Microsoft.Extensions.FileProviders.Physical": "9.0.4", + "Microsoft.Extensions.Primitives": "9.0.4" + } + }, "Microsoft.Extensions.DependencyInjection": { "type": "Transitive", "resolved": "9.0.4", @@ -148,6 +184,21 @@ "Microsoft.Extensions.Primitives": "9.0.4" } }, + "Microsoft.Extensions.FileProviders.Physical": { + "type": "Transitive", + "resolved": "9.0.4", + "contentHash": "qkQ9V7KFZdTWNThT7ke7E/Jad38s46atSs3QUYZB8f3thBTrcrousdY4Y/tyCtcH5YjsPSiByjuN+L8W/ThMQg==", + "dependencies": { + "Microsoft.Extensions.FileProviders.Abstractions": "9.0.4", + "Microsoft.Extensions.FileSystemGlobbing": "9.0.4", + "Microsoft.Extensions.Primitives": "9.0.4" + } + }, + "Microsoft.Extensions.FileSystemGlobbing": { + "type": "Transitive", + "resolved": "9.0.4", + "contentHash": "05Lh2ItSk4mzTdDWATW9nEcSybwprN8Tz42Fs5B+jwdXUpauktdAQUI1Am4sUQi2C63E5hvQp8gXvfwfg9mQGQ==" + }, "Microsoft.Extensions.Hosting.Abstractions": { "type": "Transitive", "resolved": "9.0.4", diff --git a/Kinde.Api/Accounts/Api/BillingApi.cs b/Kinde.Api/Accounts/Api/BillingApi.cs index 361969a..6565781 100644 --- a/Kinde.Api/Accounts/Api/BillingApi.cs +++ b/Kinde.Api/Accounts/Api/BillingApi.cs @@ -62,7 +62,7 @@ public interface IBillingApi : IApi /// Get entitlements /// /// - /// Returns all the entitlements a the user currently has access to + /// Returns all the entitlements the user currently has access to /// /// Thrown when fails to make API call /// Number of results per page. Defaults to 10 if parameter not sent. (optional) @@ -75,7 +75,7 @@ public interface IBillingApi : IApi /// Get entitlements /// /// - /// Returns all the entitlements a the user currently has access to + /// Returns all the entitlements the user currently has access to /// /// Number of results per page. Defaults to 10 if parameter not sent. (optional) /// The ID of the entitlement to start after. (optional) @@ -542,7 +542,7 @@ private void OnErrorGetEntitlementsDefaultImplementation(Exception exceptionLoca partial void OnErrorGetEntitlements(ref bool suppressDefaultLogLocalVar, Exception exceptionLocalVar, string pathFormatLocalVar, string pathLocalVar, Option pageSize, Option startingAfter); /// - /// Get entitlements Returns all the entitlements a the user currently has access to + /// Get entitlements Returns all the entitlements the user currently has access to /// /// Number of results per page. Defaults to 10 if parameter not sent. (optional) /// The ID of the entitlement to start after. (optional) @@ -561,7 +561,7 @@ private void OnErrorGetEntitlementsDefaultImplementation(Exception exceptionLoca } /// - /// Get entitlements Returns all the entitlements a the user currently has access to + /// Get entitlements Returns all the entitlements the user currently has access to /// /// Thrown when fails to make API call /// Number of results per page. Defaults to 10 if parameter not sent. (optional) diff --git a/Kinde.Api/Accounts/Converters/ErrorNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/ErrorNewtonsoftConverter.cs new file mode 100644 index 0000000..066478f --- /dev/null +++ b/Kinde.Api/Accounts/Converters/ErrorNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for Error that handles the Option<> structure + /// + public class ErrorNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override Error ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, Error existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + + return new Error( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, Error value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetEntitlementResponseDataEntitlementNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetEntitlementResponseDataEntitlementNewtonsoftConverter.cs new file mode 100644 index 0000000..31e4e16 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetEntitlementResponseDataEntitlementNewtonsoftConverter.cs @@ -0,0 +1,120 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetEntitlementResponseDataEntitlement that handles the Option<> structure + /// + public class GetEntitlementResponseDataEntitlementNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetEntitlementResponseDataEntitlement ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetEntitlementResponseDataEntitlement existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + int? fixedCharge = default(int?); + if (jsonObject["fixed_charge"] != null) + { + fixedCharge = jsonObject["fixed_charge"].ToObject(serializer); + } + string? priceName = default(string?); + if (jsonObject["price_name"] != null) + { + priceName = jsonObject["price_name"].ToObject(); + } + int? unitAmount = default(int?); + if (jsonObject["unit_amount"] != null) + { + unitAmount = jsonObject["unit_amount"].ToObject(serializer); + } + string? featureKey = default(string?); + if (jsonObject["feature_key"] != null) + { + featureKey = jsonObject["feature_key"].ToObject(); + } + string? featureName = default(string?); + if (jsonObject["feature_name"] != null) + { + featureName = jsonObject["feature_name"].ToObject(); + } + int? entitlementLimitMax = default(int?); + if (jsonObject["entitlement_limit_max"] != null) + { + entitlementLimitMax = jsonObject["entitlement_limit_max"].ToObject(serializer); + } + int? entitlementLimitMin = default(int?); + if (jsonObject["entitlement_limit_min"] != null) + { + entitlementLimitMin = jsonObject["entitlement_limit_min"].ToObject(serializer); + } + + return new GetEntitlementResponseDataEntitlement( + id: id != null ? new Option(id) : default, fixedCharge: fixedCharge != null ? new Option(fixedCharge) : default, priceName: priceName != null ? new Option(priceName) : default, unitAmount: unitAmount != null ? new Option(unitAmount) : default, featureKey: featureKey != null ? new Option(featureKey) : default, featureName: featureName != null ? new Option(featureName) : default, entitlementLimitMax: entitlementLimitMax != null ? new Option(entitlementLimitMax) : default, entitlementLimitMin: entitlementLimitMin != null ? new Option(entitlementLimitMin) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetEntitlementResponseDataEntitlement value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.FixedChargeOption.IsSet && value.FixedCharge != null) + { + writer.WritePropertyName("fixed_charge"); + serializer.Serialize(writer, value.FixedCharge); + } + if (value.PriceNameOption.IsSet && value.PriceName != null) + { + writer.WritePropertyName("price_name"); + serializer.Serialize(writer, value.PriceName); + } + if (value.UnitAmountOption.IsSet && value.UnitAmount != null) + { + writer.WritePropertyName("unit_amount"); + serializer.Serialize(writer, value.UnitAmount); + } + if (value.FeatureKeyOption.IsSet && value.FeatureKey != null) + { + writer.WritePropertyName("feature_key"); + serializer.Serialize(writer, value.FeatureKey); + } + if (value.FeatureNameOption.IsSet && value.FeatureName != null) + { + writer.WritePropertyName("feature_name"); + serializer.Serialize(writer, value.FeatureName); + } + if (value.EntitlementLimitMaxOption.IsSet && value.EntitlementLimitMax != null) + { + writer.WritePropertyName("entitlement_limit_max"); + serializer.Serialize(writer, value.EntitlementLimitMax); + } + if (value.EntitlementLimitMinOption.IsSet && value.EntitlementLimitMin != null) + { + writer.WritePropertyName("entitlement_limit_min"); + serializer.Serialize(writer, value.EntitlementLimitMin); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetEntitlementResponseDataNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetEntitlementResponseDataNewtonsoftConverter.cs new file mode 100644 index 0000000..1802130 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetEntitlementResponseDataNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetEntitlementResponseData that handles the Option<> structure + /// + public class GetEntitlementResponseDataNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetEntitlementResponseData ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetEntitlementResponseData existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? orgCode = default(string?); + if (jsonObject["org_code"] != null) + { + orgCode = jsonObject["org_code"].ToObject(); + } + GetEntitlementResponseDataEntitlement? entitlement = default(GetEntitlementResponseDataEntitlement?); + if (jsonObject["entitlement"] != null) + { + entitlement = jsonObject["entitlement"].ToObject(serializer); + } + + return new GetEntitlementResponseData( + orgCode: orgCode != null ? new Option(orgCode) : default, entitlement: entitlement != null ? new Option(entitlement) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetEntitlementResponseData value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.OrgCodeOption.IsSet && value.OrgCode != null) + { + writer.WritePropertyName("org_code"); + serializer.Serialize(writer, value.OrgCode); + } + if (value.EntitlementOption.IsSet && value.Entitlement != null) + { + writer.WritePropertyName("entitlement"); + serializer.Serialize(writer, value.Entitlement); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetEntitlementResponseNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetEntitlementResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..4054bf4 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetEntitlementResponseNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetEntitlementResponse that handles the Option<> structure + /// + public class GetEntitlementResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetEntitlementResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetEntitlementResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + GetEntitlementResponseData? data = default(GetEntitlementResponseData?); + if (jsonObject["data"] != null) + { + data = jsonObject["data"].ToObject(serializer); + } + Object? metadata = default(Object?); + if (jsonObject["metadata"] != null) + { + metadata = jsonObject["metadata"].ToObject(serializer); + } + + return new GetEntitlementResponse( + data: data != null ? new Option(data) : default, metadata: metadata != null ? new Option(metadata) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetEntitlementResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.DataOption.IsSet && value.Data != null) + { + writer.WritePropertyName("data"); + serializer.Serialize(writer, value.Data); + } + if (value.MetadataOption.IsSet && value.Metadata != null) + { + writer.WritePropertyName("metadata"); + serializer.Serialize(writer, value.Metadata); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetEntitlementsResponseDataEntitlementsInnerNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetEntitlementsResponseDataEntitlementsInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..cdad32b --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetEntitlementsResponseDataEntitlementsInnerNewtonsoftConverter.cs @@ -0,0 +1,120 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetEntitlementsResponseDataEntitlementsInner that handles the Option<> structure + /// + public class GetEntitlementsResponseDataEntitlementsInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetEntitlementsResponseDataEntitlementsInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetEntitlementsResponseDataEntitlementsInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + int? fixedCharge = default(int?); + if (jsonObject["fixed_charge"] != null) + { + fixedCharge = jsonObject["fixed_charge"].ToObject(serializer); + } + string? priceName = default(string?); + if (jsonObject["price_name"] != null) + { + priceName = jsonObject["price_name"].ToObject(); + } + int? unitAmount = default(int?); + if (jsonObject["unit_amount"] != null) + { + unitAmount = jsonObject["unit_amount"].ToObject(serializer); + } + string? featureKey = default(string?); + if (jsonObject["feature_key"] != null) + { + featureKey = jsonObject["feature_key"].ToObject(); + } + string? featureName = default(string?); + if (jsonObject["feature_name"] != null) + { + featureName = jsonObject["feature_name"].ToObject(); + } + int? entitlementLimitMax = default(int?); + if (jsonObject["entitlement_limit_max"] != null) + { + entitlementLimitMax = jsonObject["entitlement_limit_max"].ToObject(serializer); + } + int? entitlementLimitMin = default(int?); + if (jsonObject["entitlement_limit_min"] != null) + { + entitlementLimitMin = jsonObject["entitlement_limit_min"].ToObject(serializer); + } + + return new GetEntitlementsResponseDataEntitlementsInner( + id: id != null ? new Option(id) : default, fixedCharge: fixedCharge != null ? new Option(fixedCharge) : default, priceName: priceName != null ? new Option(priceName) : default, unitAmount: unitAmount != null ? new Option(unitAmount) : default, featureKey: featureKey != null ? new Option(featureKey) : default, featureName: featureName != null ? new Option(featureName) : default, entitlementLimitMax: entitlementLimitMax != null ? new Option(entitlementLimitMax) : default, entitlementLimitMin: entitlementLimitMin != null ? new Option(entitlementLimitMin) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetEntitlementsResponseDataEntitlementsInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.FixedChargeOption.IsSet && value.FixedCharge != null) + { + writer.WritePropertyName("fixed_charge"); + serializer.Serialize(writer, value.FixedCharge); + } + if (value.PriceNameOption.IsSet && value.PriceName != null) + { + writer.WritePropertyName("price_name"); + serializer.Serialize(writer, value.PriceName); + } + if (value.UnitAmountOption.IsSet && value.UnitAmount != null) + { + writer.WritePropertyName("unit_amount"); + serializer.Serialize(writer, value.UnitAmount); + } + if (value.FeatureKeyOption.IsSet && value.FeatureKey != null) + { + writer.WritePropertyName("feature_key"); + serializer.Serialize(writer, value.FeatureKey); + } + if (value.FeatureNameOption.IsSet && value.FeatureName != null) + { + writer.WritePropertyName("feature_name"); + serializer.Serialize(writer, value.FeatureName); + } + if (value.EntitlementLimitMaxOption.IsSet && value.EntitlementLimitMax != null) + { + writer.WritePropertyName("entitlement_limit_max"); + serializer.Serialize(writer, value.EntitlementLimitMax); + } + if (value.EntitlementLimitMinOption.IsSet && value.EntitlementLimitMin != null) + { + writer.WritePropertyName("entitlement_limit_min"); + serializer.Serialize(writer, value.EntitlementLimitMin); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetEntitlementsResponseDataNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetEntitlementsResponseDataNewtonsoftConverter.cs new file mode 100644 index 0000000..b142160 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetEntitlementsResponseDataNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetEntitlementsResponseData that handles the Option<> structure + /// + public class GetEntitlementsResponseDataNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetEntitlementsResponseData ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetEntitlementsResponseData existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? orgCode = default(string?); + if (jsonObject["org_code"] != null) + { + orgCode = jsonObject["org_code"].ToObject(); + } + List plans = default(List); + if (jsonObject["plans"] != null) + { + plans = jsonObject["plans"].ToObject>(serializer); + } + List entitlements = default(List); + if (jsonObject["entitlements"] != null) + { + entitlements = jsonObject["entitlements"].ToObject>(serializer); + } + + return new GetEntitlementsResponseData( + orgCode: orgCode != null ? new Option(orgCode) : default, plans: plans != null ? new Option?>(plans) : default, entitlements: entitlements != null ? new Option?>(entitlements) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetEntitlementsResponseData value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.OrgCodeOption.IsSet && value.OrgCode != null) + { + writer.WritePropertyName("org_code"); + serializer.Serialize(writer, value.OrgCode); + } + if (value.PlansOption.IsSet) + { + writer.WritePropertyName("plans"); + serializer.Serialize(writer, value.Plans); + } + if (value.EntitlementsOption.IsSet) + { + writer.WritePropertyName("entitlements"); + serializer.Serialize(writer, value.Entitlements); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetEntitlementsResponseDataPlansInnerNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetEntitlementsResponseDataPlansInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..5b29337 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetEntitlementsResponseDataPlansInnerNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetEntitlementsResponseDataPlansInner that handles the Option<> structure + /// + public class GetEntitlementsResponseDataPlansInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetEntitlementsResponseDataPlansInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetEntitlementsResponseDataPlansInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + DateTimeOffset? subscribedOn = default(DateTimeOffset?); + if (jsonObject["subscribed_on"] != null) + { + subscribedOn = jsonObject["subscribed_on"].ToObject(serializer); + } + + return new GetEntitlementsResponseDataPlansInner( + key: key != null ? new Option(key) : default, name: name != null ? new Option(name) : default, subscribedOn: subscribedOn != null ? new Option(subscribedOn) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetEntitlementsResponseDataPlansInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.SubscribedOnOption.IsSet && value.SubscribedOn != null) + { + writer.WritePropertyName("subscribed_on"); + serializer.Serialize(writer, value.SubscribedOn); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetEntitlementsResponseMetadataNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetEntitlementsResponseMetadataNewtonsoftConverter.cs new file mode 100644 index 0000000..e427d7b --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetEntitlementsResponseMetadataNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetEntitlementsResponseMetadata that handles the Option<> structure + /// + public class GetEntitlementsResponseMetadataNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetEntitlementsResponseMetadata ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetEntitlementsResponseMetadata existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + bool? hasMore = default(bool?); + if (jsonObject["has_more"] != null) + { + hasMore = jsonObject["has_more"].ToObject(serializer); + } + string? nextPageStartingAfter = default(string?); + if (jsonObject["next_page_starting_after"] != null) + { + nextPageStartingAfter = jsonObject["next_page_starting_after"].ToObject(); + } + + return new GetEntitlementsResponseMetadata( + hasMore: hasMore != null ? new Option(hasMore) : default, nextPageStartingAfter: nextPageStartingAfter != null ? new Option(nextPageStartingAfter) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetEntitlementsResponseMetadata value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.HasMoreOption.IsSet && value.HasMore != null) + { + writer.WritePropertyName("has_more"); + serializer.Serialize(writer, value.HasMore); + } + if (value.NextPageStartingAfterOption.IsSet && value.NextPageStartingAfter != null) + { + writer.WritePropertyName("next_page_starting_after"); + serializer.Serialize(writer, value.NextPageStartingAfter); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetEntitlementsResponseNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetEntitlementsResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..e854149 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetEntitlementsResponseNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetEntitlementsResponse that handles the Option<> structure + /// + public class GetEntitlementsResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetEntitlementsResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetEntitlementsResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + GetEntitlementsResponseData? data = default(GetEntitlementsResponseData?); + if (jsonObject["data"] != null) + { + data = jsonObject["data"].ToObject(serializer); + } + GetEntitlementsResponseMetadata? metadata = default(GetEntitlementsResponseMetadata?); + if (jsonObject["metadata"] != null) + { + metadata = jsonObject["metadata"].ToObject(serializer); + } + + return new GetEntitlementsResponse( + data: data != null ? new Option(data) : default, metadata: metadata != null ? new Option(metadata) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetEntitlementsResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.DataOption.IsSet && value.Data != null) + { + writer.WritePropertyName("data"); + serializer.Serialize(writer, value.Data); + } + if (value.MetadataOption.IsSet && value.Metadata != null) + { + writer.WritePropertyName("metadata"); + serializer.Serialize(writer, value.Metadata); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetFeatureFlagsResponseDataFeatureFlagsInnerNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetFeatureFlagsResponseDataFeatureFlagsInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..6c4ceb7 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetFeatureFlagsResponseDataFeatureFlagsInnerNewtonsoftConverter.cs @@ -0,0 +1,90 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetFeatureFlagsResponseDataFeatureFlagsInner that handles the Option<> structure + /// + public class GetFeatureFlagsResponseDataFeatureFlagsInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetFeatureFlagsResponseDataFeatureFlagsInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetFeatureFlagsResponseDataFeatureFlagsInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + string? type = default(string?); + if (jsonObject["type"] != null) + { + type = jsonObject["type"].ToObject(); + } + GetFeatureFlagsResponseDataFeatureFlagsInnerValue? value = default(GetFeatureFlagsResponseDataFeatureFlagsInnerValue?); + if (jsonObject["value"] != null) + { + value = jsonObject["value"].ToObject(serializer); + } + + return new GetFeatureFlagsResponseDataFeatureFlagsInner( + id: id != null ? new Option(id) : default, name: name != null ? new Option(name) : default, key: key != null ? new Option(key) : default, type: type != null ? new Option(type) : default, value: value != null ? new Option(value) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetFeatureFlagsResponseDataFeatureFlagsInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + if (value.TypeOption.IsSet && value.Type != null) + { + writer.WritePropertyName("type"); + serializer.Serialize(writer, value.Type); + } + if (value.ValueOption.IsSet && value.Value != null) + { + writer.WritePropertyName("value"); + serializer.Serialize(writer, value.Value); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetFeatureFlagsResponseDataNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetFeatureFlagsResponseDataNewtonsoftConverter.cs new file mode 100644 index 0000000..b57464c --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetFeatureFlagsResponseDataNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetFeatureFlagsResponseData that handles the Option<> structure + /// + public class GetFeatureFlagsResponseDataNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetFeatureFlagsResponseData ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetFeatureFlagsResponseData existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + List featureFlags = default(List); + if (jsonObject["feature_flags"] != null) + { + featureFlags = jsonObject["feature_flags"].ToObject>(serializer); + } + + return new GetFeatureFlagsResponseData( + featureFlags: featureFlags != null ? new Option?>(featureFlags) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetFeatureFlagsResponseData value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.FeatureFlagsOption.IsSet) + { + writer.WritePropertyName("feature_flags"); + serializer.Serialize(writer, value.FeatureFlags); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetFeatureFlagsResponseNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetFeatureFlagsResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..ecba206 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetFeatureFlagsResponseNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetFeatureFlagsResponse that handles the Option<> structure + /// + public class GetFeatureFlagsResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetFeatureFlagsResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetFeatureFlagsResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + GetFeatureFlagsResponseData? data = default(GetFeatureFlagsResponseData?); + if (jsonObject["data"] != null) + { + data = jsonObject["data"].ToObject(serializer); + } + + return new GetFeatureFlagsResponse( + data: data != null ? new Option(data) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetFeatureFlagsResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.DataOption.IsSet && value.Data != null) + { + writer.WritePropertyName("data"); + serializer.Serialize(writer, value.Data); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetUserPermissionsResponseDataNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetUserPermissionsResponseDataNewtonsoftConverter.cs new file mode 100644 index 0000000..85383e5 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetUserPermissionsResponseDataNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetUserPermissionsResponseData that handles the Option<> structure + /// + public class GetUserPermissionsResponseDataNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetUserPermissionsResponseData ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetUserPermissionsResponseData existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? orgCode = default(string?); + if (jsonObject["org_code"] != null) + { + orgCode = jsonObject["org_code"].ToObject(); + } + List permissions = default(List); + if (jsonObject["permissions"] != null) + { + permissions = jsonObject["permissions"].ToObject>(serializer); + } + + return new GetUserPermissionsResponseData( + orgCode: orgCode != null ? new Option(orgCode) : default, permissions: permissions != null ? new Option?>(permissions) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetUserPermissionsResponseData value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.OrgCodeOption.IsSet && value.OrgCode != null) + { + writer.WritePropertyName("org_code"); + serializer.Serialize(writer, value.OrgCode); + } + if (value.PermissionsOption.IsSet) + { + writer.WritePropertyName("permissions"); + serializer.Serialize(writer, value.Permissions); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetUserPermissionsResponseDataPermissionsInnerNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetUserPermissionsResponseDataPermissionsInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..d1c5725 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetUserPermissionsResponseDataPermissionsInnerNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetUserPermissionsResponseDataPermissionsInner that handles the Option<> structure + /// + public class GetUserPermissionsResponseDataPermissionsInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetUserPermissionsResponseDataPermissionsInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetUserPermissionsResponseDataPermissionsInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + + return new GetUserPermissionsResponseDataPermissionsInner( + id: id != null ? new Option(id) : default, name: name != null ? new Option(name) : default, key: key != null ? new Option(key) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetUserPermissionsResponseDataPermissionsInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetUserPermissionsResponseMetadataNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetUserPermissionsResponseMetadataNewtonsoftConverter.cs new file mode 100644 index 0000000..5cb5de2 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetUserPermissionsResponseMetadataNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetUserPermissionsResponseMetadata that handles the Option<> structure + /// + public class GetUserPermissionsResponseMetadataNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetUserPermissionsResponseMetadata ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetUserPermissionsResponseMetadata existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + bool? hasMore = default(bool?); + if (jsonObject["has_more"] != null) + { + hasMore = jsonObject["has_more"].ToObject(serializer); + } + string? nextPageStartingAfter = default(string?); + if (jsonObject["next_page_starting_after"] != null) + { + nextPageStartingAfter = jsonObject["next_page_starting_after"].ToObject(); + } + + return new GetUserPermissionsResponseMetadata( + hasMore: hasMore != null ? new Option(hasMore) : default, nextPageStartingAfter: nextPageStartingAfter != null ? new Option(nextPageStartingAfter) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetUserPermissionsResponseMetadata value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.HasMoreOption.IsSet && value.HasMore != null) + { + writer.WritePropertyName("has_more"); + serializer.Serialize(writer, value.HasMore); + } + if (value.NextPageStartingAfterOption.IsSet && value.NextPageStartingAfter != null) + { + writer.WritePropertyName("next_page_starting_after"); + serializer.Serialize(writer, value.NextPageStartingAfter); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetUserPermissionsResponseNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetUserPermissionsResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..1f71f8c --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetUserPermissionsResponseNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetUserPermissionsResponse that handles the Option<> structure + /// + public class GetUserPermissionsResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetUserPermissionsResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetUserPermissionsResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + GetUserPermissionsResponseData? data = default(GetUserPermissionsResponseData?); + if (jsonObject["data"] != null) + { + data = jsonObject["data"].ToObject(serializer); + } + GetUserPermissionsResponseMetadata? metadata = default(GetUserPermissionsResponseMetadata?); + if (jsonObject["metadata"] != null) + { + metadata = jsonObject["metadata"].ToObject(serializer); + } + + return new GetUserPermissionsResponse( + data: data != null ? new Option(data) : default, metadata: metadata != null ? new Option(metadata) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetUserPermissionsResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.DataOption.IsSet && value.Data != null) + { + writer.WritePropertyName("data"); + serializer.Serialize(writer, value.Data); + } + if (value.MetadataOption.IsSet && value.Metadata != null) + { + writer.WritePropertyName("metadata"); + serializer.Serialize(writer, value.Metadata); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetUserPropertiesResponseDataNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetUserPropertiesResponseDataNewtonsoftConverter.cs new file mode 100644 index 0000000..313bd27 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetUserPropertiesResponseDataNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetUserPropertiesResponseData that handles the Option<> structure + /// + public class GetUserPropertiesResponseDataNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetUserPropertiesResponseData ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetUserPropertiesResponseData existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + List properties = default(List); + if (jsonObject["properties"] != null) + { + properties = jsonObject["properties"].ToObject>(serializer); + } + + return new GetUserPropertiesResponseData( + properties: properties != null ? new Option?>(properties) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetUserPropertiesResponseData value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.PropertiesOption.IsSet) + { + writer.WritePropertyName("properties"); + serializer.Serialize(writer, value.Properties); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetUserPropertiesResponseDataPropertiesInnerNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetUserPropertiesResponseDataPropertiesInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..adc3fe4 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetUserPropertiesResponseDataPropertiesInnerNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetUserPropertiesResponseDataPropertiesInner that handles the Option<> structure + /// + public class GetUserPropertiesResponseDataPropertiesInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetUserPropertiesResponseDataPropertiesInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetUserPropertiesResponseDataPropertiesInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + GetUserPropertiesResponseDataPropertiesInnerValue? value = default(GetUserPropertiesResponseDataPropertiesInnerValue?); + if (jsonObject["value"] != null) + { + value = jsonObject["value"].ToObject(serializer); + } + + return new GetUserPropertiesResponseDataPropertiesInner( + id: id != null ? new Option(id) : default, name: name != null ? new Option(name) : default, key: key != null ? new Option(key) : default, value: value != null ? new Option(value) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetUserPropertiesResponseDataPropertiesInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + if (value.ValueOption.IsSet && value.Value != null) + { + writer.WritePropertyName("value"); + serializer.Serialize(writer, value.Value); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetUserPropertiesResponseMetadataNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetUserPropertiesResponseMetadataNewtonsoftConverter.cs new file mode 100644 index 0000000..9941247 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetUserPropertiesResponseMetadataNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetUserPropertiesResponseMetadata that handles the Option<> structure + /// + public class GetUserPropertiesResponseMetadataNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetUserPropertiesResponseMetadata ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetUserPropertiesResponseMetadata existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + bool? hasMore = default(bool?); + if (jsonObject["has_more"] != null) + { + hasMore = jsonObject["has_more"].ToObject(serializer); + } + string? nextPageStartingAfter = default(string?); + if (jsonObject["next_page_starting_after"] != null) + { + nextPageStartingAfter = jsonObject["next_page_starting_after"].ToObject(); + } + + return new GetUserPropertiesResponseMetadata( + hasMore: hasMore != null ? new Option(hasMore) : default, nextPageStartingAfter: nextPageStartingAfter != null ? new Option(nextPageStartingAfter) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetUserPropertiesResponseMetadata value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.HasMoreOption.IsSet && value.HasMore != null) + { + writer.WritePropertyName("has_more"); + serializer.Serialize(writer, value.HasMore); + } + if (value.NextPageStartingAfterOption.IsSet && value.NextPageStartingAfter != null) + { + writer.WritePropertyName("next_page_starting_after"); + serializer.Serialize(writer, value.NextPageStartingAfter); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetUserPropertiesResponseNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetUserPropertiesResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..823ca48 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetUserPropertiesResponseNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetUserPropertiesResponse that handles the Option<> structure + /// + public class GetUserPropertiesResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetUserPropertiesResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetUserPropertiesResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + GetUserPropertiesResponseData? data = default(GetUserPropertiesResponseData?); + if (jsonObject["data"] != null) + { + data = jsonObject["data"].ToObject(serializer); + } + GetUserPropertiesResponseMetadata? metadata = default(GetUserPropertiesResponseMetadata?); + if (jsonObject["metadata"] != null) + { + metadata = jsonObject["metadata"].ToObject(serializer); + } + + return new GetUserPropertiesResponse( + data: data != null ? new Option(data) : default, metadata: metadata != null ? new Option(metadata) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetUserPropertiesResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.DataOption.IsSet && value.Data != null) + { + writer.WritePropertyName("data"); + serializer.Serialize(writer, value.Data); + } + if (value.MetadataOption.IsSet && value.Metadata != null) + { + writer.WritePropertyName("metadata"); + serializer.Serialize(writer, value.Metadata); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetUserRolesResponseDataNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetUserRolesResponseDataNewtonsoftConverter.cs new file mode 100644 index 0000000..87aa4f7 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetUserRolesResponseDataNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetUserRolesResponseData that handles the Option<> structure + /// + public class GetUserRolesResponseDataNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetUserRolesResponseData ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetUserRolesResponseData existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? orgCode = default(string?); + if (jsonObject["org_code"] != null) + { + orgCode = jsonObject["org_code"].ToObject(); + } + List roles = default(List); + if (jsonObject["roles"] != null) + { + roles = jsonObject["roles"].ToObject>(serializer); + } + + return new GetUserRolesResponseData( + orgCode: orgCode != null ? new Option(orgCode) : default, roles: roles != null ? new Option?>(roles) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetUserRolesResponseData value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.OrgCodeOption.IsSet && value.OrgCode != null) + { + writer.WritePropertyName("org_code"); + serializer.Serialize(writer, value.OrgCode); + } + if (value.RolesOption.IsSet) + { + writer.WritePropertyName("roles"); + serializer.Serialize(writer, value.Roles); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetUserRolesResponseDataRolesInnerNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetUserRolesResponseDataRolesInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..1af188b --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetUserRolesResponseDataRolesInnerNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetUserRolesResponseDataRolesInner that handles the Option<> structure + /// + public class GetUserRolesResponseDataRolesInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetUserRolesResponseDataRolesInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetUserRolesResponseDataRolesInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + + return new GetUserRolesResponseDataRolesInner( + id: id != null ? new Option(id) : default, name: name != null ? new Option(name) : default, key: key != null ? new Option(key) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetUserRolesResponseDataRolesInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetUserRolesResponseMetadataNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetUserRolesResponseMetadataNewtonsoftConverter.cs new file mode 100644 index 0000000..94f6869 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetUserRolesResponseMetadataNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetUserRolesResponseMetadata that handles the Option<> structure + /// + public class GetUserRolesResponseMetadataNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetUserRolesResponseMetadata ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetUserRolesResponseMetadata existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + bool? hasMore = default(bool?); + if (jsonObject["has_more"] != null) + { + hasMore = jsonObject["has_more"].ToObject(serializer); + } + string? nextPageStartingAfter = default(string?); + if (jsonObject["next_page_starting_after"] != null) + { + nextPageStartingAfter = jsonObject["next_page_starting_after"].ToObject(); + } + + return new GetUserRolesResponseMetadata( + hasMore: hasMore != null ? new Option(hasMore) : default, nextPageStartingAfter: nextPageStartingAfter != null ? new Option(nextPageStartingAfter) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetUserRolesResponseMetadata value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.HasMoreOption.IsSet && value.HasMore != null) + { + writer.WritePropertyName("has_more"); + serializer.Serialize(writer, value.HasMore); + } + if (value.NextPageStartingAfterOption.IsSet && value.NextPageStartingAfter != null) + { + writer.WritePropertyName("next_page_starting_after"); + serializer.Serialize(writer, value.NextPageStartingAfter); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/GetUserRolesResponseNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/GetUserRolesResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..35c1074 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/GetUserRolesResponseNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for GetUserRolesResponse that handles the Option<> structure + /// + public class GetUserRolesResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetUserRolesResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetUserRolesResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + GetUserRolesResponseData? data = default(GetUserRolesResponseData?); + if (jsonObject["data"] != null) + { + data = jsonObject["data"].ToObject(serializer); + } + GetUserRolesResponseMetadata? metadata = default(GetUserRolesResponseMetadata?); + if (jsonObject["metadata"] != null) + { + metadata = jsonObject["metadata"].ToObject(serializer); + } + + return new GetUserRolesResponse( + data: data != null ? new Option(data) : default, metadata: metadata != null ? new Option(metadata) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetUserRolesResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.DataOption.IsSet && value.Data != null) + { + writer.WritePropertyName("data"); + serializer.Serialize(writer, value.Data); + } + if (value.MetadataOption.IsSet && value.Metadata != null) + { + writer.WritePropertyName("metadata"); + serializer.Serialize(writer, value.Metadata); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/PortalLinkNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/PortalLinkNewtonsoftConverter.cs new file mode 100644 index 0000000..12d1ee2 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/PortalLinkNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for PortalLink that handles the Option<> structure + /// + public class PortalLinkNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override PortalLink ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, PortalLink existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? url = default(string?); + if (jsonObject["url"] != null) + { + url = jsonObject["url"].ToObject(); + } + + return new PortalLink( + url: url != null ? new Option(url) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, PortalLink value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.UrlOption.IsSet && value.Url != null) + { + writer.WritePropertyName("url"); + serializer.Serialize(writer, value.Url); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/TokenErrorResponseNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/TokenErrorResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..62fee15 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/TokenErrorResponseNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for TokenErrorResponse that handles the Option<> structure + /// + public class TokenErrorResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override TokenErrorResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, TokenErrorResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? error = default(string?); + if (jsonObject["error"] != null) + { + error = jsonObject["error"].ToObject(); + } + string? errorDescription = default(string?); + if (jsonObject["error_description"] != null) + { + errorDescription = jsonObject["error_description"].ToObject(); + } + + return new TokenErrorResponse( + error: error != null ? new Option(error) : default, errorDescription: errorDescription != null ? new Option(errorDescription) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, TokenErrorResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.ErrorOption.IsSet && value.Error != null) + { + writer.WritePropertyName("error"); + serializer.Serialize(writer, value.Error); + } + if (value.ErrorDescriptionOption.IsSet && value.ErrorDescription != null) + { + writer.WritePropertyName("error_description"); + serializer.Serialize(writer, value.ErrorDescription); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/TokenIntrospectNewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/TokenIntrospectNewtonsoftConverter.cs new file mode 100644 index 0000000..515ac4a --- /dev/null +++ b/Kinde.Api/Accounts/Converters/TokenIntrospectNewtonsoftConverter.cs @@ -0,0 +1,90 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for TokenIntrospect that handles the Option<> structure + /// + public class TokenIntrospectNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override TokenIntrospect ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, TokenIntrospect existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + bool? active = default(bool?); + if (jsonObject["active"] != null) + { + active = jsonObject["active"].ToObject(serializer); + } + List aud = default(List); + if (jsonObject["aud"] != null) + { + aud = jsonObject["aud"].ToObject>(serializer); + } + string? clientId = default(string?); + if (jsonObject["client_id"] != null) + { + clientId = jsonObject["client_id"].ToObject(); + } + int? exp = default(int?); + if (jsonObject["exp"] != null) + { + exp = jsonObject["exp"].ToObject(serializer); + } + int? iat = default(int?); + if (jsonObject["iat"] != null) + { + iat = jsonObject["iat"].ToObject(serializer); + } + + return new TokenIntrospect( + active: active != null ? new Option(active) : default, aud: aud != null ? new Option?>(aud) : default, clientId: clientId != null ? new Option(clientId) : default, exp: exp != null ? new Option(exp) : default, iat: iat != null ? new Option(iat) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, TokenIntrospect value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.ActiveOption.IsSet && value.Active != null) + { + writer.WritePropertyName("active"); + serializer.Serialize(writer, value.Active); + } + if (value.AudOption.IsSet) + { + writer.WritePropertyName("aud"); + serializer.Serialize(writer, value.Aud); + } + if (value.ClientIdOption.IsSet && value.ClientId != null) + { + writer.WritePropertyName("client_id"); + serializer.Serialize(writer, value.ClientId); + } + if (value.ExpOption.IsSet && value.Exp != null) + { + writer.WritePropertyName("exp"); + serializer.Serialize(writer, value.Exp); + } + if (value.IatOption.IsSet && value.Iat != null) + { + writer.WritePropertyName("iat"); + serializer.Serialize(writer, value.Iat); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/Converters/UserProfileV2NewtonsoftConverter.cs b/Kinde.Api/Accounts/Converters/UserProfileV2NewtonsoftConverter.cs new file mode 100644 index 0000000..0c9a174 --- /dev/null +++ b/Kinde.Api/Accounts/Converters/UserProfileV2NewtonsoftConverter.cs @@ -0,0 +1,150 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Accounts.Model; +using Kinde.Accounts.Client; + +namespace Kinde.Api.Accounts.Converters +{ + /// + /// Newtonsoft.Json converter for UserProfileV2 that handles the Option<> structure + /// + public class UserProfileV2NewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UserProfileV2 ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UserProfileV2 existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? sub = default(string?); + if (jsonObject["sub"] != null) + { + sub = jsonObject["sub"].ToObject(); + } + string? providedId = default(string?); + if (jsonObject["provided_id"] != null) + { + providedId = jsonObject["provided_id"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? givenName = default(string?); + if (jsonObject["given_name"] != null) + { + givenName = jsonObject["given_name"].ToObject(); + } + string? familyName = default(string?); + if (jsonObject["family_name"] != null) + { + familyName = jsonObject["family_name"].ToObject(); + } + int? updatedAt = default(int?); + if (jsonObject["updated_at"] != null) + { + updatedAt = jsonObject["updated_at"].ToObject(serializer); + } + string? email = default(string?); + if (jsonObject["email"] != null) + { + email = jsonObject["email"].ToObject(); + } + bool? emailVerified = default(bool?); + if (jsonObject["email_verified"] != null) + { + emailVerified = jsonObject["email_verified"].ToObject(serializer); + } + string? picture = default(string?); + if (jsonObject["picture"] != null) + { + picture = jsonObject["picture"].ToObject(); + } + string? preferredUsername = default(string?); + if (jsonObject["preferred_username"] != null) + { + preferredUsername = jsonObject["preferred_username"].ToObject(); + } + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + + return new UserProfileV2( + sub: sub != null ? new Option(sub) : default, providedId: providedId != null ? new Option(providedId) : default, name: name != null ? new Option(name) : default, givenName: givenName != null ? new Option(givenName) : default, familyName: familyName != null ? new Option(familyName) : default, updatedAt: updatedAt != null ? new Option(updatedAt) : default, email: email != null ? new Option(email) : default, emailVerified: emailVerified != null ? new Option(emailVerified) : default, picture: picture != null ? new Option(picture) : default, preferredUsername: preferredUsername != null ? new Option(preferredUsername) : default, id: id != null ? new Option(id) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UserProfileV2 value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.SubOption.IsSet && value.Sub != null) + { + writer.WritePropertyName("sub"); + serializer.Serialize(writer, value.Sub); + } + if (value.ProvidedIdOption.IsSet && value.ProvidedId != null) + { + writer.WritePropertyName("provided_id"); + serializer.Serialize(writer, value.ProvidedId); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.GivenNameOption.IsSet && value.GivenName != null) + { + writer.WritePropertyName("given_name"); + serializer.Serialize(writer, value.GivenName); + } + if (value.FamilyNameOption.IsSet && value.FamilyName != null) + { + writer.WritePropertyName("family_name"); + serializer.Serialize(writer, value.FamilyName); + } + if (value.UpdatedAtOption.IsSet && value.UpdatedAt != null) + { + writer.WritePropertyName("updated_at"); + serializer.Serialize(writer, value.UpdatedAt); + } + if (value.EmailOption.IsSet && value.Email != null) + { + writer.WritePropertyName("email"); + serializer.Serialize(writer, value.Email); + } + if (value.EmailVerifiedOption.IsSet && value.EmailVerified != null) + { + writer.WritePropertyName("email_verified"); + serializer.Serialize(writer, value.EmailVerified); + } + if (value.PictureOption.IsSet && value.Picture != null) + { + writer.WritePropertyName("picture"); + serializer.Serialize(writer, value.Picture); + } + if (value.PreferredUsernameOption.IsSet && value.PreferredUsername != null) + { + writer.WritePropertyName("preferred_username"); + serializer.Serialize(writer, value.PreferredUsername); + } + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Accounts/README.md b/Kinde.Api/Accounts/README.md new file mode 100644 index 0000000..d43645e --- /dev/null +++ b/Kinde.Api/Accounts/README.md @@ -0,0 +1,2 @@ +# Created with Openapi Generator +See the project's [REAMDE](src/Kinde.Accounts/README.md) \ No newline at end of file diff --git a/Kinde.Api/Client/ApiClient.cs b/Kinde.Api/Client/ApiClient.cs index a4e5eb7..9e605fd 100644 --- a/Kinde.Api/Client/ApiClient.cs +++ b/Kinde.Api/Client/ApiClient.cs @@ -32,6 +32,56 @@ namespace Kinde.Api.Client { + /// + /// Custom contract resolver that respects [JsonPropertyName] attributes from System.Text.Json + /// and ensures Option<> properties use the OptionNewtonsoftConverter + /// + internal class JsonPropertyNameContractResolver : DefaultContractResolver + { + private static readonly Kinde.Api.Converters.OptionNewtonsoftConverter _optionConverter = new Kinde.Api.Converters.OptionNewtonsoftConverter(); + + public JsonPropertyNameContractResolver() + { + // Use camelCase naming strategy like the original, but allow [JsonPropertyName] to override + NamingStrategy = new CamelCaseNamingStrategy + { + OverrideSpecifiedNames = false + }; + } + + protected override JsonProperty CreateProperty(MemberInfo member, MemberSerialization memberSerialization) + { + var property = base.CreateProperty(member, memberSerialization); + + // Check for [JsonPropertyName] attribute (System.Text.Json) + var jsonPropertyNameAttr = member.GetCustomAttribute( + Type.GetType("System.Text.Json.Serialization.JsonPropertyNameAttribute, System.Text.Json")); + if (jsonPropertyNameAttr != null) + { + var nameProperty = jsonPropertyNameAttr.GetType().GetProperty("Name"); + if (nameProperty != null) + { + var name = nameProperty.GetValue(jsonPropertyNameAttr) as string; + if (!string.IsNullOrEmpty(name)) + { + property.PropertyName = name; + } + } + } + + // If the property type is Option<>, ensure it uses the OptionNewtonsoftConverter + // This is critical for nested objects that don't have custom converters + if (property.PropertyType.IsGenericType && + property.PropertyType.GetGenericTypeDefinition() == typeof(Option<>)) + { + property.Converter = _optionConverter; + property.MemberConverter = _optionConverter; + } + + return property; + } + } + /// /// Helper class for creating standard JSON converters /// @@ -42,14 +92,7 @@ internal static class JsonConverterHelper /// public static IList CreateStandardConverters() { - return new List - { - new Kinde.Api.Converters.NewtonsoftGenericEnumConverter(), - new Kinde.Api.Converters.CreateUserResponseNewtonsoftConverter(), - new Kinde.Api.Converters.OptionNewtonsoftConverter(), - new Kinde.Api.Converters.CreateUserRequestIdentitiesInnerNewtonsoftConverter(), - new Kinde.Api.Converters.CreateUserIdentityRequestNewtonsoftConverter() - }; + return JsonConverterRegistry.CreateStandardConverters(); } } @@ -64,13 +107,7 @@ internal class CustomJsonCodec { // OpenAPI generated types generally hide default constructors. ConstructorHandling = ConstructorHandling.AllowNonPublicDefaultConstructor, - ContractResolver = new DefaultContractResolver - { - NamingStrategy = new CamelCaseNamingStrategy - { - OverrideSpecifiedNames = false - } - }, + ContractResolver = new JsonPropertyNameContractResolver(), // Add our custom enum converter for proper enum serialization Converters = JsonConverterHelper.CreateStandardConverters() }; @@ -159,7 +196,11 @@ internal async Task Deserialize(HttpResponseMessage response, Type type) return DateTime.Parse(await response.Content.ReadAsStringAsync().ConfigureAwait(false), null, System.Globalization.DateTimeStyles.RoundtripKind); } - if (type == typeof(string) || type.Name.StartsWith("System.Nullable")) // return primitive type + // Check if this is a nullable enum - if so, let it go through JSON deserialization to use enum converters + var underlyingType = Nullable.GetUnderlyingType(type); + var isNullableEnum = underlyingType != null && underlyingType.IsEnum; + + if (type == typeof(string) || (type.Name.StartsWith("System.Nullable") && !isNullableEnum)) // return primitive type (but not nullable enums) { return Convert.ChangeType(await response.Content.ReadAsStringAsync().ConfigureAwait(false), type); } @@ -208,13 +249,7 @@ public partial class ApiClient : IDisposable, ISynchronousClient, IAsynchronousC { // OpenAPI generated types generally hide default constructors. ConstructorHandling = ConstructorHandling.AllowNonPublicDefaultConstructor, - ContractResolver = new DefaultContractResolver - { - NamingStrategy = new CamelCaseNamingStrategy - { - OverrideSpecifiedNames = false - } - }, + ContractResolver = new JsonPropertyNameContractResolver(), // Add our custom enum converter for proper enum serialization Converters = JsonConverterHelper.CreateStandardConverters() }; diff --git a/Kinde.Api/Client/JsonConverterRegistry.cs b/Kinde.Api/Client/JsonConverterRegistry.cs new file mode 100644 index 0000000..c968922 --- /dev/null +++ b/Kinde.Api/Client/JsonConverterRegistry.cs @@ -0,0 +1,287 @@ +// +// This file is automatically generated by the converter generator. +// DO NOT EDIT THIS FILE MANUALLY - your changes will be overwritten. +// To regenerate this file, run: python generate_converters.py --config +// + +using System; +using System.Collections.Generic; +using Newtonsoft.Json; + +namespace Kinde.Api.Client +{ + /// + /// Auto-generated registry of all JSON converters. + /// This class is automatically generated - do not edit manually. + /// + internal static class JsonConverterRegistry + { + /// + /// Creates the standard converter collection for JSON serialization. + /// This method is auto-generated - do not edit manually. + /// + public static IList CreateStandardConverters() + { + return new List + { + // Generic converters + new Kinde.Api.Converters.NewtonsoftGenericEnumConverter(), + new Kinde.Api.Converters.OptionNewtonsoftConverter(), + + // Request/Identity converters (manually maintained) + new Kinde.Api.Converters.CreateUserIdentityRequestNewtonsoftConverter(), + new Kinde.Api.Converters.CreateUserRequestIdentitiesInnerNewtonsoftConverter(), + + // Request converters (alphabetically ordered) + new Kinde.Api.Converters.AddAPIScopeRequestNewtonsoftConverter(), + new Kinde.Api.Converters.AddAPIsRequestNewtonsoftConverter(), + new Kinde.Api.Converters.AddOrganizationUsersRequestNewtonsoftConverter(), + new Kinde.Api.Converters.AddRoleScopeRequestNewtonsoftConverter(), + new Kinde.Api.Converters.CreateApiKeyRequestNewtonsoftConverter(), + new Kinde.Api.Converters.CreateApplicationRequestNewtonsoftConverter(), + new Kinde.Api.Converters.CreateBillingAgreementRequestNewtonsoftConverter(), + new Kinde.Api.Converters.CreateCategoryRequestNewtonsoftConverter(), + new Kinde.Api.Converters.CreateConnectionRequestNewtonsoftConverter(), + new Kinde.Api.Converters.CreateConnectionRequestOptionsOneOf1NewtonsoftConverter(), + new Kinde.Api.Converters.CreateConnectionRequestOptionsOneOf2NewtonsoftConverter(), + new Kinde.Api.Converters.CreateConnectionRequestOptionsOneOfNewtonsoftConverter(), + new Kinde.Api.Converters.CreateEnvironmentVariableRequestNewtonsoftConverter(), + new Kinde.Api.Converters.CreateFeatureFlagRequestNewtonsoftConverter(), + new Kinde.Api.Converters.CreateMeterUsageRecordRequestNewtonsoftConverter(), + new Kinde.Api.Converters.CreateOrganizationRequestNewtonsoftConverter(), + new Kinde.Api.Converters.CreateOrganizationUserPermissionRequestNewtonsoftConverter(), + new Kinde.Api.Converters.CreateOrganizationUserRoleRequestNewtonsoftConverter(), + new Kinde.Api.Converters.CreatePermissionRequestNewtonsoftConverter(), + new Kinde.Api.Converters.CreatePropertyRequestNewtonsoftConverter(), + new Kinde.Api.Converters.CreateRoleRequestNewtonsoftConverter(), + new Kinde.Api.Converters.CreateUserRequestNewtonsoftConverter(), + new Kinde.Api.Converters.CreateUserRequestProfileNewtonsoftConverter(), + new Kinde.Api.Converters.CreateWebHookRequestNewtonsoftConverter(), + new Kinde.Api.Converters.ReplaceConnectionRequestNewtonsoftConverter(), + new Kinde.Api.Converters.ReplaceConnectionRequestOptionsOneOf1NewtonsoftConverter(), + new Kinde.Api.Converters.ReplaceConnectionRequestOptionsOneOfNewtonsoftConverter(), + new Kinde.Api.Converters.ReplaceLogoutRedirectURLsRequestNewtonsoftConverter(), + new Kinde.Api.Converters.ReplaceMFARequestNewtonsoftConverter(), + new Kinde.Api.Converters.ReplaceOrganizationMFARequestNewtonsoftConverter(), + new Kinde.Api.Converters.ReplaceRedirectCallbackURLsRequestNewtonsoftConverter(), + new Kinde.Api.Converters.SetUserPasswordRequestNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateAPIApplicationsRequestNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateAPIScopeRequestNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateApplicationRequestNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateApplicationTokensRequestNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateApplicationsPropertyRequestNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateBusinessRequestNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateCategoryRequestNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateConnectionRequestNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateConnectionRequestOptionsOneOfNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateEnvironementFeatureFlagOverrideRequestNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateEnvironmentVariableRequestNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateIdentityRequestNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateOrganizationPropertiesRequestNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateOrganizationRequestNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateOrganizationSessionsRequestNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateOrganizationUsersRequestNewtonsoftConverter(), + new Kinde.Api.Converters.UpdatePropertyRequestNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateRolePermissionsRequestNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateRolesRequestNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateUserRequestNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateWebHookRequestNewtonsoftConverter(), + new Kinde.Api.Converters.VerifyApiKeyRequestNewtonsoftConverter(), + + // Response converters (alphabetically ordered) + new Kinde.Api.Converters.AddOrganizationUsersResponseNewtonsoftConverter(), + new Kinde.Api.Converters.AddRoleScopeResponseNewtonsoftConverter(), + new Kinde.Api.Converters.ApiResultNewtonsoftConverter(), + new Kinde.Api.Converters.ApplicationsNewtonsoftConverter(), + new Kinde.Api.Converters.AuthorizeAppApiResponseNewtonsoftConverter(), + new Kinde.Api.Converters.CategoryNewtonsoftConverter(), + new Kinde.Api.Converters.ConnectedAppsAccessTokenNewtonsoftConverter(), + new Kinde.Api.Converters.ConnectedAppsAuthUrlNewtonsoftConverter(), + new Kinde.Api.Converters.ConnectionConnectionNewtonsoftConverter(), + new Kinde.Api.Converters.ConnectionNewtonsoftConverter(), + new Kinde.Api.Converters.CreateApiKeyResponseApiKeyNewtonsoftConverter(), + new Kinde.Api.Converters.CreateApiKeyResponseNewtonsoftConverter(), + new Kinde.Api.Converters.CreateApiScopesResponseNewtonsoftConverter(), + new Kinde.Api.Converters.CreateApiScopesResponseScopeNewtonsoftConverter(), + new Kinde.Api.Converters.CreateApisResponseApiNewtonsoftConverter(), + new Kinde.Api.Converters.CreateApisResponseNewtonsoftConverter(), + new Kinde.Api.Converters.CreateApplicationResponseApplicationNewtonsoftConverter(), + new Kinde.Api.Converters.CreateApplicationResponseNewtonsoftConverter(), + new Kinde.Api.Converters.CreateCategoryResponseCategoryNewtonsoftConverter(), + new Kinde.Api.Converters.CreateCategoryResponseNewtonsoftConverter(), + new Kinde.Api.Converters.CreateConnectionResponseConnectionNewtonsoftConverter(), + new Kinde.Api.Converters.CreateConnectionResponseNewtonsoftConverter(), + new Kinde.Api.Converters.CreateEnvironmentVariableResponseEnvironmentVariableNewtonsoftConverter(), + new Kinde.Api.Converters.CreateEnvironmentVariableResponseNewtonsoftConverter(), + new Kinde.Api.Converters.CreateIdentityResponseIdentityNewtonsoftConverter(), + new Kinde.Api.Converters.CreateIdentityResponseNewtonsoftConverter(), + new Kinde.Api.Converters.CreateMeterUsageRecordResponseNewtonsoftConverter(), + new Kinde.Api.Converters.CreateOrganizationResponseNewtonsoftConverter(), + new Kinde.Api.Converters.CreateOrganizationResponseOrganizationNewtonsoftConverter(), + new Kinde.Api.Converters.CreatePropertyResponseNewtonsoftConverter(), + new Kinde.Api.Converters.CreatePropertyResponsePropertyNewtonsoftConverter(), + new Kinde.Api.Converters.CreateRolesResponseNewtonsoftConverter(), + new Kinde.Api.Converters.CreateRolesResponseRoleNewtonsoftConverter(), + new Kinde.Api.Converters.CreateSubscriberSuccessResponseNewtonsoftConverter(), + new Kinde.Api.Converters.CreateSubscriberSuccessResponseSubscriberNewtonsoftConverter(), + new Kinde.Api.Converters.CreateUserResponseNewtonsoftConverter(), + new Kinde.Api.Converters.CreateWebhookResponseNewtonsoftConverter(), + new Kinde.Api.Converters.CreateWebhookResponseWebhookNewtonsoftConverter(), + new Kinde.Api.Converters.DeleteApiResponseNewtonsoftConverter(), + new Kinde.Api.Converters.DeleteEnvironmentVariableResponseNewtonsoftConverter(), + new Kinde.Api.Converters.DeleteRoleScopeResponseNewtonsoftConverter(), + new Kinde.Api.Converters.DeleteWebhookResponseNewtonsoftConverter(), + new Kinde.Api.Converters.EnvironmentVariableNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.ErrorNewtonsoftConverter(), + new Kinde.Api.Converters.EventTypeNewtonsoftConverter(), + new Kinde.Api.Converters.GetApiKeyResponseApiKeyNewtonsoftConverter(), + new Kinde.Api.Converters.GetApiKeyResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetApiKeysResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetApiResponseApiNewtonsoftConverter(), + new Kinde.Api.Converters.GetApiResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetApiScopeResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetApiScopesResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetApisResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetApplicationResponseApplicationNewtonsoftConverter(), + new Kinde.Api.Converters.GetApplicationResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetApplicationsResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetBillingAgreementsResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetBillingEntitlementsResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetBusinessResponseBusinessNewtonsoftConverter(), + new Kinde.Api.Converters.GetBusinessResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetCategoriesResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetConnectionsResponseNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetEntitlementResponseDataEntitlementNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetEntitlementResponseDataNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetEntitlementResponseNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetEntitlementsResponseDataNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetEntitlementsResponseMetadataNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetEntitlementsResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetEnvironmentFeatureFlagsResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetEnvironmentResponseEnvironmentBackgroundColorNewtonsoftConverter(), + new Kinde.Api.Converters.GetEnvironmentResponseEnvironmentLinkColorNewtonsoftConverter(), + new Kinde.Api.Converters.GetEnvironmentResponseEnvironmentNewtonsoftConverter(), + new Kinde.Api.Converters.GetEnvironmentResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetEnvironmentVariableResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetEnvironmentVariablesResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetEventResponseEventNewtonsoftConverter(), + new Kinde.Api.Converters.GetEventResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetEventTypesResponseNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetFeatureFlagsResponseDataNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetFeatureFlagsResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetIdentitiesResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetIndustriesResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetOrganizationFeatureFlagsResponseFeatureFlagsValueNewtonsoftConverter(), + new Kinde.Api.Converters.GetOrganizationFeatureFlagsResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetOrganizationResponseBillingNewtonsoftConverter(), + new Kinde.Api.Converters.GetOrganizationResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetOrganizationUsersResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetOrganizationsResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetOrganizationsUserPermissionsResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetOrganizationsUserRolesResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetPermissionsResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetPropertiesResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetPropertyValuesResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetRedirectCallbackUrlsResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetRoleResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetRoleResponseRoleNewtonsoftConverter(), + new Kinde.Api.Converters.GetRolesResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetSubscriberResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetSubscribersResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetTimezonesResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetUserMfaResponseMfaNewtonsoftConverter(), + new Kinde.Api.Converters.GetUserMfaResponseNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetUserPermissionsResponseDataNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetUserPermissionsResponseMetadataNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetUserPermissionsResponseNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetUserPropertiesResponseDataNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetUserPropertiesResponseMetadataNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetUserPropertiesResponseNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetUserRolesResponseDataNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetUserRolesResponseMetadataNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetUserRolesResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetUserSessionsResponseNewtonsoftConverter(), + new Kinde.Api.Converters.GetWebhooksResponseNewtonsoftConverter(), + new Kinde.Api.Converters.IdentityNewtonsoftConverter(), + new Kinde.Api.Converters.LogoutRedirectUrlsNewtonsoftConverter(), + new Kinde.Api.Converters.NotFoundResponseErrorsNewtonsoftConverter(), + new Kinde.Api.Converters.NotFoundResponseNewtonsoftConverter(), + new Kinde.Api.Converters.OrganizationItemSchemaNewtonsoftConverter(), + new Kinde.Api.Converters.OrganizationUserNewtonsoftConverter(), + new Kinde.Api.Converters.OrganizationUserPermissionNewtonsoftConverter(), + new Kinde.Api.Converters.OrganizationUserRoleNewtonsoftConverter(), + new Kinde.Api.Converters.OrganizationUserRolePermissionsNewtonsoftConverter(), + new Kinde.Api.Converters.OrganizationUserRolePermissionsPermissionsNewtonsoftConverter(), + new Kinde.Api.Converters.PermissionsNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.PortalLinkNewtonsoftConverter(), + new Kinde.Api.Converters.PropertyNewtonsoftConverter(), + new Kinde.Api.Converters.PropertyValueNewtonsoftConverter(), + new Kinde.Api.Converters.ReadEnvLogoResponseNewtonsoftConverter(), + new Kinde.Api.Converters.ReadLogoResponseNewtonsoftConverter(), + new Kinde.Api.Converters.RedirectCallbackUrlsNewtonsoftConverter(), + new Kinde.Api.Converters.RoleNewtonsoftConverter(), + new Kinde.Api.Converters.RolePermissionsResponseNewtonsoftConverter(), + new Kinde.Api.Converters.RoleScopesResponseNewtonsoftConverter(), + new Kinde.Api.Converters.RolesNewtonsoftConverter(), + new Kinde.Api.Converters.RotateApiKeyResponseApiKeyNewtonsoftConverter(), + new Kinde.Api.Converters.RotateApiKeyResponseNewtonsoftConverter(), + new Kinde.Api.Converters.ScopesNewtonsoftConverter(), + new Kinde.Api.Converters.SearchUsersResponseNewtonsoftConverter(), + new Kinde.Api.Converters.SubscriberNewtonsoftConverter(), + new Kinde.Api.Converters.SubscribersSubscriberNewtonsoftConverter(), + new Kinde.Api.Converters.SuccessResponseNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.TokenErrorResponseNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.TokenIntrospectNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateEnvironmentVariableResponseNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateOrganizationUsersResponseNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateRolePermissionsResponseNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateUserResponseNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateWebhookResponseNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateWebhookResponseWebhookNewtonsoftConverter(), + new Kinde.Api.Converters.UserBillingNewtonsoftConverter(), + new Kinde.Api.Converters.UserIdentityNewtonsoftConverter(), + new Kinde.Api.Converters.UserIdentityResultNewtonsoftConverter(), + new Kinde.Api.Converters.UserNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.UserProfileV2NewtonsoftConverter(), + new Kinde.Api.Converters.UsersResponseNewtonsoftConverter(), + new Kinde.Api.Converters.VerifyApiKeyResponseNewtonsoftConverter(), + new Kinde.Api.Converters.WebhookNewtonsoftConverter(), + + // Inner model converters (alphabetically ordered) + new Kinde.Api.Converters.AddOrganizationUsersRequestUsersInnerNewtonsoftConverter(), + new Kinde.Api.Converters.CreateUserRequestIdentitiesInnerDetailsNewtonsoftConverter(), + new Kinde.Api.Converters.GetApiKeysResponseApiKeysInnerNewtonsoftConverter(), + new Kinde.Api.Converters.GetApiResponseApiApplicationsInnerNewtonsoftConverter(), + new Kinde.Api.Converters.GetApiResponseApiScopesInnerNewtonsoftConverter(), + new Kinde.Api.Converters.GetApiScopesResponseScopesInnerNewtonsoftConverter(), + new Kinde.Api.Converters.GetApisResponseApisInnerNewtonsoftConverter(), + new Kinde.Api.Converters.GetApisResponseApisInnerScopesInnerNewtonsoftConverter(), + new Kinde.Api.Converters.GetBillingAgreementsResponseAgreementsInnerEntitlementsInnerNewtonsoftConverter(), + new Kinde.Api.Converters.GetBillingAgreementsResponseAgreementsInnerNewtonsoftConverter(), + new Kinde.Api.Converters.GetBillingEntitlementsResponseEntitlementsInnerNewtonsoftConverter(), + new Kinde.Api.Converters.GetBillingEntitlementsResponsePlansInnerNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetEntitlementsResponseDataEntitlementsInnerNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetEntitlementsResponseDataPlansInnerNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetFeatureFlagsResponseDataFeatureFlagsInnerNewtonsoftConverter(), + new Kinde.Api.Converters.GetIndustriesResponseIndustriesInnerNewtonsoftConverter(), + new Kinde.Api.Converters.GetOrganizationResponseBillingAgreementsInnerNewtonsoftConverter(), + new Kinde.Api.Converters.GetTimezonesResponseTimezonesInnerNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetUserPermissionsResponseDataPermissionsInnerNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetUserPropertiesResponseDataPropertiesInnerNewtonsoftConverter(), + new Kinde.Api.Accounts.Converters.GetUserRolesResponseDataRolesInnerNewtonsoftConverter(), + new Kinde.Api.Converters.GetUserSessionsResponseSessionsInnerNewtonsoftConverter(), + new Kinde.Api.Converters.OrganizationUserPermissionRolesInnerNewtonsoftConverter(), + new Kinde.Api.Converters.ReadEnvLogoResponseLogosInnerNewtonsoftConverter(), + new Kinde.Api.Converters.ReadLogoResponseLogosInnerNewtonsoftConverter(), + new Kinde.Api.Converters.SearchUsersResponseResultsInnerApiScopesInnerNewtonsoftConverter(), + new Kinde.Api.Converters.SearchUsersResponseResultsInnerNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateAPIApplicationsRequestApplicationsInnerNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateOrganizationUsersRequestUsersInnerNewtonsoftConverter(), + new Kinde.Api.Converters.UpdateRolePermissionsRequestPermissionsInnerNewtonsoftConverter(), + new Kinde.Api.Converters.UserIdentitiesInnerNewtonsoftConverter(), + new Kinde.Api.Converters.UsersResponseUsersInnerBillingNewtonsoftConverter(), + new Kinde.Api.Converters.UsersResponseUsersInnerNewtonsoftConverter(), + }; + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/AddAPIScopeRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/AddAPIScopeRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..cc4412e --- /dev/null +++ b/Kinde.Api/Converters/AddAPIScopeRequestNewtonsoftConverter.cs @@ -0,0 +1,55 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for AddAPIScopeRequest that handles the Option<> structure + /// + public class AddAPIScopeRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override AddAPIScopeRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, AddAPIScopeRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? description = default(string?); + if (jsonObject["description"] != null) + { + description = jsonObject["description"].ToObject(); + } + string key = default(string); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + + return new AddAPIScopeRequest( + description: description != null ? new Option(description) : default, key: key ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, AddAPIScopeRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.DescriptionOption.IsSet && value.Description != null) + { + writer.WritePropertyName("description"); + serializer.Serialize(writer, value.Description); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/AddAPIsRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/AddAPIsRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..4702df5 --- /dev/null +++ b/Kinde.Api/Converters/AddAPIsRequestNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for AddAPIsRequest that handles the Option<> structure + /// + public class AddAPIsRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override AddAPIsRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, AddAPIsRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string name = default(string); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string audience = default(string); + if (jsonObject["audience"] != null) + { + audience = jsonObject["audience"].ToObject(); + } + + return new AddAPIsRequest( + name: name, audience: audience ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, AddAPIsRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/AddOrganizationUsersRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/AddOrganizationUsersRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..2267af1 --- /dev/null +++ b/Kinde.Api/Converters/AddOrganizationUsersRequestNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for AddOrganizationUsersRequest that handles the Option<> structure + /// + public class AddOrganizationUsersRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override AddOrganizationUsersRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, AddOrganizationUsersRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + List users = default(List); + if (jsonObject["users"] != null) + { + users = jsonObject["users"].ToObject>(serializer); + } + + return new AddOrganizationUsersRequest( + users: users != null ? new Option?>(users) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, AddOrganizationUsersRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.UsersOption.IsSet) + { + writer.WritePropertyName("users"); + serializer.Serialize(writer, value.Users); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/AddOrganizationUsersRequestUsersInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/AddOrganizationUsersRequestUsersInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..398b8cf --- /dev/null +++ b/Kinde.Api/Converters/AddOrganizationUsersRequestUsersInnerNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for AddOrganizationUsersRequestUsersInner that handles the Option<> structure + /// + public class AddOrganizationUsersRequestUsersInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override AddOrganizationUsersRequestUsersInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, AddOrganizationUsersRequestUsersInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + List roles = default(List); + if (jsonObject["roles"] != null) + { + roles = jsonObject["roles"].ToObject>(serializer); + } + List permissions = default(List); + if (jsonObject["permissions"] != null) + { + permissions = jsonObject["permissions"].ToObject>(serializer); + } + + return new AddOrganizationUsersRequestUsersInner( + id: id != null ? new Option(id) : default, roles: roles != null ? new Option?>(roles) : default, permissions: permissions != null ? new Option?>(permissions) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, AddOrganizationUsersRequestUsersInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.RolesOption.IsSet) + { + writer.WritePropertyName("roles"); + serializer.Serialize(writer, value.Roles); + } + if (value.PermissionsOption.IsSet) + { + writer.WritePropertyName("permissions"); + serializer.Serialize(writer, value.Permissions); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/AddOrganizationUsersResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/AddOrganizationUsersResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..e2b8017 --- /dev/null +++ b/Kinde.Api/Converters/AddOrganizationUsersResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for AddOrganizationUsersResponse that handles the Option<> structure + /// + public class AddOrganizationUsersResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override AddOrganizationUsersResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, AddOrganizationUsersResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List usersAdded = default(List); + if (jsonObject["users_added"] != null) + { + usersAdded = jsonObject["users_added"].ToObject>(serializer); + } + + return new AddOrganizationUsersResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, usersAdded: usersAdded != null ? new Option?>(usersAdded) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, AddOrganizationUsersResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.UsersAddedOption.IsSet) + { + writer.WritePropertyName("users_added"); + serializer.Serialize(writer, value.UsersAdded); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/AddRoleScopeRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/AddRoleScopeRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..2986bb3 --- /dev/null +++ b/Kinde.Api/Converters/AddRoleScopeRequestNewtonsoftConverter.cs @@ -0,0 +1,45 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for AddRoleScopeRequest that handles the Option<> structure + /// + public class AddRoleScopeRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override AddRoleScopeRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, AddRoleScopeRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string scopeId = default(string); + if (jsonObject["scope_id"] != null) + { + scopeId = jsonObject["scope_id"].ToObject(); + } + + return new AddRoleScopeRequest( + scopeId: scopeId ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, AddRoleScopeRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/AddRoleScopeResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/AddRoleScopeResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..1c8fd12 --- /dev/null +++ b/Kinde.Api/Converters/AddRoleScopeResponseNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for AddRoleScopeResponse that handles the Option<> structure + /// + public class AddRoleScopeResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override AddRoleScopeResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, AddRoleScopeResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + + return new AddRoleScopeResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, AddRoleScopeResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/ApiResultNewtonsoftConverter.cs b/Kinde.Api/Converters/ApiResultNewtonsoftConverter.cs new file mode 100644 index 0000000..53e1d01 --- /dev/null +++ b/Kinde.Api/Converters/ApiResultNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for ApiResult that handles the Option<> structure + /// + public class ApiResultNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override ApiResult ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, ApiResult existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? result = default(string?); + if (jsonObject["result"] != null) + { + result = jsonObject["result"].ToObject(); + } + + return new ApiResult( + result: result != null ? new Option(result) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, ApiResult value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.ResultOption.IsSet && value.Result != null) + { + writer.WritePropertyName("result"); + serializer.Serialize(writer, value.Result); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/ApplicationsNewtonsoftConverter.cs b/Kinde.Api/Converters/ApplicationsNewtonsoftConverter.cs new file mode 100644 index 0000000..65663d5 --- /dev/null +++ b/Kinde.Api/Converters/ApplicationsNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for Applications that handles the Option<> structure + /// + public class ApplicationsNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override Applications ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, Applications existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? type = default(string?); + if (jsonObject["type"] != null) + { + type = jsonObject["type"].ToObject(); + } + + return new Applications( + id: id != null ? new Option(id) : default, name: name != null ? new Option(name) : default, type: type != null ? new Option(type) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, Applications value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.TypeOption.IsSet && value.Type != null) + { + writer.WritePropertyName("type"); + serializer.Serialize(writer, value.Type); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/AuthorizeAppApiResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/AuthorizeAppApiResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..88ffe6e --- /dev/null +++ b/Kinde.Api/Converters/AuthorizeAppApiResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for AuthorizeAppApiResponse that handles the Option<> structure + /// + public class AuthorizeAppApiResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override AuthorizeAppApiResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, AuthorizeAppApiResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + List applicationsDisconnected = default(List); + if (jsonObject["applications_disconnected"] != null) + { + applicationsDisconnected = jsonObject["applications_disconnected"].ToObject>(serializer); + } + List applicationsConnected = default(List); + if (jsonObject["applications_connected"] != null) + { + applicationsConnected = jsonObject["applications_connected"].ToObject>(serializer); + } + + return new AuthorizeAppApiResponse( + message: message != null ? new Option(message) : default, code: code != null ? new Option(code) : default, applicationsDisconnected: applicationsDisconnected != null ? new Option?>(applicationsDisconnected) : default, applicationsConnected: applicationsConnected != null ? new Option?>(applicationsConnected) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, AuthorizeAppApiResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.ApplicationsDisconnectedOption.IsSet) + { + writer.WritePropertyName("applications_disconnected"); + serializer.Serialize(writer, value.ApplicationsDisconnected); + } + if (value.ApplicationsConnectedOption.IsSet) + { + writer.WritePropertyName("applications_connected"); + serializer.Serialize(writer, value.ApplicationsConnected); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CategoryNewtonsoftConverter.cs b/Kinde.Api/Converters/CategoryNewtonsoftConverter.cs new file mode 100644 index 0000000..b801e95 --- /dev/null +++ b/Kinde.Api/Converters/CategoryNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for Category that handles the Option<> structure + /// + public class CategoryNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override Category ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, Category existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + + return new Category( + id: id != null ? new Option(id) : default, name: name != null ? new Option(name) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, Category value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/ConnectedAppsAccessTokenNewtonsoftConverter.cs b/Kinde.Api/Converters/ConnectedAppsAccessTokenNewtonsoftConverter.cs new file mode 100644 index 0000000..84a68d0 --- /dev/null +++ b/Kinde.Api/Converters/ConnectedAppsAccessTokenNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for ConnectedAppsAccessToken that handles the Option<> structure + /// + public class ConnectedAppsAccessTokenNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override ConnectedAppsAccessToken ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, ConnectedAppsAccessToken existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? accessToken = default(string?); + if (jsonObject["access_token"] != null) + { + accessToken = jsonObject["access_token"].ToObject(); + } + string? accessTokenExpiry = default(string?); + if (jsonObject["access_token_expiry"] != null) + { + accessTokenExpiry = jsonObject["access_token_expiry"].ToObject(); + } + + return new ConnectedAppsAccessToken( + accessToken: accessToken != null ? new Option(accessToken) : default, accessTokenExpiry: accessTokenExpiry != null ? new Option(accessTokenExpiry) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, ConnectedAppsAccessToken value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.AccessTokenOption.IsSet && value.AccessToken != null) + { + writer.WritePropertyName("access_token"); + serializer.Serialize(writer, value.AccessToken); + } + if (value.AccessTokenExpiryOption.IsSet && value.AccessTokenExpiry != null) + { + writer.WritePropertyName("access_token_expiry"); + serializer.Serialize(writer, value.AccessTokenExpiry); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/ConnectedAppsAuthUrlNewtonsoftConverter.cs b/Kinde.Api/Converters/ConnectedAppsAuthUrlNewtonsoftConverter.cs new file mode 100644 index 0000000..13b1d8c --- /dev/null +++ b/Kinde.Api/Converters/ConnectedAppsAuthUrlNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for ConnectedAppsAuthUrl that handles the Option<> structure + /// + public class ConnectedAppsAuthUrlNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override ConnectedAppsAuthUrl ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, ConnectedAppsAuthUrl existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? url = default(string?); + if (jsonObject["url"] != null) + { + url = jsonObject["url"].ToObject(); + } + string? sessionId = default(string?); + if (jsonObject["session_id"] != null) + { + sessionId = jsonObject["session_id"].ToObject(); + } + + return new ConnectedAppsAuthUrl( + url: url != null ? new Option(url) : default, sessionId: sessionId != null ? new Option(sessionId) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, ConnectedAppsAuthUrl value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.UrlOption.IsSet && value.Url != null) + { + writer.WritePropertyName("url"); + serializer.Serialize(writer, value.Url); + } + if (value.SessionIdOption.IsSet && value.SessionId != null) + { + writer.WritePropertyName("session_id"); + serializer.Serialize(writer, value.SessionId); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/ConnectionConnectionNewtonsoftConverter.cs b/Kinde.Api/Converters/ConnectionConnectionNewtonsoftConverter.cs new file mode 100644 index 0000000..728d179 --- /dev/null +++ b/Kinde.Api/Converters/ConnectionConnectionNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for ConnectionConnection that handles the Option<> structure + /// + public class ConnectionConnectionNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override ConnectionConnection ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, ConnectionConnection existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? displayName = default(string?); + if (jsonObject["display_name"] != null) + { + displayName = jsonObject["display_name"].ToObject(); + } + string? strategy = default(string?); + if (jsonObject["strategy"] != null) + { + strategy = jsonObject["strategy"].ToObject(); + } + + return new ConnectionConnection( + id: id != null ? new Option(id) : default, name: name != null ? new Option(name) : default, displayName: displayName != null ? new Option(displayName) : default, strategy: strategy != null ? new Option(strategy) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, ConnectionConnection value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.DisplayNameOption.IsSet && value.DisplayName != null) + { + writer.WritePropertyName("display_name"); + serializer.Serialize(writer, value.DisplayName); + } + if (value.StrategyOption.IsSet && value.Strategy != null) + { + writer.WritePropertyName("strategy"); + serializer.Serialize(writer, value.Strategy); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/ConnectionNewtonsoftConverter.cs b/Kinde.Api/Converters/ConnectionNewtonsoftConverter.cs new file mode 100644 index 0000000..45f2b3b --- /dev/null +++ b/Kinde.Api/Converters/ConnectionNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for Connection that handles the Option<> structure + /// + public class ConnectionNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override Connection ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, Connection existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + ConnectionConnection? varConnection = default(ConnectionConnection?); + if (jsonObject["connection"] != null) + { + varConnection = jsonObject["connection"].ToObject(serializer); + } + + return new Connection( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, varConnection: varConnection != null ? new Option(varConnection) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, Connection value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.VarConnectionOption.IsSet && value.VarConnection != null) + { + writer.WritePropertyName("connection"); + serializer.Serialize(writer, value.VarConnection); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateApiKeyRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateApiKeyRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..04248dc --- /dev/null +++ b/Kinde.Api/Converters/CreateApiKeyRequestNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateApiKeyRequest that handles the Option<> structure + /// + public class CreateApiKeyRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateApiKeyRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateApiKeyRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + List scopeIds = default(List); + if (jsonObject["scope_ids"] != null) + { + scopeIds = jsonObject["scope_ids"].ToObject>(serializer); + } + string? userId = default(string?); + if (jsonObject["user_id"] != null) + { + userId = jsonObject["user_id"].ToObject(); + } + string? orgCode = default(string?); + if (jsonObject["org_code"] != null) + { + orgCode = jsonObject["org_code"].ToObject(); + } + string name = default(string); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string apiId = default(string); + if (jsonObject["api_id"] != null) + { + apiId = jsonObject["api_id"].ToObject(); + } + + return new CreateApiKeyRequest( + scopeIds: scopeIds != null ? new Option?>(scopeIds) : default, userId: userId != null ? new Option(userId) : default, orgCode: orgCode != null ? new Option(orgCode) : default, name: name, apiId: apiId ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateApiKeyRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.ScopeIdsOption.IsSet) + { + writer.WritePropertyName("scope_ids"); + serializer.Serialize(writer, value.ScopeIds); + } + if (value.UserIdOption.IsSet && value.UserId != null) + { + writer.WritePropertyName("user_id"); + serializer.Serialize(writer, value.UserId); + } + if (value.OrgCodeOption.IsSet && value.OrgCode != null) + { + writer.WritePropertyName("org_code"); + serializer.Serialize(writer, value.OrgCode); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateApiKeyResponseApiKeyNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateApiKeyResponseApiKeyNewtonsoftConverter.cs new file mode 100644 index 0000000..45d4d5a --- /dev/null +++ b/Kinde.Api/Converters/CreateApiKeyResponseApiKeyNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateApiKeyResponseApiKey that handles the Option<> structure + /// + public class CreateApiKeyResponseApiKeyNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateApiKeyResponseApiKey ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateApiKeyResponseApiKey existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + + return new CreateApiKeyResponseApiKey( + id: id != null ? new Option(id) : default, key: key != null ? new Option(key) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateApiKeyResponseApiKey value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateApiKeyResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateApiKeyResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..b852e16 --- /dev/null +++ b/Kinde.Api/Converters/CreateApiKeyResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateApiKeyResponse that handles the Option<> structure + /// + public class CreateApiKeyResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateApiKeyResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateApiKeyResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + CreateApiKeyResponseApiKey? apiKey = default(CreateApiKeyResponseApiKey?); + if (jsonObject["api_key"] != null) + { + apiKey = jsonObject["api_key"].ToObject(serializer); + } + + return new CreateApiKeyResponse( + message: message != null ? new Option(message) : default, code: code != null ? new Option(code) : default, apiKey: apiKey != null ? new Option(apiKey) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateApiKeyResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.ApiKeyOption.IsSet && value.ApiKey != null) + { + writer.WritePropertyName("api_key"); + serializer.Serialize(writer, value.ApiKey); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateApiScopesResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateApiScopesResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..e5207c2 --- /dev/null +++ b/Kinde.Api/Converters/CreateApiScopesResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateApiScopesResponse that handles the Option<> structure + /// + public class CreateApiScopesResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateApiScopesResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateApiScopesResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + CreateApiScopesResponseScope? scope = default(CreateApiScopesResponseScope?); + if (jsonObject["scope"] != null) + { + scope = jsonObject["scope"].ToObject(serializer); + } + + return new CreateApiScopesResponse( + message: message != null ? new Option(message) : default, code: code != null ? new Option(code) : default, scope: scope != null ? new Option(scope) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateApiScopesResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.ScopeOption.IsSet && value.Scope != null) + { + writer.WritePropertyName("scope"); + serializer.Serialize(writer, value.Scope); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateApiScopesResponseScopeNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateApiScopesResponseScopeNewtonsoftConverter.cs new file mode 100644 index 0000000..0a2df99 --- /dev/null +++ b/Kinde.Api/Converters/CreateApiScopesResponseScopeNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateApiScopesResponseScope that handles the Option<> structure + /// + public class CreateApiScopesResponseScopeNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateApiScopesResponseScope ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateApiScopesResponseScope existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + + return new CreateApiScopesResponseScope( + id: id != null ? new Option(id) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateApiScopesResponseScope value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateApisResponseApiNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateApisResponseApiNewtonsoftConverter.cs new file mode 100644 index 0000000..2b7acd9 --- /dev/null +++ b/Kinde.Api/Converters/CreateApisResponseApiNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateApisResponseApi that handles the Option<> structure + /// + public class CreateApisResponseApiNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateApisResponseApi ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateApisResponseApi existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + + return new CreateApisResponseApi( + id: id != null ? new Option(id) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateApisResponseApi value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateApisResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateApisResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..9b30f6e --- /dev/null +++ b/Kinde.Api/Converters/CreateApisResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateApisResponse that handles the Option<> structure + /// + public class CreateApisResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateApisResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateApisResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + CreateApisResponseApi? api = default(CreateApisResponseApi?); + if (jsonObject["api"] != null) + { + api = jsonObject["api"].ToObject(serializer); + } + + return new CreateApisResponse( + message: message != null ? new Option(message) : default, code: code != null ? new Option(code) : default, api: api != null ? new Option(api) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateApisResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.ApiOption.IsSet && value.Api != null) + { + writer.WritePropertyName("api"); + serializer.Serialize(writer, value.Api); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateApplicationRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateApplicationRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..e42a73c --- /dev/null +++ b/Kinde.Api/Converters/CreateApplicationRequestNewtonsoftConverter.cs @@ -0,0 +1,64 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateApplicationRequest that handles the Option<> structure + /// + public class CreateApplicationRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateApplicationRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateApplicationRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? orgCode = default(string?); + if (jsonObject["org_code"] != null) + { + orgCode = jsonObject["org_code"].ToObject(); + } + string name = default(string); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + CreateApplicationRequest.TypeEnum type = default(CreateApplicationRequest.TypeEnum); + if (jsonObject["type"] != null) + { + var typeStr = jsonObject["type"].ToObject(); + if (!string.IsNullOrEmpty(typeStr)) + { + type = CreateApplicationRequest.TypeEnumFromString(typeStr); + } + } + + return new CreateApplicationRequest( + orgCode: orgCode != null ? new Option(orgCode) : default, name: name, type: type ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateApplicationRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.OrgCodeOption.IsSet && value.OrgCode != null) + { + writer.WritePropertyName("org_code"); + serializer.Serialize(writer, value.OrgCode); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateApplicationResponseApplicationNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateApplicationResponseApplicationNewtonsoftConverter.cs new file mode 100644 index 0000000..0f8e026 --- /dev/null +++ b/Kinde.Api/Converters/CreateApplicationResponseApplicationNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateApplicationResponseApplication that handles the Option<> structure + /// + public class CreateApplicationResponseApplicationNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateApplicationResponseApplication ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateApplicationResponseApplication existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? clientId = default(string?); + if (jsonObject["client_id"] != null) + { + clientId = jsonObject["client_id"].ToObject(); + } + string? clientSecret = default(string?); + if (jsonObject["client_secret"] != null) + { + clientSecret = jsonObject["client_secret"].ToObject(); + } + + return new CreateApplicationResponseApplication( + id: id != null ? new Option(id) : default, clientId: clientId != null ? new Option(clientId) : default, clientSecret: clientSecret != null ? new Option(clientSecret) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateApplicationResponseApplication value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.ClientIdOption.IsSet && value.ClientId != null) + { + writer.WritePropertyName("client_id"); + serializer.Serialize(writer, value.ClientId); + } + if (value.ClientSecretOption.IsSet && value.ClientSecret != null) + { + writer.WritePropertyName("client_secret"); + serializer.Serialize(writer, value.ClientSecret); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateApplicationResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateApplicationResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..7e867b0 --- /dev/null +++ b/Kinde.Api/Converters/CreateApplicationResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateApplicationResponse that handles the Option<> structure + /// + public class CreateApplicationResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateApplicationResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateApplicationResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + CreateApplicationResponseApplication? application = default(CreateApplicationResponseApplication?); + if (jsonObject["application"] != null) + { + application = jsonObject["application"].ToObject(serializer); + } + + return new CreateApplicationResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, application: application != null ? new Option(application) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateApplicationResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.ApplicationOption.IsSet && value.Application != null) + { + writer.WritePropertyName("application"); + serializer.Serialize(writer, value.Application); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateBillingAgreementRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateBillingAgreementRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..ae86ef3 --- /dev/null +++ b/Kinde.Api/Converters/CreateBillingAgreementRequestNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateBillingAgreementRequest that handles the Option<> structure + /// + public class CreateBillingAgreementRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateBillingAgreementRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateBillingAgreementRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + bool? isInvoiceNow = default(bool?); + if (jsonObject["is_invoice_now"] != null) + { + isInvoiceNow = jsonObject["is_invoice_now"].ToObject(serializer); + } + bool? isProrate = default(bool?); + if (jsonObject["is_prorate"] != null) + { + isProrate = jsonObject["is_prorate"].ToObject(serializer); + } + string customerId = default(string); + if (jsonObject["customer_id"] != null) + { + customerId = jsonObject["customer_id"].ToObject(); + } + string planCode = default(string); + if (jsonObject["plan_code"] != null) + { + planCode = jsonObject["plan_code"].ToObject(); + } + + return new CreateBillingAgreementRequest( + isInvoiceNow: isInvoiceNow != null ? new Option(isInvoiceNow) : default, isProrate: isProrate != null ? new Option(isProrate) : default, customerId: customerId, planCode: planCode ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateBillingAgreementRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IsInvoiceNowOption.IsSet && value.IsInvoiceNow != null) + { + writer.WritePropertyName("is_invoice_now"); + serializer.Serialize(writer, value.IsInvoiceNow); + } + if (value.IsProrateOption.IsSet && value.IsProrate != null) + { + writer.WritePropertyName("is_prorate"); + serializer.Serialize(writer, value.IsProrate); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateCategoryRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateCategoryRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..eaab4e3 --- /dev/null +++ b/Kinde.Api/Converters/CreateCategoryRequestNewtonsoftConverter.cs @@ -0,0 +1,54 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateCategoryRequest that handles the Option<> structure + /// + public class CreateCategoryRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateCategoryRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateCategoryRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string name = default(string); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + CreateCategoryRequest.ContextEnum context = default(CreateCategoryRequest.ContextEnum); + if (jsonObject["context"] != null) + { + var contextStr = jsonObject["context"].ToObject(); + if (!string.IsNullOrEmpty(contextStr)) + { + context = CreateCategoryRequest.ContextEnumFromString(contextStr); + } + } + + return new CreateCategoryRequest( + name: name, context: context ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateCategoryRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateCategoryResponseCategoryNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateCategoryResponseCategoryNewtonsoftConverter.cs new file mode 100644 index 0000000..a1feae1 --- /dev/null +++ b/Kinde.Api/Converters/CreateCategoryResponseCategoryNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateCategoryResponseCategory that handles the Option<> structure + /// + public class CreateCategoryResponseCategoryNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateCategoryResponseCategory ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateCategoryResponseCategory existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + + return new CreateCategoryResponseCategory( + id: id != null ? new Option(id) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateCategoryResponseCategory value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateCategoryResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateCategoryResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..0f3e81c --- /dev/null +++ b/Kinde.Api/Converters/CreateCategoryResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateCategoryResponse that handles the Option<> structure + /// + public class CreateCategoryResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateCategoryResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateCategoryResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + CreateCategoryResponseCategory? category = default(CreateCategoryResponseCategory?); + if (jsonObject["category"] != null) + { + category = jsonObject["category"].ToObject(serializer); + } + + return new CreateCategoryResponse( + message: message != null ? new Option(message) : default, code: code != null ? new Option(code) : default, category: category != null ? new Option(category) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateCategoryResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.CategoryOption.IsSet && value.Category != null) + { + writer.WritePropertyName("category"); + serializer.Serialize(writer, value.Category); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateConnectionRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateConnectionRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..3d491b6 --- /dev/null +++ b/Kinde.Api/Converters/CreateConnectionRequestNewtonsoftConverter.cs @@ -0,0 +1,105 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateConnectionRequest that handles the Option<> structure + /// + public class CreateConnectionRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateConnectionRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateConnectionRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + CreateConnectionRequest.StrategyEnum? strategy = default(CreateConnectionRequest.StrategyEnum?); + if (jsonObject["strategy"] != null) + { + var strategyStr = jsonObject["strategy"].ToObject(); + if (!string.IsNullOrEmpty(strategyStr)) + { + strategy = CreateConnectionRequest.StrategyEnumFromString(strategyStr); + } + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? displayName = default(string?); + if (jsonObject["display_name"] != null) + { + displayName = jsonObject["display_name"].ToObject(); + } + List enabledApplications = default(List); + if (jsonObject["enabled_applications"] != null) + { + enabledApplications = jsonObject["enabled_applications"].ToObject>(serializer); + } + string? organizationCode = default(string?); + if (jsonObject["organization_code"] != null) + { + organizationCode = jsonObject["organization_code"].ToObject(); + } + CreateConnectionRequestOptions? options = default(CreateConnectionRequestOptions?); + if (jsonObject["options"] != null) + { + options = jsonObject["options"].ToObject(serializer); + } + + return new CreateConnectionRequest( + strategy: strategy != null ? new Option(strategy) : default, name: name != null ? new Option(name) : default, displayName: displayName != null ? new Option(displayName) : default, enabledApplications: enabledApplications != null ? new Option?>(enabledApplications) : default, organizationCode: organizationCode != null ? new Option(organizationCode) : default, options: options != null ? new Option(options) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateConnectionRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.StrategyOption.IsSet && value.Strategy != null) + { + writer.WritePropertyName("strategy"); + var strategyStr = CreateConnectionRequest.StrategyEnumToJsonValue(value.Strategy.Value); + writer.WriteValue(strategyStr); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.DisplayNameOption.IsSet && value.DisplayName != null) + { + writer.WritePropertyName("display_name"); + serializer.Serialize(writer, value.DisplayName); + } + if (value.EnabledApplicationsOption.IsSet) + { + writer.WritePropertyName("enabled_applications"); + serializer.Serialize(writer, value.EnabledApplications); + } + if (value.OrganizationCodeOption.IsSet && value.OrganizationCode != null) + { + writer.WritePropertyName("organization_code"); + serializer.Serialize(writer, value.OrganizationCode); + } + if (value.OptionsOption.IsSet && value.Options != null) + { + writer.WritePropertyName("options"); + serializer.Serialize(writer, value.Options); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateConnectionRequestOptionsOneOf1NewtonsoftConverter.cs b/Kinde.Api/Converters/CreateConnectionRequestOptionsOneOf1NewtonsoftConverter.cs new file mode 100644 index 0000000..9f5a2db --- /dev/null +++ b/Kinde.Api/Converters/CreateConnectionRequestOptionsOneOf1NewtonsoftConverter.cs @@ -0,0 +1,160 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateConnectionRequestOptionsOneOf1 that handles the Option<> structure + /// + public class CreateConnectionRequestOptionsOneOf1NewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateConnectionRequestOptionsOneOf1 ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateConnectionRequestOptionsOneOf1 existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? clientId = default(string?); + if (jsonObject["client_id"] != null) + { + clientId = jsonObject["client_id"].ToObject(); + } + string? clientSecret = default(string?); + if (jsonObject["client_secret"] != null) + { + clientSecret = jsonObject["client_secret"].ToObject(); + } + List homeRealmDomains = default(List); + if (jsonObject["home_realm_domains"] != null) + { + homeRealmDomains = jsonObject["home_realm_domains"].ToObject>(serializer); + } + string? entraIdDomain = default(string?); + if (jsonObject["entra_id_domain"] != null) + { + entraIdDomain = jsonObject["entra_id_domain"].ToObject(); + } + bool? isUseCommonEndpoint = default(bool?); + if (jsonObject["is_use_common_endpoint"] != null) + { + isUseCommonEndpoint = jsonObject["is_use_common_endpoint"].ToObject(serializer); + } + bool? isSyncUserProfileOnLogin = default(bool?); + if (jsonObject["is_sync_user_profile_on_login"] != null) + { + isSyncUserProfileOnLogin = jsonObject["is_sync_user_profile_on_login"].ToObject(serializer); + } + bool? isRetrieveProviderUserGroups = default(bool?); + if (jsonObject["is_retrieve_provider_user_groups"] != null) + { + isRetrieveProviderUserGroups = jsonObject["is_retrieve_provider_user_groups"].ToObject(serializer); + } + bool? isExtendedAttributesRequired = default(bool?); + if (jsonObject["is_extended_attributes_required"] != null) + { + isExtendedAttributesRequired = jsonObject["is_extended_attributes_required"].ToObject(serializer); + } + bool? isAutoJoinOrganizationEnabled = default(bool?); + if (jsonObject["is_auto_join_organization_enabled"] != null) + { + isAutoJoinOrganizationEnabled = jsonObject["is_auto_join_organization_enabled"].ToObject(serializer); + } + bool? isCreateMissingUser = default(bool?); + if (jsonObject["is_create_missing_user"] != null) + { + isCreateMissingUser = jsonObject["is_create_missing_user"].ToObject(serializer); + } + bool? isForceShowSsoButton = default(bool?); + if (jsonObject["is_force_show_sso_button"] != null) + { + isForceShowSsoButton = jsonObject["is_force_show_sso_button"].ToObject(serializer); + } + Dictionary upstreamParams = default(Dictionary); + if (jsonObject["upstream_params"] != null) + { + upstreamParams = jsonObject["upstream_params"].ToObject>(serializer); + } + + return new CreateConnectionRequestOptionsOneOf1( + clientId: clientId != null ? new Option(clientId) : default, clientSecret: clientSecret != null ? new Option(clientSecret) : default, homeRealmDomains: homeRealmDomains != null ? new Option?>(homeRealmDomains) : default, entraIdDomain: entraIdDomain != null ? new Option(entraIdDomain) : default, isUseCommonEndpoint: isUseCommonEndpoint != null ? new Option(isUseCommonEndpoint) : default, isSyncUserProfileOnLogin: isSyncUserProfileOnLogin != null ? new Option(isSyncUserProfileOnLogin) : default, isRetrieveProviderUserGroups: isRetrieveProviderUserGroups != null ? new Option(isRetrieveProviderUserGroups) : default, isExtendedAttributesRequired: isExtendedAttributesRequired != null ? new Option(isExtendedAttributesRequired) : default, isAutoJoinOrganizationEnabled: isAutoJoinOrganizationEnabled != null ? new Option(isAutoJoinOrganizationEnabled) : default, isCreateMissingUser: isCreateMissingUser != null ? new Option(isCreateMissingUser) : default, isForceShowSsoButton: isForceShowSsoButton != null ? new Option(isForceShowSsoButton) : default, upstreamParams: upstreamParams != null ? new Option>(upstreamParams) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateConnectionRequestOptionsOneOf1 value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.ClientIdOption.IsSet && value.ClientId != null) + { + writer.WritePropertyName("client_id"); + serializer.Serialize(writer, value.ClientId); + } + if (value.ClientSecretOption.IsSet && value.ClientSecret != null) + { + writer.WritePropertyName("client_secret"); + serializer.Serialize(writer, value.ClientSecret); + } + if (value.HomeRealmDomainsOption.IsSet) + { + writer.WritePropertyName("home_realm_domains"); + serializer.Serialize(writer, value.HomeRealmDomains); + } + if (value.EntraIdDomainOption.IsSet && value.EntraIdDomain != null) + { + writer.WritePropertyName("entra_id_domain"); + serializer.Serialize(writer, value.EntraIdDomain); + } + if (value.IsUseCommonEndpointOption.IsSet && value.IsUseCommonEndpoint != null) + { + writer.WritePropertyName("is_use_common_endpoint"); + serializer.Serialize(writer, value.IsUseCommonEndpoint); + } + if (value.IsSyncUserProfileOnLoginOption.IsSet && value.IsSyncUserProfileOnLogin != null) + { + writer.WritePropertyName("is_sync_user_profile_on_login"); + serializer.Serialize(writer, value.IsSyncUserProfileOnLogin); + } + if (value.IsRetrieveProviderUserGroupsOption.IsSet && value.IsRetrieveProviderUserGroups != null) + { + writer.WritePropertyName("is_retrieve_provider_user_groups"); + serializer.Serialize(writer, value.IsRetrieveProviderUserGroups); + } + if (value.IsExtendedAttributesRequiredOption.IsSet && value.IsExtendedAttributesRequired != null) + { + writer.WritePropertyName("is_extended_attributes_required"); + serializer.Serialize(writer, value.IsExtendedAttributesRequired); + } + if (value.IsAutoJoinOrganizationEnabledOption.IsSet && value.IsAutoJoinOrganizationEnabled != null) + { + writer.WritePropertyName("is_auto_join_organization_enabled"); + serializer.Serialize(writer, value.IsAutoJoinOrganizationEnabled); + } + if (value.IsCreateMissingUserOption.IsSet && value.IsCreateMissingUser != null) + { + writer.WritePropertyName("is_create_missing_user"); + serializer.Serialize(writer, value.IsCreateMissingUser); + } + if (value.IsForceShowSsoButtonOption.IsSet && value.IsForceShowSsoButton != null) + { + writer.WritePropertyName("is_force_show_sso_button"); + serializer.Serialize(writer, value.IsForceShowSsoButton); + } + if (value.UpstreamParamsOption.IsSet) + { + writer.WritePropertyName("upstream_params"); + serializer.Serialize(writer, value.UpstreamParams); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateConnectionRequestOptionsOneOf2NewtonsoftConverter.cs b/Kinde.Api/Converters/CreateConnectionRequestOptionsOneOf2NewtonsoftConverter.cs new file mode 100644 index 0000000..b18a1d2 --- /dev/null +++ b/Kinde.Api/Converters/CreateConnectionRequestOptionsOneOf2NewtonsoftConverter.cs @@ -0,0 +1,180 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateConnectionRequestOptionsOneOf2 that handles the Option<> structure + /// + public class CreateConnectionRequestOptionsOneOf2NewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateConnectionRequestOptionsOneOf2 ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateConnectionRequestOptionsOneOf2 existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + List homeRealmDomains = default(List); + if (jsonObject["home_realm_domains"] != null) + { + homeRealmDomains = jsonObject["home_realm_domains"].ToObject>(serializer); + } + string? samlEntityId = default(string?); + if (jsonObject["saml_entity_id"] != null) + { + samlEntityId = jsonObject["saml_entity_id"].ToObject(); + } + string? samlAcsUrl = default(string?); + if (jsonObject["saml_acs_url"] != null) + { + samlAcsUrl = jsonObject["saml_acs_url"].ToObject(); + } + string? samlIdpMetadataUrl = default(string?); + if (jsonObject["saml_idp_metadata_url"] != null) + { + samlIdpMetadataUrl = jsonObject["saml_idp_metadata_url"].ToObject(); + } + string? samlSignInUrl = default(string?); + if (jsonObject["saml_sign_in_url"] != null) + { + samlSignInUrl = jsonObject["saml_sign_in_url"].ToObject(); + } + string? samlEmailKeyAttr = default(string?); + if (jsonObject["saml_email_key_attr"] != null) + { + samlEmailKeyAttr = jsonObject["saml_email_key_attr"].ToObject(); + } + string? samlFirstNameKeyAttr = default(string?); + if (jsonObject["saml_first_name_key_attr"] != null) + { + samlFirstNameKeyAttr = jsonObject["saml_first_name_key_attr"].ToObject(); + } + string? samlLastNameKeyAttr = default(string?); + if (jsonObject["saml_last_name_key_attr"] != null) + { + samlLastNameKeyAttr = jsonObject["saml_last_name_key_attr"].ToObject(); + } + bool? isCreateMissingUser = default(bool?); + if (jsonObject["is_create_missing_user"] != null) + { + isCreateMissingUser = jsonObject["is_create_missing_user"].ToObject(serializer); + } + bool? isForceShowSsoButton = default(bool?); + if (jsonObject["is_force_show_sso_button"] != null) + { + isForceShowSsoButton = jsonObject["is_force_show_sso_button"].ToObject(serializer); + } + Dictionary upstreamParams = default(Dictionary); + if (jsonObject["upstream_params"] != null) + { + upstreamParams = jsonObject["upstream_params"].ToObject>(serializer); + } + string? samlSigningCertificate = default(string?); + if (jsonObject["saml_signing_certificate"] != null) + { + samlSigningCertificate = jsonObject["saml_signing_certificate"].ToObject(); + } + string? samlSigningPrivateKey = default(string?); + if (jsonObject["saml_signing_private_key"] != null) + { + samlSigningPrivateKey = jsonObject["saml_signing_private_key"].ToObject(); + } + bool? isAutoJoinOrganizationEnabled = default(bool?); + if (jsonObject["is_auto_join_organization_enabled"] != null) + { + isAutoJoinOrganizationEnabled = jsonObject["is_auto_join_organization_enabled"].ToObject(serializer); + } + + return new CreateConnectionRequestOptionsOneOf2( + homeRealmDomains: homeRealmDomains != null ? new Option?>(homeRealmDomains) : default, samlEntityId: samlEntityId != null ? new Option(samlEntityId) : default, samlAcsUrl: samlAcsUrl != null ? new Option(samlAcsUrl) : default, samlIdpMetadataUrl: samlIdpMetadataUrl != null ? new Option(samlIdpMetadataUrl) : default, samlSignInUrl: samlSignInUrl != null ? new Option(samlSignInUrl) : default, samlEmailKeyAttr: samlEmailKeyAttr != null ? new Option(samlEmailKeyAttr) : default, samlFirstNameKeyAttr: samlFirstNameKeyAttr != null ? new Option(samlFirstNameKeyAttr) : default, samlLastNameKeyAttr: samlLastNameKeyAttr != null ? new Option(samlLastNameKeyAttr) : default, isCreateMissingUser: isCreateMissingUser != null ? new Option(isCreateMissingUser) : default, isForceShowSsoButton: isForceShowSsoButton != null ? new Option(isForceShowSsoButton) : default, upstreamParams: upstreamParams != null ? new Option>(upstreamParams) : default, samlSigningCertificate: samlSigningCertificate != null ? new Option(samlSigningCertificate) : default, samlSigningPrivateKey: samlSigningPrivateKey != null ? new Option(samlSigningPrivateKey) : default, isAutoJoinOrganizationEnabled: isAutoJoinOrganizationEnabled != null ? new Option(isAutoJoinOrganizationEnabled) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateConnectionRequestOptionsOneOf2 value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.HomeRealmDomainsOption.IsSet) + { + writer.WritePropertyName("home_realm_domains"); + serializer.Serialize(writer, value.HomeRealmDomains); + } + if (value.SamlEntityIdOption.IsSet && value.SamlEntityId != null) + { + writer.WritePropertyName("saml_entity_id"); + serializer.Serialize(writer, value.SamlEntityId); + } + if (value.SamlAcsUrlOption.IsSet && value.SamlAcsUrl != null) + { + writer.WritePropertyName("saml_acs_url"); + serializer.Serialize(writer, value.SamlAcsUrl); + } + if (value.SamlIdpMetadataUrlOption.IsSet && value.SamlIdpMetadataUrl != null) + { + writer.WritePropertyName("saml_idp_metadata_url"); + serializer.Serialize(writer, value.SamlIdpMetadataUrl); + } + if (value.SamlSignInUrlOption.IsSet && value.SamlSignInUrl != null) + { + writer.WritePropertyName("saml_sign_in_url"); + serializer.Serialize(writer, value.SamlSignInUrl); + } + if (value.SamlEmailKeyAttrOption.IsSet && value.SamlEmailKeyAttr != null) + { + writer.WritePropertyName("saml_email_key_attr"); + serializer.Serialize(writer, value.SamlEmailKeyAttr); + } + if (value.SamlFirstNameKeyAttrOption.IsSet && value.SamlFirstNameKeyAttr != null) + { + writer.WritePropertyName("saml_first_name_key_attr"); + serializer.Serialize(writer, value.SamlFirstNameKeyAttr); + } + if (value.SamlLastNameKeyAttrOption.IsSet && value.SamlLastNameKeyAttr != null) + { + writer.WritePropertyName("saml_last_name_key_attr"); + serializer.Serialize(writer, value.SamlLastNameKeyAttr); + } + if (value.IsCreateMissingUserOption.IsSet && value.IsCreateMissingUser != null) + { + writer.WritePropertyName("is_create_missing_user"); + serializer.Serialize(writer, value.IsCreateMissingUser); + } + if (value.IsForceShowSsoButtonOption.IsSet && value.IsForceShowSsoButton != null) + { + writer.WritePropertyName("is_force_show_sso_button"); + serializer.Serialize(writer, value.IsForceShowSsoButton); + } + if (value.UpstreamParamsOption.IsSet) + { + writer.WritePropertyName("upstream_params"); + serializer.Serialize(writer, value.UpstreamParams); + } + if (value.SamlSigningCertificateOption.IsSet && value.SamlSigningCertificate != null) + { + writer.WritePropertyName("saml_signing_certificate"); + serializer.Serialize(writer, value.SamlSigningCertificate); + } + if (value.SamlSigningPrivateKeyOption.IsSet && value.SamlSigningPrivateKey != null) + { + writer.WritePropertyName("saml_signing_private_key"); + serializer.Serialize(writer, value.SamlSigningPrivateKey); + } + if (value.IsAutoJoinOrganizationEnabledOption.IsSet && value.IsAutoJoinOrganizationEnabled != null) + { + writer.WritePropertyName("is_auto_join_organization_enabled"); + serializer.Serialize(writer, value.IsAutoJoinOrganizationEnabled); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateConnectionRequestOptionsOneOfNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateConnectionRequestOptionsOneOfNewtonsoftConverter.cs new file mode 100644 index 0000000..0adbc51 --- /dev/null +++ b/Kinde.Api/Converters/CreateConnectionRequestOptionsOneOfNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateConnectionRequestOptionsOneOf that handles the Option<> structure + /// + public class CreateConnectionRequestOptionsOneOfNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateConnectionRequestOptionsOneOf ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateConnectionRequestOptionsOneOf existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? clientId = default(string?); + if (jsonObject["client_id"] != null) + { + clientId = jsonObject["client_id"].ToObject(); + } + string? clientSecret = default(string?); + if (jsonObject["client_secret"] != null) + { + clientSecret = jsonObject["client_secret"].ToObject(); + } + bool? isUseCustomDomain = default(bool?); + if (jsonObject["is_use_custom_domain"] != null) + { + isUseCustomDomain = jsonObject["is_use_custom_domain"].ToObject(serializer); + } + + return new CreateConnectionRequestOptionsOneOf( + clientId: clientId != null ? new Option(clientId) : default, clientSecret: clientSecret != null ? new Option(clientSecret) : default, isUseCustomDomain: isUseCustomDomain != null ? new Option(isUseCustomDomain) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateConnectionRequestOptionsOneOf value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.ClientIdOption.IsSet && value.ClientId != null) + { + writer.WritePropertyName("client_id"); + serializer.Serialize(writer, value.ClientId); + } + if (value.ClientSecretOption.IsSet && value.ClientSecret != null) + { + writer.WritePropertyName("client_secret"); + serializer.Serialize(writer, value.ClientSecret); + } + if (value.IsUseCustomDomainOption.IsSet && value.IsUseCustomDomain != null) + { + writer.WritePropertyName("is_use_custom_domain"); + serializer.Serialize(writer, value.IsUseCustomDomain); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateConnectionResponseConnectionNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateConnectionResponseConnectionNewtonsoftConverter.cs new file mode 100644 index 0000000..8cf2510 --- /dev/null +++ b/Kinde.Api/Converters/CreateConnectionResponseConnectionNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateConnectionResponseConnection that handles the Option<> structure + /// + public class CreateConnectionResponseConnectionNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateConnectionResponseConnection ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateConnectionResponseConnection existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + + return new CreateConnectionResponseConnection( + id: id != null ? new Option(id) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateConnectionResponseConnection value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateConnectionResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateConnectionResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..68b45c0 --- /dev/null +++ b/Kinde.Api/Converters/CreateConnectionResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateConnectionResponse that handles the Option<> structure + /// + public class CreateConnectionResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateConnectionResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateConnectionResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + CreateConnectionResponseConnection? connection = default(CreateConnectionResponseConnection?); + if (jsonObject["connection"] != null) + { + connection = jsonObject["connection"].ToObject(serializer); + } + + return new CreateConnectionResponse( + message: message != null ? new Option(message) : default, code: code != null ? new Option(code) : default, connection: connection != null ? new Option(connection) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateConnectionResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.ConnectionOption.IsSet && value.Connection != null) + { + writer.WritePropertyName("connection"); + serializer.Serialize(writer, value.Connection); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateEnvironmentVariableRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateEnvironmentVariableRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..eb30663 --- /dev/null +++ b/Kinde.Api/Converters/CreateEnvironmentVariableRequestNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateEnvironmentVariableRequest that handles the Option<> structure + /// + public class CreateEnvironmentVariableRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateEnvironmentVariableRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateEnvironmentVariableRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + bool? isSecret = default(bool?); + if (jsonObject["is_secret"] != null) + { + isSecret = jsonObject["is_secret"].ToObject(serializer); + } + string key = default(string); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + string value = default(string); + if (jsonObject["value"] != null) + { + value = jsonObject["value"].ToObject(); + } + + return new CreateEnvironmentVariableRequest( + isSecret: isSecret != null ? new Option(isSecret) : default, key: key, value: value ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateEnvironmentVariableRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IsSecretOption.IsSet && value.IsSecret != null) + { + writer.WritePropertyName("is_secret"); + serializer.Serialize(writer, value.IsSecret); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateEnvironmentVariableResponseEnvironmentVariableNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateEnvironmentVariableResponseEnvironmentVariableNewtonsoftConverter.cs new file mode 100644 index 0000000..24e1fb1 --- /dev/null +++ b/Kinde.Api/Converters/CreateEnvironmentVariableResponseEnvironmentVariableNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateEnvironmentVariableResponseEnvironmentVariable that handles the Option<> structure + /// + public class CreateEnvironmentVariableResponseEnvironmentVariableNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateEnvironmentVariableResponseEnvironmentVariable ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateEnvironmentVariableResponseEnvironmentVariable existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + + return new CreateEnvironmentVariableResponseEnvironmentVariable( + id: id != null ? new Option(id) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateEnvironmentVariableResponseEnvironmentVariable value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateEnvironmentVariableResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateEnvironmentVariableResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..5665f95 --- /dev/null +++ b/Kinde.Api/Converters/CreateEnvironmentVariableResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateEnvironmentVariableResponse that handles the Option<> structure + /// + public class CreateEnvironmentVariableResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateEnvironmentVariableResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateEnvironmentVariableResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + CreateEnvironmentVariableResponseEnvironmentVariable? environmentVariable = default(CreateEnvironmentVariableResponseEnvironmentVariable?); + if (jsonObject["environment_variable"] != null) + { + environmentVariable = jsonObject["environment_variable"].ToObject(serializer); + } + + return new CreateEnvironmentVariableResponse( + message: message != null ? new Option(message) : default, code: code != null ? new Option(code) : default, environmentVariable: environmentVariable != null ? new Option(environmentVariable) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateEnvironmentVariableResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.EnvironmentVariableOption.IsSet && value.EnvironmentVariable != null) + { + writer.WritePropertyName("environment_variable"); + serializer.Serialize(writer, value.EnvironmentVariable); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateFeatureFlagRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateFeatureFlagRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..715307e --- /dev/null +++ b/Kinde.Api/Converters/CreateFeatureFlagRequestNewtonsoftConverter.cs @@ -0,0 +1,89 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateFeatureFlagRequest that handles the Option<> structure + /// + public class CreateFeatureFlagRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateFeatureFlagRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateFeatureFlagRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + CreateFeatureFlagRequest.AllowOverrideLevelEnum? allowOverrideLevel = default(CreateFeatureFlagRequest.AllowOverrideLevelEnum?); + if (jsonObject["allow_override_level"] != null) + { + var allowOverrideLevelStr = jsonObject["allow_override_level"].ToObject(); + if (!string.IsNullOrEmpty(allowOverrideLevelStr)) + { + allowOverrideLevel = CreateFeatureFlagRequest.AllowOverrideLevelEnumFromString(allowOverrideLevelStr); + } + } + string? description = default(string?); + if (jsonObject["description"] != null) + { + description = jsonObject["description"].ToObject(); + } + string name = default(string); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string key = default(string); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + CreateFeatureFlagRequest.TypeEnum type = default(CreateFeatureFlagRequest.TypeEnum); + if (jsonObject["type"] != null) + { + var typeStr = jsonObject["type"].ToObject(); + if (!string.IsNullOrEmpty(typeStr)) + { + type = CreateFeatureFlagRequest.TypeEnumFromString(typeStr); + } + } + string defaultValue = default(string); + if (jsonObject["default_value"] != null) + { + defaultValue = jsonObject["default_value"].ToObject(); + } + + return new CreateFeatureFlagRequest( + allowOverrideLevel: allowOverrideLevel != null ? new Option(allowOverrideLevel) : default, description: description != null ? new Option(description) : default, name: name, key: key, type: type, defaultValue: defaultValue ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateFeatureFlagRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.AllowOverrideLevelOption.IsSet && value.AllowOverrideLevel != null) + { + writer.WritePropertyName("allow_override_level"); + var allowOverrideLevelStr = CreateFeatureFlagRequest.AllowOverrideLevelEnumToJsonValue(value.AllowOverrideLevel.Value); + writer.WriteValue(allowOverrideLevelStr); + } + if (value.DescriptionOption.IsSet && value.Description != null) + { + writer.WritePropertyName("description"); + serializer.Serialize(writer, value.Description); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateIdentityResponseIdentityNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateIdentityResponseIdentityNewtonsoftConverter.cs new file mode 100644 index 0000000..78ef2bd --- /dev/null +++ b/Kinde.Api/Converters/CreateIdentityResponseIdentityNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateIdentityResponseIdentity that handles the Option<> structure + /// + public class CreateIdentityResponseIdentityNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateIdentityResponseIdentity ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateIdentityResponseIdentity existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + + return new CreateIdentityResponseIdentity( + id: id != null ? new Option(id) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateIdentityResponseIdentity value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateIdentityResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateIdentityResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..3ec6f45 --- /dev/null +++ b/Kinde.Api/Converters/CreateIdentityResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateIdentityResponse that handles the Option<> structure + /// + public class CreateIdentityResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateIdentityResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateIdentityResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + CreateIdentityResponseIdentity? identity = default(CreateIdentityResponseIdentity?); + if (jsonObject["identity"] != null) + { + identity = jsonObject["identity"].ToObject(serializer); + } + + return new CreateIdentityResponse( + message: message != null ? new Option(message) : default, code: code != null ? new Option(code) : default, identity: identity != null ? new Option(identity) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateIdentityResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.IdentityOption.IsSet && value.Identity != null) + { + writer.WritePropertyName("identity"); + serializer.Serialize(writer, value.Identity); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateMeterUsageRecordRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateMeterUsageRecordRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..0067111 --- /dev/null +++ b/Kinde.Api/Converters/CreateMeterUsageRecordRequestNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateMeterUsageRecordRequest that handles the Option<> structure + /// + public class CreateMeterUsageRecordRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateMeterUsageRecordRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateMeterUsageRecordRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + CreateMeterUsageRecordRequest.MeterTypeCodeEnum? meterTypeCode = default(CreateMeterUsageRecordRequest.MeterTypeCodeEnum?); + if (jsonObject["meter_type_code"] != null) + { + var meterTypeCodeStr = jsonObject["meter_type_code"].ToObject(); + if (!string.IsNullOrEmpty(meterTypeCodeStr)) + { + meterTypeCode = CreateMeterUsageRecordRequest.MeterTypeCodeEnumFromString(meterTypeCodeStr); + } + } + DateTimeOffset? meterUsageTimestamp = default(DateTimeOffset?); + if (jsonObject["meter_usage_timestamp"] != null) + { + meterUsageTimestamp = jsonObject["meter_usage_timestamp"].ToObject(serializer); + } + string customerAgreementId = default(string); + if (jsonObject["customer_agreement_id"] != null) + { + customerAgreementId = jsonObject["customer_agreement_id"].ToObject(); + } + string billingFeatureCode = default(string); + if (jsonObject["billing_feature_code"] != null) + { + billingFeatureCode = jsonObject["billing_feature_code"].ToObject(); + } + string meterValue = default(string); + if (jsonObject["meter_value"] != null) + { + meterValue = jsonObject["meter_value"].ToObject(); + } + + return new CreateMeterUsageRecordRequest( + meterTypeCode: meterTypeCode != null ? new Option(meterTypeCode) : default, meterUsageTimestamp: meterUsageTimestamp != null ? new Option(meterUsageTimestamp) : default, customerAgreementId: customerAgreementId, billingFeatureCode: billingFeatureCode, meterValue: meterValue ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateMeterUsageRecordRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.MeterTypeCodeOption.IsSet && value.MeterTypeCode != null) + { + writer.WritePropertyName("meter_type_code"); + var meterTypeCodeStr = CreateMeterUsageRecordRequest.MeterTypeCodeEnumToJsonValue(value.MeterTypeCode.Value); + writer.WriteValue(meterTypeCodeStr); + } + if (value.MeterUsageTimestampOption.IsSet && value.MeterUsageTimestamp != null) + { + writer.WritePropertyName("meter_usage_timestamp"); + serializer.Serialize(writer, value.MeterUsageTimestamp); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateMeterUsageRecordResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateMeterUsageRecordResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..59ace17 --- /dev/null +++ b/Kinde.Api/Converters/CreateMeterUsageRecordResponseNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateMeterUsageRecordResponse that handles the Option<> structure + /// + public class CreateMeterUsageRecordResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateMeterUsageRecordResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateMeterUsageRecordResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + + return new CreateMeterUsageRecordResponse( + message: message != null ? new Option(message) : default, code: code != null ? new Option(code) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateMeterUsageRecordResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateOrganizationRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateOrganizationRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..decba7a --- /dev/null +++ b/Kinde.Api/Converters/CreateOrganizationRequestNewtonsoftConverter.cs @@ -0,0 +1,225 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateOrganizationRequest that handles the Option<> structure + /// + public class CreateOrganizationRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateOrganizationRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateOrganizationRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + Dictionary featureFlags = default(Dictionary); + if (jsonObject["feature_flags"] != null) + { + featureFlags = jsonObject["feature_flags"].ToObject>(serializer); + } + string? externalId = default(string?); + if (jsonObject["external_id"] != null) + { + externalId = jsonObject["external_id"].ToObject(); + } + string? backgroundColor = default(string?); + if (jsonObject["background_color"] != null) + { + backgroundColor = jsonObject["background_color"].ToObject(); + } + string? buttonColor = default(string?); + if (jsonObject["button_color"] != null) + { + buttonColor = jsonObject["button_color"].ToObject(); + } + string? buttonTextColor = default(string?); + if (jsonObject["button_text_color"] != null) + { + buttonTextColor = jsonObject["button_text_color"].ToObject(); + } + string? linkColor = default(string?); + if (jsonObject["link_color"] != null) + { + linkColor = jsonObject["link_color"].ToObject(); + } + string? backgroundColorDark = default(string?); + if (jsonObject["background_color_dark"] != null) + { + backgroundColorDark = jsonObject["background_color_dark"].ToObject(); + } + string? buttonColorDark = default(string?); + if (jsonObject["button_color_dark"] != null) + { + buttonColorDark = jsonObject["button_color_dark"].ToObject(); + } + string? buttonTextColorDark = default(string?); + if (jsonObject["button_text_color_dark"] != null) + { + buttonTextColorDark = jsonObject["button_text_color_dark"].ToObject(); + } + string? linkColorDark = default(string?); + if (jsonObject["link_color_dark"] != null) + { + linkColorDark = jsonObject["link_color_dark"].ToObject(); + } + string? themeCode = default(string?); + if (jsonObject["theme_code"] != null) + { + themeCode = jsonObject["theme_code"].ToObject(); + } + string? handle = default(string?); + if (jsonObject["handle"] != null) + { + handle = jsonObject["handle"].ToObject(); + } + bool? isAllowRegistrations = default(bool?); + if (jsonObject["is_allow_registrations"] != null) + { + isAllowRegistrations = jsonObject["is_allow_registrations"].ToObject(serializer); + } + string? senderName = default(string?); + if (jsonObject["sender_name"] != null) + { + senderName = jsonObject["sender_name"].ToObject(); + } + string? senderEmail = default(string?); + if (jsonObject["sender_email"] != null) + { + senderEmail = jsonObject["sender_email"].ToObject(); + } + bool? isCreateBillingCustomer = default(bool?); + if (jsonObject["is_create_billing_customer"] != null) + { + isCreateBillingCustomer = jsonObject["is_create_billing_customer"].ToObject(serializer); + } + string? billingEmail = default(string?); + if (jsonObject["billing_email"] != null) + { + billingEmail = jsonObject["billing_email"].ToObject(); + } + string? billingPlanCode = default(string?); + if (jsonObject["billing_plan_code"] != null) + { + billingPlanCode = jsonObject["billing_plan_code"].ToObject(); + } + string name = default(string); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + + return new CreateOrganizationRequest( + featureFlags: featureFlags != null ? new Option>(featureFlags) : default, externalId: externalId != null ? new Option(externalId) : default, backgroundColor: backgroundColor != null ? new Option(backgroundColor) : default, buttonColor: buttonColor != null ? new Option(buttonColor) : default, buttonTextColor: buttonTextColor != null ? new Option(buttonTextColor) : default, linkColor: linkColor != null ? new Option(linkColor) : default, backgroundColorDark: backgroundColorDark != null ? new Option(backgroundColorDark) : default, buttonColorDark: buttonColorDark != null ? new Option(buttonColorDark) : default, buttonTextColorDark: buttonTextColorDark != null ? new Option(buttonTextColorDark) : default, linkColorDark: linkColorDark != null ? new Option(linkColorDark) : default, themeCode: themeCode != null ? new Option(themeCode) : default, handle: handle != null ? new Option(handle) : default, isAllowRegistrations: isAllowRegistrations != null ? new Option(isAllowRegistrations) : default, senderName: senderName != null ? new Option(senderName) : default, senderEmail: senderEmail != null ? new Option(senderEmail) : default, isCreateBillingCustomer: isCreateBillingCustomer != null ? new Option(isCreateBillingCustomer) : default, billingEmail: billingEmail != null ? new Option(billingEmail) : default, billingPlanCode: billingPlanCode != null ? new Option(billingPlanCode) : default, name: name ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateOrganizationRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.FeatureFlagsOption.IsSet) + { + writer.WritePropertyName("feature_flags"); + serializer.Serialize(writer, value.FeatureFlags); + } + if (value.ExternalIdOption.IsSet && value.ExternalId != null) + { + writer.WritePropertyName("external_id"); + serializer.Serialize(writer, value.ExternalId); + } + if (value.BackgroundColorOption.IsSet && value.BackgroundColor != null) + { + writer.WritePropertyName("background_color"); + serializer.Serialize(writer, value.BackgroundColor); + } + if (value.ButtonColorOption.IsSet && value.ButtonColor != null) + { + writer.WritePropertyName("button_color"); + serializer.Serialize(writer, value.ButtonColor); + } + if (value.ButtonTextColorOption.IsSet && value.ButtonTextColor != null) + { + writer.WritePropertyName("button_text_color"); + serializer.Serialize(writer, value.ButtonTextColor); + } + if (value.LinkColorOption.IsSet && value.LinkColor != null) + { + writer.WritePropertyName("link_color"); + serializer.Serialize(writer, value.LinkColor); + } + if (value.BackgroundColorDarkOption.IsSet && value.BackgroundColorDark != null) + { + writer.WritePropertyName("background_color_dark"); + serializer.Serialize(writer, value.BackgroundColorDark); + } + if (value.ButtonColorDarkOption.IsSet && value.ButtonColorDark != null) + { + writer.WritePropertyName("button_color_dark"); + serializer.Serialize(writer, value.ButtonColorDark); + } + if (value.ButtonTextColorDarkOption.IsSet && value.ButtonTextColorDark != null) + { + writer.WritePropertyName("button_text_color_dark"); + serializer.Serialize(writer, value.ButtonTextColorDark); + } + if (value.LinkColorDarkOption.IsSet && value.LinkColorDark != null) + { + writer.WritePropertyName("link_color_dark"); + serializer.Serialize(writer, value.LinkColorDark); + } + if (value.ThemeCodeOption.IsSet && value.ThemeCode != null) + { + writer.WritePropertyName("theme_code"); + serializer.Serialize(writer, value.ThemeCode); + } + if (value.HandleOption.IsSet && value.Handle != null) + { + writer.WritePropertyName("handle"); + serializer.Serialize(writer, value.Handle); + } + if (value.IsAllowRegistrationsOption.IsSet && value.IsAllowRegistrations != null) + { + writer.WritePropertyName("is_allow_registrations"); + serializer.Serialize(writer, value.IsAllowRegistrations); + } + if (value.SenderNameOption.IsSet && value.SenderName != null) + { + writer.WritePropertyName("sender_name"); + serializer.Serialize(writer, value.SenderName); + } + if (value.SenderEmailOption.IsSet && value.SenderEmail != null) + { + writer.WritePropertyName("sender_email"); + serializer.Serialize(writer, value.SenderEmail); + } + if (value.IsCreateBillingCustomerOption.IsSet && value.IsCreateBillingCustomer != null) + { + writer.WritePropertyName("is_create_billing_customer"); + serializer.Serialize(writer, value.IsCreateBillingCustomer); + } + if (value.BillingEmailOption.IsSet && value.BillingEmail != null) + { + writer.WritePropertyName("billing_email"); + serializer.Serialize(writer, value.BillingEmail); + } + if (value.BillingPlanCodeOption.IsSet && value.BillingPlanCode != null) + { + writer.WritePropertyName("billing_plan_code"); + serializer.Serialize(writer, value.BillingPlanCode); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateOrganizationResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateOrganizationResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..005cfcd --- /dev/null +++ b/Kinde.Api/Converters/CreateOrganizationResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateOrganizationResponse that handles the Option<> structure + /// + public class CreateOrganizationResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateOrganizationResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateOrganizationResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + CreateOrganizationResponseOrganization? organization = default(CreateOrganizationResponseOrganization?); + if (jsonObject["organization"] != null) + { + organization = jsonObject["organization"].ToObject(serializer); + } + + return new CreateOrganizationResponse( + message: message != null ? new Option(message) : default, code: code != null ? new Option(code) : default, organization: organization != null ? new Option(organization) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateOrganizationResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.OrganizationOption.IsSet && value.Organization != null) + { + writer.WritePropertyName("organization"); + serializer.Serialize(writer, value.Organization); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateOrganizationResponseOrganizationNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateOrganizationResponseOrganizationNewtonsoftConverter.cs new file mode 100644 index 0000000..43058f7 --- /dev/null +++ b/Kinde.Api/Converters/CreateOrganizationResponseOrganizationNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateOrganizationResponseOrganization that handles the Option<> structure + /// + public class CreateOrganizationResponseOrganizationNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateOrganizationResponseOrganization ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateOrganizationResponseOrganization existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? billingCustomerId = default(string?); + if (jsonObject["billing_customer_id"] != null) + { + billingCustomerId = jsonObject["billing_customer_id"].ToObject(); + } + + return new CreateOrganizationResponseOrganization( + code: code != null ? new Option(code) : default, billingCustomerId: billingCustomerId != null ? new Option(billingCustomerId) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateOrganizationResponseOrganization value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.BillingCustomerIdOption.IsSet && value.BillingCustomerId != null) + { + writer.WritePropertyName("billing_customer_id"); + serializer.Serialize(writer, value.BillingCustomerId); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateOrganizationUserPermissionRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateOrganizationUserPermissionRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..a98b299 --- /dev/null +++ b/Kinde.Api/Converters/CreateOrganizationUserPermissionRequestNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateOrganizationUserPermissionRequest that handles the Option<> structure + /// + public class CreateOrganizationUserPermissionRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateOrganizationUserPermissionRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateOrganizationUserPermissionRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? permissionId = default(string?); + if (jsonObject["permission_id"] != null) + { + permissionId = jsonObject["permission_id"].ToObject(); + } + + return new CreateOrganizationUserPermissionRequest( + permissionId: permissionId != null ? new Option(permissionId) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateOrganizationUserPermissionRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.PermissionIdOption.IsSet && value.PermissionId != null) + { + writer.WritePropertyName("permission_id"); + serializer.Serialize(writer, value.PermissionId); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateOrganizationUserRoleRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateOrganizationUserRoleRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..53b88e2 --- /dev/null +++ b/Kinde.Api/Converters/CreateOrganizationUserRoleRequestNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateOrganizationUserRoleRequest that handles the Option<> structure + /// + public class CreateOrganizationUserRoleRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateOrganizationUserRoleRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateOrganizationUserRoleRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? roleId = default(string?); + if (jsonObject["role_id"] != null) + { + roleId = jsonObject["role_id"].ToObject(); + } + + return new CreateOrganizationUserRoleRequest( + roleId: roleId != null ? new Option(roleId) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateOrganizationUserRoleRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.RoleIdOption.IsSet && value.RoleId != null) + { + writer.WritePropertyName("role_id"); + serializer.Serialize(writer, value.RoleId); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreatePermissionRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/CreatePermissionRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..dee9ec8 --- /dev/null +++ b/Kinde.Api/Converters/CreatePermissionRequestNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreatePermissionRequest that handles the Option<> structure + /// + public class CreatePermissionRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreatePermissionRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreatePermissionRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? description = default(string?); + if (jsonObject["description"] != null) + { + description = jsonObject["description"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + + return new CreatePermissionRequest( + name: name != null ? new Option(name) : default, description: description != null ? new Option(description) : default, key: key != null ? new Option(key) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreatePermissionRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.DescriptionOption.IsSet && value.Description != null) + { + writer.WritePropertyName("description"); + serializer.Serialize(writer, value.Description); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreatePropertyRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/CreatePropertyRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..c039ff4 --- /dev/null +++ b/Kinde.Api/Converters/CreatePropertyRequestNewtonsoftConverter.cs @@ -0,0 +1,88 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreatePropertyRequest that handles the Option<> structure + /// + public class CreatePropertyRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreatePropertyRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreatePropertyRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? description = default(string?); + if (jsonObject["description"] != null) + { + description = jsonObject["description"].ToObject(); + } + string name = default(string); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string key = default(string); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + CreatePropertyRequest.TypeEnum type = default(CreatePropertyRequest.TypeEnum); + if (jsonObject["type"] != null) + { + var typeStr = jsonObject["type"].ToObject(); + if (!string.IsNullOrEmpty(typeStr)) + { + type = CreatePropertyRequest.TypeEnumFromString(typeStr); + } + } + CreatePropertyRequest.ContextEnum context = default(CreatePropertyRequest.ContextEnum); + if (jsonObject["context"] != null) + { + var contextStr = jsonObject["context"].ToObject(); + if (!string.IsNullOrEmpty(contextStr)) + { + context = CreatePropertyRequest.ContextEnumFromString(contextStr); + } + } + bool isPrivate = default(bool); + if (jsonObject["is_private"] != null) + { + isPrivate = jsonObject["is_private"].ToObject(serializer); + } + string categoryId = default(string); + if (jsonObject["category_id"] != null) + { + categoryId = jsonObject["category_id"].ToObject(); + } + + return new CreatePropertyRequest( + description: description != null ? new Option(description) : default, name: name, key: key, type: type, context: context, isPrivate: isPrivate, categoryId: categoryId ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreatePropertyRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.DescriptionOption.IsSet && value.Description != null) + { + writer.WritePropertyName("description"); + serializer.Serialize(writer, value.Description); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreatePropertyResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/CreatePropertyResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..82e8a04 --- /dev/null +++ b/Kinde.Api/Converters/CreatePropertyResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreatePropertyResponse that handles the Option<> structure + /// + public class CreatePropertyResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreatePropertyResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreatePropertyResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + CreatePropertyResponseProperty? property = default(CreatePropertyResponseProperty?); + if (jsonObject["property"] != null) + { + property = jsonObject["property"].ToObject(serializer); + } + + return new CreatePropertyResponse( + message: message != null ? new Option(message) : default, code: code != null ? new Option(code) : default, property: property != null ? new Option(property) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreatePropertyResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.PropertyOption.IsSet && value.Property != null) + { + writer.WritePropertyName("property"); + serializer.Serialize(writer, value.Property); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreatePropertyResponsePropertyNewtonsoftConverter.cs b/Kinde.Api/Converters/CreatePropertyResponsePropertyNewtonsoftConverter.cs new file mode 100644 index 0000000..e0bd223 --- /dev/null +++ b/Kinde.Api/Converters/CreatePropertyResponsePropertyNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreatePropertyResponseProperty that handles the Option<> structure + /// + public class CreatePropertyResponsePropertyNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreatePropertyResponseProperty ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreatePropertyResponseProperty existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + + return new CreatePropertyResponseProperty( + id: id != null ? new Option(id) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreatePropertyResponseProperty value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateRoleRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateRoleRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..b01a9ea --- /dev/null +++ b/Kinde.Api/Converters/CreateRoleRequestNewtonsoftConverter.cs @@ -0,0 +1,90 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateRoleRequest that handles the Option<> structure + /// + public class CreateRoleRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateRoleRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateRoleRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? description = default(string?); + if (jsonObject["description"] != null) + { + description = jsonObject["description"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + bool? isDefaultRole = default(bool?); + if (jsonObject["is_default_role"] != null) + { + isDefaultRole = jsonObject["is_default_role"].ToObject(serializer); + } + Guid? assignmentPermissionId = default(Guid?); + if (jsonObject["assignment_permission_id"] != null) + { + assignmentPermissionId = jsonObject["assignment_permission_id"].ToObject(serializer); + } + + return new CreateRoleRequest( + name: name != null ? new Option(name) : default, description: description != null ? new Option(description) : default, key: key != null ? new Option(key) : default, isDefaultRole: isDefaultRole != null ? new Option(isDefaultRole) : default, assignmentPermissionId: assignmentPermissionId != null ? new Option(assignmentPermissionId) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateRoleRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.DescriptionOption.IsSet && value.Description != null) + { + writer.WritePropertyName("description"); + serializer.Serialize(writer, value.Description); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + if (value.IsDefaultRoleOption.IsSet && value.IsDefaultRole != null) + { + writer.WritePropertyName("is_default_role"); + serializer.Serialize(writer, value.IsDefaultRole); + } + if (value.AssignmentPermissionIdOption.IsSet && value.AssignmentPermissionId != null) + { + writer.WritePropertyName("assignment_permission_id"); + serializer.Serialize(writer, value.AssignmentPermissionId); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateRolesResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateRolesResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..3d58d46 --- /dev/null +++ b/Kinde.Api/Converters/CreateRolesResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateRolesResponse that handles the Option<> structure + /// + public class CreateRolesResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateRolesResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateRolesResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + CreateRolesResponseRole? role = default(CreateRolesResponseRole?); + if (jsonObject["role"] != null) + { + role = jsonObject["role"].ToObject(serializer); + } + + return new CreateRolesResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, role: role != null ? new Option(role) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateRolesResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.RoleOption.IsSet && value.Role != null) + { + writer.WritePropertyName("role"); + serializer.Serialize(writer, value.Role); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateRolesResponseRoleNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateRolesResponseRoleNewtonsoftConverter.cs new file mode 100644 index 0000000..0eb0402 --- /dev/null +++ b/Kinde.Api/Converters/CreateRolesResponseRoleNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateRolesResponseRole that handles the Option<> structure + /// + public class CreateRolesResponseRoleNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateRolesResponseRole ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateRolesResponseRole existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + + return new CreateRolesResponseRole( + id: id != null ? new Option(id) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateRolesResponseRole value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateSubscriberSuccessResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateSubscriberSuccessResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..73e125c --- /dev/null +++ b/Kinde.Api/Converters/CreateSubscriberSuccessResponseNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateSubscriberSuccessResponse that handles the Option<> structure + /// + public class CreateSubscriberSuccessResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateSubscriberSuccessResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateSubscriberSuccessResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + CreateSubscriberSuccessResponseSubscriber? subscriber = default(CreateSubscriberSuccessResponseSubscriber?); + if (jsonObject["subscriber"] != null) + { + subscriber = jsonObject["subscriber"].ToObject(serializer); + } + + return new CreateSubscriberSuccessResponse( + subscriber: subscriber != null ? new Option(subscriber) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateSubscriberSuccessResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.SubscriberOption.IsSet && value.Subscriber != null) + { + writer.WritePropertyName("subscriber"); + serializer.Serialize(writer, value.Subscriber); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateSubscriberSuccessResponseSubscriberNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateSubscriberSuccessResponseSubscriberNewtonsoftConverter.cs new file mode 100644 index 0000000..6cc81c4 --- /dev/null +++ b/Kinde.Api/Converters/CreateSubscriberSuccessResponseSubscriberNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateSubscriberSuccessResponseSubscriber that handles the Option<> structure + /// + public class CreateSubscriberSuccessResponseSubscriberNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateSubscriberSuccessResponseSubscriber ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateSubscriberSuccessResponseSubscriber existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? subscriberId = default(string?); + if (jsonObject["subscriber_id"] != null) + { + subscriberId = jsonObject["subscriber_id"].ToObject(); + } + + return new CreateSubscriberSuccessResponseSubscriber( + subscriberId: subscriberId != null ? new Option(subscriberId) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateSubscriberSuccessResponseSubscriber value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.SubscriberIdOption.IsSet && value.SubscriberId != null) + { + writer.WritePropertyName("subscriber_id"); + serializer.Serialize(writer, value.SubscriberId); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateUserRequestIdentitiesInnerDetailsNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateUserRequestIdentitiesInnerDetailsNewtonsoftConverter.cs new file mode 100644 index 0000000..daa07cd --- /dev/null +++ b/Kinde.Api/Converters/CreateUserRequestIdentitiesInnerDetailsNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateUserRequestIdentitiesInnerDetails that handles the Option<> structure + /// + public class CreateUserRequestIdentitiesInnerDetailsNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateUserRequestIdentitiesInnerDetails ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateUserRequestIdentitiesInnerDetails existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? email = default(string?); + if (jsonObject["email"] != null) + { + email = jsonObject["email"].ToObject(); + } + string? phone = default(string?); + if (jsonObject["phone"] != null) + { + phone = jsonObject["phone"].ToObject(); + } + string? phoneCountryId = default(string?); + if (jsonObject["phone_country_id"] != null) + { + phoneCountryId = jsonObject["phone_country_id"].ToObject(); + } + string? username = default(string?); + if (jsonObject["username"] != null) + { + username = jsonObject["username"].ToObject(); + } + + return new CreateUserRequestIdentitiesInnerDetails( + email: email != null ? new Option(email) : default, phone: phone != null ? new Option(phone) : default, phoneCountryId: phoneCountryId != null ? new Option(phoneCountryId) : default, username: username != null ? new Option(username) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateUserRequestIdentitiesInnerDetails value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.EmailOption.IsSet && value.Email != null) + { + writer.WritePropertyName("email"); + serializer.Serialize(writer, value.Email); + } + if (value.PhoneOption.IsSet && value.Phone != null) + { + writer.WritePropertyName("phone"); + serializer.Serialize(writer, value.Phone); + } + if (value.PhoneCountryIdOption.IsSet && value.PhoneCountryId != null) + { + writer.WritePropertyName("phone_country_id"); + serializer.Serialize(writer, value.PhoneCountryId); + } + if (value.UsernameOption.IsSet && value.Username != null) + { + writer.WritePropertyName("username"); + serializer.Serialize(writer, value.Username); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateUserRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateUserRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..513008a --- /dev/null +++ b/Kinde.Api/Converters/CreateUserRequestNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateUserRequest that handles the Option<> structure + /// + public class CreateUserRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateUserRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateUserRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + CreateUserRequestProfile? profile = default(CreateUserRequestProfile?); + if (jsonObject["profile"] != null) + { + profile = jsonObject["profile"].ToObject(serializer); + } + string? organizationCode = default(string?); + if (jsonObject["organization_code"] != null) + { + organizationCode = jsonObject["organization_code"].ToObject(); + } + string? providedId = default(string?); + if (jsonObject["provided_id"] != null) + { + providedId = jsonObject["provided_id"].ToObject(); + } + List identities = default(List); + if (jsonObject["identities"] != null) + { + identities = jsonObject["identities"].ToObject>(serializer); + } + + return new CreateUserRequest( + profile: profile != null ? new Option(profile) : default, organizationCode: organizationCode != null ? new Option(organizationCode) : default, providedId: providedId != null ? new Option(providedId) : default, identities: identities != null ? new Option?>(identities) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateUserRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.ProfileOption.IsSet && value.Profile != null) + { + writer.WritePropertyName("profile"); + serializer.Serialize(writer, value.Profile); + } + if (value.OrganizationCodeOption.IsSet && value.OrganizationCode != null) + { + writer.WritePropertyName("organization_code"); + serializer.Serialize(writer, value.OrganizationCode); + } + if (value.ProvidedIdOption.IsSet && value.ProvidedId != null) + { + writer.WritePropertyName("provided_id"); + serializer.Serialize(writer, value.ProvidedId); + } + if (value.IdentitiesOption.IsSet) + { + writer.WritePropertyName("identities"); + serializer.Serialize(writer, value.Identities); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateUserRequestProfileNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateUserRequestProfileNewtonsoftConverter.cs new file mode 100644 index 0000000..4d810bc --- /dev/null +++ b/Kinde.Api/Converters/CreateUserRequestProfileNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateUserRequestProfile that handles the Option<> structure + /// + public class CreateUserRequestProfileNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateUserRequestProfile ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateUserRequestProfile existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? givenName = default(string?); + if (jsonObject["given_name"] != null) + { + givenName = jsonObject["given_name"].ToObject(); + } + string? familyName = default(string?); + if (jsonObject["family_name"] != null) + { + familyName = jsonObject["family_name"].ToObject(); + } + string? picture = default(string?); + if (jsonObject["picture"] != null) + { + picture = jsonObject["picture"].ToObject(); + } + + return new CreateUserRequestProfile( + givenName: givenName != null ? new Option(givenName) : default, familyName: familyName != null ? new Option(familyName) : default, picture: picture != null ? new Option(picture) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateUserRequestProfile value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.GivenNameOption.IsSet && value.GivenName != null) + { + writer.WritePropertyName("given_name"); + serializer.Serialize(writer, value.GivenName); + } + if (value.FamilyNameOption.IsSet && value.FamilyName != null) + { + writer.WritePropertyName("family_name"); + serializer.Serialize(writer, value.FamilyName); + } + if (value.PictureOption.IsSet && value.Picture != null) + { + writer.WritePropertyName("picture"); + serializer.Serialize(writer, value.Picture); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateUserResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateUserResponseNewtonsoftConverter.cs index 2134eed..20c71f9 100644 --- a/Kinde.Api/Converters/CreateUserResponseNewtonsoftConverter.cs +++ b/Kinde.Api/Converters/CreateUserResponseNewtonsoftConverter.cs @@ -1,14 +1,14 @@ using System; using System.Collections.Generic; -using System.Linq; using Newtonsoft.Json; +using Newtonsoft.Json.Linq; using Kinde.Api.Model; using Kinde.Api.Client; namespace Kinde.Api.Converters { /// - /// Newtonsoft.Json converter for CreateUserResponse that handles the Option> structure + /// Newtonsoft.Json converter for CreateUserResponse that handles the Option<> structure /// public class CreateUserResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter { @@ -22,57 +22,26 @@ public override CreateUserResponse ReadJson(Newtonsoft.Json.JsonReader reader, T throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); } - string? id = null; - bool? created = null; - List? identities = null; + var jsonObject = JObject.Load(reader); - while (reader.Read()) + string? id = default(string?); + if (jsonObject["id"] != null) { - if (reader.TokenType == Newtonsoft.Json.JsonToken.EndObject) - break; - - if (reader.TokenType == Newtonsoft.Json.JsonToken.PropertyName) - { - string? propertyName = reader.Value?.ToString(); - reader.Read(); - - switch (propertyName) - { - case "id": - id = reader.Value?.ToString(); - break; - case "created": - created = reader.Value != null ? Convert.ToBoolean(reader.Value) : null; - break; - case "identities": - if (reader.TokenType == Newtonsoft.Json.JsonToken.StartArray) - { - identities = new List(); - while (reader.Read() && reader.TokenType != Newtonsoft.Json.JsonToken.EndArray) - { - if (reader.TokenType == Newtonsoft.Json.JsonToken.StartObject) - { - var identity = serializer.Deserialize(reader); - if (identity != null) - { - identities.Add(identity); - } - } - } - } - break; - default: - reader.Skip(); - break; - } - } + id = jsonObject["id"].ToObject(); + } + bool? created = default(bool?); + if (jsonObject["created"] != null) + { + created = jsonObject["created"].ToObject(serializer); + } + List identities = default(List); + if (jsonObject["identities"] != null) + { + identities = jsonObject["identities"].ToObject>(serializer); } return new CreateUserResponse( - id: id != null ? new Option(id) : default, - created: created != null ? new Option(created) : default, - identities: identities != null ? new Option?>(identities) : default - ); + id: id != null ? new Option(id) : default, created: created != null ? new Option(created) : default, identities: identities != null ? new Option?>(identities) : default ); } public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateUserResponse value, Newtonsoft.Json.JsonSerializer serializer) @@ -82,16 +51,14 @@ public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateUserResp if (value.IdOption.IsSet && value.Id != null) { writer.WritePropertyName("id"); - writer.WriteValue(value.Id); + serializer.Serialize(writer, value.Id); } - if (value.CreatedOption.IsSet && value.Created != null) { writer.WritePropertyName("created"); - writer.WriteValue(value.Created.Value); + serializer.Serialize(writer, value.Created); } - - if (value.IdentitiesOption.IsSet && value.Identities != null) + if (value.IdentitiesOption.IsSet) { writer.WritePropertyName("identities"); serializer.Serialize(writer, value.Identities); @@ -100,4 +67,4 @@ public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateUserResp writer.WriteEndObject(); } } -} +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateWebHookRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateWebHookRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..604da01 --- /dev/null +++ b/Kinde.Api/Converters/CreateWebHookRequestNewtonsoftConverter.cs @@ -0,0 +1,65 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateWebHookRequest that handles the Option<> structure + /// + public class CreateWebHookRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateWebHookRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateWebHookRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? description = default(string?); + if (jsonObject["description"] != null) + { + description = jsonObject["description"].ToObject(); + } + string endpoint = default(string); + if (jsonObject["endpoint"] != null) + { + endpoint = jsonObject["endpoint"].ToObject(); + } + List eventTypes = default(List); + if (jsonObject["event_types"] != null) + { + eventTypes = jsonObject["event_types"].ToObject>(serializer); + } + string name = default(string); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + + return new CreateWebHookRequest( + description: description != null ? new Option(description) : default, endpoint: endpoint, eventTypes: eventTypes, name: name ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateWebHookRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.DescriptionOption.IsSet && value.Description != null) + { + writer.WritePropertyName("description"); + serializer.Serialize(writer, value.Description); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateWebhookResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateWebhookResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..da08693 --- /dev/null +++ b/Kinde.Api/Converters/CreateWebhookResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateWebhookResponse that handles the Option<> structure + /// + public class CreateWebhookResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateWebhookResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateWebhookResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + CreateWebhookResponseWebhook? webhook = default(CreateWebhookResponseWebhook?); + if (jsonObject["webhook"] != null) + { + webhook = jsonObject["webhook"].ToObject(serializer); + } + + return new CreateWebhookResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, webhook: webhook != null ? new Option(webhook) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateWebhookResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.WebhookOption.IsSet && value.Webhook != null) + { + writer.WritePropertyName("webhook"); + serializer.Serialize(writer, value.Webhook); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CreateWebhookResponseWebhookNewtonsoftConverter.cs b/Kinde.Api/Converters/CreateWebhookResponseWebhookNewtonsoftConverter.cs new file mode 100644 index 0000000..31897cf --- /dev/null +++ b/Kinde.Api/Converters/CreateWebhookResponseWebhookNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for CreateWebhookResponseWebhook that handles the Option<> structure + /// + public class CreateWebhookResponseWebhookNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override CreateWebhookResponseWebhook ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, CreateWebhookResponseWebhook existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? endpoint = default(string?); + if (jsonObject["endpoint"] != null) + { + endpoint = jsonObject["endpoint"].ToObject(); + } + + return new CreateWebhookResponseWebhook( + id: id != null ? new Option(id) : default, endpoint: endpoint != null ? new Option(endpoint) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, CreateWebhookResponseWebhook value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.EndpointOption.IsSet && value.Endpoint != null) + { + writer.WritePropertyName("endpoint"); + serializer.Serialize(writer, value.Endpoint); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/CustomEnumConverter.cs b/Kinde.Api/Converters/CustomEnumConverter.cs index c601b7e..62cda0e 100644 --- a/Kinde.Api/Converters/CustomEnumConverter.cs +++ b/Kinde.Api/Converters/CustomEnumConverter.cs @@ -1,3 +1,17 @@ +// ============================================================================ +// MANUALLY MAINTAINED FILE - NOT GENERATED BY generate-converters.py +// ============================================================================ +// This file contains generic enum converters that are used across all models. +// These converters handle enum serialization/deserialization using the SDK's +// TypeEnumToJsonValue and TypeEnumFromString methods. +// +// The generate-converters.py script only generates converters for Response +// models (files ending with "Response.cs"). This file must be manually +// maintained and will NOT be overwritten by the generator. +// +// If you need to modify enum conversion logic, update this file directly. +// ============================================================================ + using System.Text.Json; using System.Text.Json.Serialization; using Newtonsoft.Json; @@ -125,22 +139,25 @@ public class NewtonsoftGenericEnumConverter : Newtonsoft.Json.JsonConverter { public override bool CanConvert(Type objectType) { - // Check if this is an enum type - if (!objectType.IsEnum) + // Check if this is an enum type or a nullable enum type + var underlyingType = Nullable.GetUnderlyingType(objectType); + var enumType = underlyingType ?? objectType; + + if (!enumType.IsEnum) return false; // Check if the containing class has the required methods - var containingType = objectType.DeclaringType; + var containingType = enumType.DeclaringType; if (containingType == null) return false; // Check for TypeEnumToJsonValue method with nullable enum parameter - var nullableType = typeof(Nullable<>).MakeGenericType(objectType); + var nullableType = typeof(Nullable<>).MakeGenericType(enumType); var toJsonMethod = containingType.GetMethod("TypeEnumToJsonValue", new[] { nullableType }); if (toJsonMethod == null) { // Also check for non-nullable version - toJsonMethod = containingType.GetMethod("TypeEnumToJsonValue", new[] { objectType }); + toJsonMethod = containingType.GetMethod("TypeEnumToJsonValue", new[] { enumType }); } var fromStringMethod = containingType.GetMethod("TypeEnumFromString", new[] { typeof(string) }); @@ -150,6 +167,14 @@ public override bool CanConvert(Type objectType) public override object ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, object existingValue, Newtonsoft.Json.JsonSerializer serializer) { + if (reader.TokenType == Newtonsoft.Json.JsonToken.Null) + { + // Return null for nullable enums + if (Nullable.GetUnderlyingType(objectType) != null || !objectType.IsValueType) + return null; + throw new Newtonsoft.Json.JsonException($"Cannot convert null to non-nullable enum {objectType.Name}"); + } + if (reader.TokenType != Newtonsoft.Json.JsonToken.String) { throw new Newtonsoft.Json.JsonException($"Unexpected token type: {reader.TokenType}"); @@ -161,37 +186,75 @@ public override object ReadJson(Newtonsoft.Json.JsonReader reader, Type objectTy throw new Newtonsoft.Json.JsonException("String value cannot be null"); } - var containingType = objectType.DeclaringType; + // Check if this is a nullable enum type - unwrap it first to get the actual enum type + var underlyingType = Nullable.GetUnderlyingType(objectType); + var enumType = underlyingType ?? objectType; + var isNullable = underlyingType != null; + + // Get the containing type from the actual enum type, not the nullable wrapper + var containingType = enumType.DeclaringType; if (containingType == null) { - throw new Newtonsoft.Json.JsonException($"Cannot find containing type for enum {objectType.Name}"); + throw new Newtonsoft.Json.JsonException($"Cannot find containing type for enum {enumType.Name}"); + } + + // For nullable enums, use TypeEnumFromStringOrDefault which returns null if conversion fails + // For non-nullable enums, use TypeEnumFromString which throws if conversion fails + MethodInfo? fromStringMethod; + if (isNullable) + { + fromStringMethod = containingType.GetMethod("TypeEnumFromStringOrDefault", new[] { typeof(string) }); + if (fromStringMethod == null) + { + // Fall back to TypeEnumFromString if TypeEnumFromStringOrDefault doesn't exist + fromStringMethod = containingType.GetMethod("TypeEnumFromString", new[] { typeof(string) }); + } + } + else + { + fromStringMethod = containingType.GetMethod("TypeEnumFromString", new[] { typeof(string) }); + if (fromStringMethod == null) + { + // Fall back to TypeEnumFromStringOrDefault if TypeEnumFromString doesn't exist + fromStringMethod = containingType.GetMethod("TypeEnumFromStringOrDefault", new[] { typeof(string) }); + } } - var fromStringMethod = containingType.GetMethod("TypeEnumFromString", new[] { typeof(string) }); if (fromStringMethod == null) { - throw new Newtonsoft.Json.JsonException($"TypeEnumFromString method not found on {containingType.Name}"); + throw new Newtonsoft.Json.JsonException($"TypeEnumFromString or TypeEnumFromStringOrDefault method not found on {containingType.Name}"); } - return fromStringMethod.Invoke(null, new object[] { stringValue })!; + var result = fromStringMethod.Invoke(null, new object[] { stringValue }); + return result ?? (isNullable ? null : throw new Newtonsoft.Json.JsonException($"Could not convert '{stringValue}' to {enumType.Name}")); } public override void WriteJson(Newtonsoft.Json.JsonWriter writer, object value, Newtonsoft.Json.JsonSerializer serializer) { + if (value == null) + { + writer.WriteNull(); + return; + } + var objectType = value.GetType(); - var containingType = objectType.DeclaringType; + // Unwrap nullable type to get the actual enum type + var underlyingType = Nullable.GetUnderlyingType(objectType); + var enumType = underlyingType ?? objectType; + + var containingType = enumType.DeclaringType; if (containingType == null) { - throw new Newtonsoft.Json.JsonException($"Cannot find containing type for enum {objectType.Name}"); + throw new Newtonsoft.Json.JsonException($"Cannot find containing type for enum {enumType.Name}"); } // Try to find the TypeEnumToJsonValue method with nullable parameter first - var nullableType = typeof(Nullable<>).MakeGenericType(objectType); + var nullableType = typeof(Nullable<>).MakeGenericType(enumType); var toJsonMethod = containingType.GetMethod("TypeEnumToJsonValue", new[] { nullableType }); if (toJsonMethod == null) { // Fall back to non-nullable version - toJsonMethod = containingType.GetMethod("TypeEnumToJsonValue", new[] { objectType }); + toJsonMethod = containingType.GetMethod("TypeEnumToJsonValue", new[] { enumType }); } if (toJsonMethod == null) @@ -216,8 +279,21 @@ public override bool CanConvert(Type objectType) public override object ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, object existingValue, Newtonsoft.Json.JsonSerializer serializer) { + // Handle null values - return default (unset) Option + if (reader.TokenType == Newtonsoft.Json.JsonToken.Null) + { + return Activator.CreateInstance(objectType)!; + } + var valueType = objectType.GetGenericArguments()[0]; var value = serializer.Deserialize(reader, valueType); + + // If the deserialized value is null, return default (unset) Option + if (value == null) + { + return Activator.CreateInstance(objectType)!; + } + var optionType = typeof(Option<>).MakeGenericType(valueType); return Activator.CreateInstance(optionType, value)!; } @@ -230,13 +306,32 @@ public override void WriteJson(Newtonsoft.Json.JsonWriter writer, object value, if (option.IsSet) { + // Check if the wrapped type is an enum or nullable enum + var underlyingType = Nullable.GetUnderlyingType(valueType); + var enumType = underlyingType ?? valueType; + // If the wrapped type is an enum, use the enum converter - if (valueType.IsEnum) + if (enumType.IsEnum) { var enumConverter = new NewtonsoftGenericEnumConverter(); - if (enumConverter.CanConvert(valueType)) + // Check CanConvert with the actual enum type (not nullable) + if (enumConverter.CanConvert(enumType)) { - enumConverter.WriteJson(writer, option.Value, serializer); + // For nullable enums, pass the underlying enum type to the converter + if (underlyingType != null && option.Value != null) + { + // Convert nullable enum to non-nullable for the converter + var enumValue = Convert.ChangeType(option.Value, enumType); + enumConverter.WriteJson(writer, enumValue, serializer); + } + else if (underlyingType == null) + { + enumConverter.WriteJson(writer, option.Value, serializer); + } + else + { + writer.WriteNull(); + } return; } } diff --git a/Kinde.Api/Converters/DeleteApiResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/DeleteApiResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..f59cdc7 --- /dev/null +++ b/Kinde.Api/Converters/DeleteApiResponseNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for DeleteApiResponse that handles the Option<> structure + /// + public class DeleteApiResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override DeleteApiResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, DeleteApiResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + + return new DeleteApiResponse( + message: message != null ? new Option(message) : default, code: code != null ? new Option(code) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, DeleteApiResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/DeleteEnvironmentVariableResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/DeleteEnvironmentVariableResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..22abab7 --- /dev/null +++ b/Kinde.Api/Converters/DeleteEnvironmentVariableResponseNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for DeleteEnvironmentVariableResponse that handles the Option<> structure + /// + public class DeleteEnvironmentVariableResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override DeleteEnvironmentVariableResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, DeleteEnvironmentVariableResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + + return new DeleteEnvironmentVariableResponse( + message: message != null ? new Option(message) : default, code: code != null ? new Option(code) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, DeleteEnvironmentVariableResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/DeleteRoleScopeResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/DeleteRoleScopeResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..5b29b28 --- /dev/null +++ b/Kinde.Api/Converters/DeleteRoleScopeResponseNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for DeleteRoleScopeResponse that handles the Option<> structure + /// + public class DeleteRoleScopeResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override DeleteRoleScopeResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, DeleteRoleScopeResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + + return new DeleteRoleScopeResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, DeleteRoleScopeResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/DeleteWebhookResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/DeleteWebhookResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..807cb9d --- /dev/null +++ b/Kinde.Api/Converters/DeleteWebhookResponseNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for DeleteWebhookResponse that handles the Option<> structure + /// + public class DeleteWebhookResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override DeleteWebhookResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, DeleteWebhookResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + + return new DeleteWebhookResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, DeleteWebhookResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/EnvironmentVariableNewtonsoftConverter.cs b/Kinde.Api/Converters/EnvironmentVariableNewtonsoftConverter.cs new file mode 100644 index 0000000..463a729 --- /dev/null +++ b/Kinde.Api/Converters/EnvironmentVariableNewtonsoftConverter.cs @@ -0,0 +1,90 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for EnvironmentVariable that handles the Option<> structure + /// + public class EnvironmentVariableNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override EnvironmentVariable ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, EnvironmentVariable existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + string? value = default(string?); + if (jsonObject["value"] != null) + { + value = jsonObject["value"].ToObject(); + } + bool? isSecret = default(bool?); + if (jsonObject["is_secret"] != null) + { + isSecret = jsonObject["is_secret"].ToObject(serializer); + } + string? createdOn = default(string?); + if (jsonObject["created_on"] != null) + { + createdOn = jsonObject["created_on"].ToObject(); + } + + return new EnvironmentVariable( + id: id != null ? new Option(id) : default, key: key != null ? new Option(key) : default, value: value != null ? new Option(value) : default, isSecret: isSecret != null ? new Option(isSecret) : default, createdOn: createdOn != null ? new Option(createdOn) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, EnvironmentVariable value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + if (value.ValueOption.IsSet && value.Value != null) + { + writer.WritePropertyName("value"); + serializer.Serialize(writer, value.Value); + } + if (value.IsSecretOption.IsSet && value.IsSecret != null) + { + writer.WritePropertyName("is_secret"); + serializer.Serialize(writer, value.IsSecret); + } + if (value.CreatedOnOption.IsSet && value.CreatedOn != null) + { + writer.WritePropertyName("created_on"); + serializer.Serialize(writer, value.CreatedOn); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/ErrorNewtonsoftConverter.cs b/Kinde.Api/Converters/ErrorNewtonsoftConverter.cs new file mode 100644 index 0000000..95de06f --- /dev/null +++ b/Kinde.Api/Converters/ErrorNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for Error that handles the Option<> structure + /// + public class ErrorNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override Error ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, Error existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + + return new Error( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, Error value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/EventTypeNewtonsoftConverter.cs b/Kinde.Api/Converters/EventTypeNewtonsoftConverter.cs new file mode 100644 index 0000000..2c5f8d9 --- /dev/null +++ b/Kinde.Api/Converters/EventTypeNewtonsoftConverter.cs @@ -0,0 +1,90 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for EventType that handles the Option<> structure + /// + public class EventTypeNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override EventType ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, EventType existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? origin = default(string?); + if (jsonObject["origin"] != null) + { + origin = jsonObject["origin"].ToObject(); + } + Object? schema = default(Object?); + if (jsonObject["schema"] != null) + { + schema = jsonObject["schema"].ToObject(serializer); + } + + return new EventType( + id: id != null ? new Option(id) : default, code: code != null ? new Option(code) : default, name: name != null ? new Option(name) : default, origin: origin != null ? new Option(origin) : default, schema: schema != null ? new Option(schema) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, EventType value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.OriginOption.IsSet && value.Origin != null) + { + writer.WritePropertyName("origin"); + serializer.Serialize(writer, value.Origin); + } + if (value.SchemaOption.IsSet && value.Schema != null) + { + writer.WritePropertyName("schema"); + serializer.Serialize(writer, value.Schema); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetApiKeyResponseApiKeyNewtonsoftConverter.cs b/Kinde.Api/Converters/GetApiKeyResponseApiKeyNewtonsoftConverter.cs new file mode 100644 index 0000000..e840372 --- /dev/null +++ b/Kinde.Api/Converters/GetApiKeyResponseApiKeyNewtonsoftConverter.cs @@ -0,0 +1,190 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetApiKeyResponseApiKey that handles the Option<> structure + /// + public class GetApiKeyResponseApiKeyNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetApiKeyResponseApiKey ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetApiKeyResponseApiKey existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? type = default(string?); + if (jsonObject["type"] != null) + { + type = jsonObject["type"].ToObject(); + } + string? status = default(string?); + if (jsonObject["status"] != null) + { + status = jsonObject["status"].ToObject(); + } + string? keyPrefix = default(string?); + if (jsonObject["key_prefix"] != null) + { + keyPrefix = jsonObject["key_prefix"].ToObject(); + } + string? keySuffix = default(string?); + if (jsonObject["key_suffix"] != null) + { + keySuffix = jsonObject["key_suffix"].ToObject(); + } + DateTimeOffset? createdOn = default(DateTimeOffset?); + if (jsonObject["created_on"] != null) + { + createdOn = jsonObject["created_on"].ToObject(serializer); + } + DateTimeOffset? lastVerifiedOn = default(DateTimeOffset?); + if (jsonObject["last_verified_on"] != null) + { + lastVerifiedOn = jsonObject["last_verified_on"].ToObject(serializer); + } + string? lastVerifiedIp = default(string?); + if (jsonObject["last_verified_ip"] != null) + { + lastVerifiedIp = jsonObject["last_verified_ip"].ToObject(); + } + string? createdBy = default(string?); + if (jsonObject["created_by"] != null) + { + createdBy = jsonObject["created_by"].ToObject(); + } + List apiIds = default(List); + if (jsonObject["api_ids"] != null) + { + apiIds = jsonObject["api_ids"].ToObject>(serializer); + } + List scopes = default(List); + if (jsonObject["scopes"] != null) + { + scopes = jsonObject["scopes"].ToObject>(serializer); + } + int? verificationCount = default(int?); + if (jsonObject["verification_count"] != null) + { + verificationCount = jsonObject["verification_count"].ToObject(serializer); + } + string? organizationId = default(string?); + if (jsonObject["organization_id"] != null) + { + organizationId = jsonObject["organization_id"].ToObject(); + } + string? userId = default(string?); + if (jsonObject["user_id"] != null) + { + userId = jsonObject["user_id"].ToObject(); + } + + return new GetApiKeyResponseApiKey( + id: id != null ? new Option(id) : default, name: name != null ? new Option(name) : default, type: type != null ? new Option(type) : default, status: status != null ? new Option(status) : default, keyPrefix: keyPrefix != null ? new Option(keyPrefix) : default, keySuffix: keySuffix != null ? new Option(keySuffix) : default, createdOn: createdOn != null ? new Option(createdOn) : default, lastVerifiedOn: lastVerifiedOn != null ? new Option(lastVerifiedOn) : default, lastVerifiedIp: lastVerifiedIp != null ? new Option(lastVerifiedIp) : default, createdBy: createdBy != null ? new Option(createdBy) : default, apiIds: apiIds != null ? new Option?>(apiIds) : default, scopes: scopes != null ? new Option?>(scopes) : default, verificationCount: verificationCount != null ? new Option(verificationCount) : default, organizationId: organizationId != null ? new Option(organizationId) : default, userId: userId != null ? new Option(userId) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetApiKeyResponseApiKey value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.TypeOption.IsSet && value.Type != null) + { + writer.WritePropertyName("type"); + serializer.Serialize(writer, value.Type); + } + if (value.StatusOption.IsSet && value.Status != null) + { + writer.WritePropertyName("status"); + serializer.Serialize(writer, value.Status); + } + if (value.KeyPrefixOption.IsSet && value.KeyPrefix != null) + { + writer.WritePropertyName("key_prefix"); + serializer.Serialize(writer, value.KeyPrefix); + } + if (value.KeySuffixOption.IsSet && value.KeySuffix != null) + { + writer.WritePropertyName("key_suffix"); + serializer.Serialize(writer, value.KeySuffix); + } + if (value.CreatedOnOption.IsSet && value.CreatedOn != null) + { + writer.WritePropertyName("created_on"); + serializer.Serialize(writer, value.CreatedOn); + } + if (value.LastVerifiedOnOption.IsSet && value.LastVerifiedOn != null) + { + writer.WritePropertyName("last_verified_on"); + serializer.Serialize(writer, value.LastVerifiedOn); + } + if (value.LastVerifiedIpOption.IsSet && value.LastVerifiedIp != null) + { + writer.WritePropertyName("last_verified_ip"); + serializer.Serialize(writer, value.LastVerifiedIp); + } + if (value.CreatedByOption.IsSet && value.CreatedBy != null) + { + writer.WritePropertyName("created_by"); + serializer.Serialize(writer, value.CreatedBy); + } + if (value.ApiIdsOption.IsSet) + { + writer.WritePropertyName("api_ids"); + serializer.Serialize(writer, value.ApiIds); + } + if (value.ScopesOption.IsSet) + { + writer.WritePropertyName("scopes"); + serializer.Serialize(writer, value.Scopes); + } + if (value.VerificationCountOption.IsSet && value.VerificationCount != null) + { + writer.WritePropertyName("verification_count"); + serializer.Serialize(writer, value.VerificationCount); + } + if (value.OrganizationIdOption.IsSet && value.OrganizationId != null) + { + writer.WritePropertyName("organization_id"); + serializer.Serialize(writer, value.OrganizationId); + } + if (value.UserIdOption.IsSet && value.UserId != null) + { + writer.WritePropertyName("user_id"); + serializer.Serialize(writer, value.UserId); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetApiKeyResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetApiKeyResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..9479f7b --- /dev/null +++ b/Kinde.Api/Converters/GetApiKeyResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetApiKeyResponse that handles the Option<> structure + /// + public class GetApiKeyResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetApiKeyResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetApiKeyResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + GetApiKeyResponseApiKey? apiKey = default(GetApiKeyResponseApiKey?); + if (jsonObject["api_key"] != null) + { + apiKey = jsonObject["api_key"].ToObject(serializer); + } + + return new GetApiKeyResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, apiKey: apiKey != null ? new Option(apiKey) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetApiKeyResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.ApiKeyOption.IsSet && value.ApiKey != null) + { + writer.WritePropertyName("api_key"); + serializer.Serialize(writer, value.ApiKey); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetApiKeysResponseApiKeysInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/GetApiKeysResponseApiKeysInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..8a7cccb --- /dev/null +++ b/Kinde.Api/Converters/GetApiKeysResponseApiKeysInnerNewtonsoftConverter.cs @@ -0,0 +1,160 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetApiKeysResponseApiKeysInner that handles the Option<> structure + /// + public class GetApiKeysResponseApiKeysInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetApiKeysResponseApiKeysInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetApiKeysResponseApiKeysInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? type = default(string?); + if (jsonObject["type"] != null) + { + type = jsonObject["type"].ToObject(); + } + string? status = default(string?); + if (jsonObject["status"] != null) + { + status = jsonObject["status"].ToObject(); + } + string? keyPrefix = default(string?); + if (jsonObject["key_prefix"] != null) + { + keyPrefix = jsonObject["key_prefix"].ToObject(); + } + string? keySuffix = default(string?); + if (jsonObject["key_suffix"] != null) + { + keySuffix = jsonObject["key_suffix"].ToObject(); + } + DateTimeOffset? createdOn = default(DateTimeOffset?); + if (jsonObject["created_on"] != null) + { + createdOn = jsonObject["created_on"].ToObject(serializer); + } + DateTimeOffset? lastVerifiedOn = default(DateTimeOffset?); + if (jsonObject["last_verified_on"] != null) + { + lastVerifiedOn = jsonObject["last_verified_on"].ToObject(serializer); + } + string? lastVerifiedIp = default(string?); + if (jsonObject["last_verified_ip"] != null) + { + lastVerifiedIp = jsonObject["last_verified_ip"].ToObject(); + } + string? createdBy = default(string?); + if (jsonObject["created_by"] != null) + { + createdBy = jsonObject["created_by"].ToObject(); + } + List apiIds = default(List); + if (jsonObject["api_ids"] != null) + { + apiIds = jsonObject["api_ids"].ToObject>(serializer); + } + List scopes = default(List); + if (jsonObject["scopes"] != null) + { + scopes = jsonObject["scopes"].ToObject>(serializer); + } + + return new GetApiKeysResponseApiKeysInner( + id: id != null ? new Option(id) : default, name: name != null ? new Option(name) : default, type: type != null ? new Option(type) : default, status: status != null ? new Option(status) : default, keyPrefix: keyPrefix != null ? new Option(keyPrefix) : default, keySuffix: keySuffix != null ? new Option(keySuffix) : default, createdOn: createdOn != null ? new Option(createdOn) : default, lastVerifiedOn: lastVerifiedOn != null ? new Option(lastVerifiedOn) : default, lastVerifiedIp: lastVerifiedIp != null ? new Option(lastVerifiedIp) : default, createdBy: createdBy != null ? new Option(createdBy) : default, apiIds: apiIds != null ? new Option?>(apiIds) : default, scopes: scopes != null ? new Option?>(scopes) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetApiKeysResponseApiKeysInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.TypeOption.IsSet && value.Type != null) + { + writer.WritePropertyName("type"); + serializer.Serialize(writer, value.Type); + } + if (value.StatusOption.IsSet && value.Status != null) + { + writer.WritePropertyName("status"); + serializer.Serialize(writer, value.Status); + } + if (value.KeyPrefixOption.IsSet && value.KeyPrefix != null) + { + writer.WritePropertyName("key_prefix"); + serializer.Serialize(writer, value.KeyPrefix); + } + if (value.KeySuffixOption.IsSet && value.KeySuffix != null) + { + writer.WritePropertyName("key_suffix"); + serializer.Serialize(writer, value.KeySuffix); + } + if (value.CreatedOnOption.IsSet && value.CreatedOn != null) + { + writer.WritePropertyName("created_on"); + serializer.Serialize(writer, value.CreatedOn); + } + if (value.LastVerifiedOnOption.IsSet && value.LastVerifiedOn != null) + { + writer.WritePropertyName("last_verified_on"); + serializer.Serialize(writer, value.LastVerifiedOn); + } + if (value.LastVerifiedIpOption.IsSet && value.LastVerifiedIp != null) + { + writer.WritePropertyName("last_verified_ip"); + serializer.Serialize(writer, value.LastVerifiedIp); + } + if (value.CreatedByOption.IsSet && value.CreatedBy != null) + { + writer.WritePropertyName("created_by"); + serializer.Serialize(writer, value.CreatedBy); + } + if (value.ApiIdsOption.IsSet) + { + writer.WritePropertyName("api_ids"); + serializer.Serialize(writer, value.ApiIds); + } + if (value.ScopesOption.IsSet) + { + writer.WritePropertyName("scopes"); + serializer.Serialize(writer, value.Scopes); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetApiKeysResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetApiKeysResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..905d7ed --- /dev/null +++ b/Kinde.Api/Converters/GetApiKeysResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetApiKeysResponse that handles the Option<> structure + /// + public class GetApiKeysResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetApiKeysResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetApiKeysResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + bool? hasMore = default(bool?); + if (jsonObject["has_more"] != null) + { + hasMore = jsonObject["has_more"].ToObject(serializer); + } + List apiKeys = default(List); + if (jsonObject["api_keys"] != null) + { + apiKeys = jsonObject["api_keys"].ToObject>(serializer); + } + + return new GetApiKeysResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, hasMore: hasMore != null ? new Option(hasMore) : default, apiKeys: apiKeys != null ? new Option?>(apiKeys) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetApiKeysResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.HasMoreOption.IsSet && value.HasMore != null) + { + writer.WritePropertyName("has_more"); + serializer.Serialize(writer, value.HasMore); + } + if (value.ApiKeysOption.IsSet) + { + writer.WritePropertyName("api_keys"); + serializer.Serialize(writer, value.ApiKeys); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetApiResponseApiApplicationsInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/GetApiResponseApiApplicationsInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..89b607e --- /dev/null +++ b/Kinde.Api/Converters/GetApiResponseApiApplicationsInnerNewtonsoftConverter.cs @@ -0,0 +1,85 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetApiResponseApiApplicationsInner that handles the Option<> structure + /// + public class GetApiResponseApiApplicationsInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetApiResponseApiApplicationsInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetApiResponseApiApplicationsInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + GetApiResponseApiApplicationsInner.TypeEnum? type = default(GetApiResponseApiApplicationsInner.TypeEnum?); + if (jsonObject["type"] != null) + { + var typeStr = jsonObject["type"].ToObject(); + if (!string.IsNullOrEmpty(typeStr)) + { + type = GetApiResponseApiApplicationsInner.TypeEnumFromString(typeStr); + } + } + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + bool? isActive = default(bool?); + if (jsonObject["is_active"] != null) + { + isActive = jsonObject["is_active"].ToObject(serializer); + } + + return new GetApiResponseApiApplicationsInner( + type: type != null ? new Option(type) : default, id: id != null ? new Option(id) : default, name: name != null ? new Option(name) : default, isActive: isActive != null ? new Option(isActive) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetApiResponseApiApplicationsInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.TypeOption.IsSet && value.Type != null) + { + writer.WritePropertyName("type"); + var typeStr = GetApiResponseApiApplicationsInner.TypeEnumToJsonValue(value.Type.Value); + writer.WriteValue(typeStr); + } + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.IsActiveOption.IsSet && value.IsActive != null) + { + writer.WritePropertyName("is_active"); + serializer.Serialize(writer, value.IsActive); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetApiResponseApiNewtonsoftConverter.cs b/Kinde.Api/Converters/GetApiResponseApiNewtonsoftConverter.cs new file mode 100644 index 0000000..a115069 --- /dev/null +++ b/Kinde.Api/Converters/GetApiResponseApiNewtonsoftConverter.cs @@ -0,0 +1,100 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetApiResponseApi that handles the Option<> structure + /// + public class GetApiResponseApiNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetApiResponseApi ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetApiResponseApi existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? audience = default(string?); + if (jsonObject["audience"] != null) + { + audience = jsonObject["audience"].ToObject(); + } + bool? isManagementApi = default(bool?); + if (jsonObject["is_management_api"] != null) + { + isManagementApi = jsonObject["is_management_api"].ToObject(serializer); + } + List scopes = default(List); + if (jsonObject["scopes"] != null) + { + scopes = jsonObject["scopes"].ToObject>(serializer); + } + List applications = default(List); + if (jsonObject["applications"] != null) + { + applications = jsonObject["applications"].ToObject>(serializer); + } + + return new GetApiResponseApi( + id: id != null ? new Option(id) : default, name: name != null ? new Option(name) : default, audience: audience != null ? new Option(audience) : default, isManagementApi: isManagementApi != null ? new Option(isManagementApi) : default, scopes: scopes != null ? new Option?>(scopes) : default, applications: applications != null ? new Option?>(applications) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetApiResponseApi value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.AudienceOption.IsSet && value.Audience != null) + { + writer.WritePropertyName("audience"); + serializer.Serialize(writer, value.Audience); + } + if (value.IsManagementApiOption.IsSet && value.IsManagementApi != null) + { + writer.WritePropertyName("is_management_api"); + serializer.Serialize(writer, value.IsManagementApi); + } + if (value.ScopesOption.IsSet) + { + writer.WritePropertyName("scopes"); + serializer.Serialize(writer, value.Scopes); + } + if (value.ApplicationsOption.IsSet) + { + writer.WritePropertyName("applications"); + serializer.Serialize(writer, value.Applications); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetApiResponseApiScopesInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/GetApiResponseApiScopesInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..e9548f5 --- /dev/null +++ b/Kinde.Api/Converters/GetApiResponseApiScopesInnerNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetApiResponseApiScopesInner that handles the Option<> structure + /// + public class GetApiResponseApiScopesInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetApiResponseApiScopesInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetApiResponseApiScopesInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + + return new GetApiResponseApiScopesInner( + id: id != null ? new Option(id) : default, key: key != null ? new Option(key) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetApiResponseApiScopesInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetApiResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetApiResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..aafffb2 --- /dev/null +++ b/Kinde.Api/Converters/GetApiResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetApiResponse that handles the Option<> structure + /// + public class GetApiResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetApiResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetApiResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + GetApiResponseApi? api = default(GetApiResponseApi?); + if (jsonObject["api"] != null) + { + api = jsonObject["api"].ToObject(serializer); + } + + return new GetApiResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, api: api != null ? new Option(api) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetApiResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.ApiOption.IsSet && value.Api != null) + { + writer.WritePropertyName("api"); + serializer.Serialize(writer, value.Api); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetApiScopeResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetApiScopeResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..211efd3 --- /dev/null +++ b/Kinde.Api/Converters/GetApiScopeResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetApiScopeResponse that handles the Option<> structure + /// + public class GetApiScopeResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetApiScopeResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetApiScopeResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + GetApiScopesResponseScopesInner? scope = default(GetApiScopesResponseScopesInner?); + if (jsonObject["scope"] != null) + { + scope = jsonObject["scope"].ToObject(serializer); + } + + return new GetApiScopeResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, scope: scope != null ? new Option(scope) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetApiScopeResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.ScopeOption.IsSet && value.Scope != null) + { + writer.WritePropertyName("scope"); + serializer.Serialize(writer, value.Scope); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetApiScopesResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetApiScopesResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..82cf129 --- /dev/null +++ b/Kinde.Api/Converters/GetApiScopesResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetApiScopesResponse that handles the Option<> structure + /// + public class GetApiScopesResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetApiScopesResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetApiScopesResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List scopes = default(List); + if (jsonObject["scopes"] != null) + { + scopes = jsonObject["scopes"].ToObject>(serializer); + } + + return new GetApiScopesResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, scopes: scopes != null ? new Option?>(scopes) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetApiScopesResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.ScopesOption.IsSet) + { + writer.WritePropertyName("scopes"); + serializer.Serialize(writer, value.Scopes); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetApiScopesResponseScopesInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/GetApiScopesResponseScopesInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..7345fe9 --- /dev/null +++ b/Kinde.Api/Converters/GetApiScopesResponseScopesInnerNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetApiScopesResponseScopesInner that handles the Option<> structure + /// + public class GetApiScopesResponseScopesInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetApiScopesResponseScopesInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetApiScopesResponseScopesInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + string? description = default(string?); + if (jsonObject["description"] != null) + { + description = jsonObject["description"].ToObject(); + } + + return new GetApiScopesResponseScopesInner( + id: id != null ? new Option(id) : default, key: key != null ? new Option(key) : default, description: description != null ? new Option(description) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetApiScopesResponseScopesInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + if (value.DescriptionOption.IsSet && value.Description != null) + { + writer.WritePropertyName("description"); + serializer.Serialize(writer, value.Description); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetApisResponseApisInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/GetApisResponseApisInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..7a09b8e --- /dev/null +++ b/Kinde.Api/Converters/GetApisResponseApisInnerNewtonsoftConverter.cs @@ -0,0 +1,90 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetApisResponseApisInner that handles the Option<> structure + /// + public class GetApisResponseApisInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetApisResponseApisInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetApisResponseApisInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? audience = default(string?); + if (jsonObject["audience"] != null) + { + audience = jsonObject["audience"].ToObject(); + } + bool? isManagementApi = default(bool?); + if (jsonObject["is_management_api"] != null) + { + isManagementApi = jsonObject["is_management_api"].ToObject(serializer); + } + List scopes = default(List); + if (jsonObject["scopes"] != null) + { + scopes = jsonObject["scopes"].ToObject>(serializer); + } + + return new GetApisResponseApisInner( + id: id != null ? new Option(id) : default, name: name != null ? new Option(name) : default, audience: audience != null ? new Option(audience) : default, isManagementApi: isManagementApi != null ? new Option(isManagementApi) : default, scopes: scopes != null ? new Option?>(scopes) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetApisResponseApisInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.AudienceOption.IsSet && value.Audience != null) + { + writer.WritePropertyName("audience"); + serializer.Serialize(writer, value.Audience); + } + if (value.IsManagementApiOption.IsSet && value.IsManagementApi != null) + { + writer.WritePropertyName("is_management_api"); + serializer.Serialize(writer, value.IsManagementApi); + } + if (value.ScopesOption.IsSet) + { + writer.WritePropertyName("scopes"); + serializer.Serialize(writer, value.Scopes); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetApisResponseApisInnerScopesInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/GetApisResponseApisInnerScopesInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..dd28d5b --- /dev/null +++ b/Kinde.Api/Converters/GetApisResponseApisInnerScopesInnerNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetApisResponseApisInnerScopesInner that handles the Option<> structure + /// + public class GetApisResponseApisInnerScopesInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetApisResponseApisInnerScopesInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetApisResponseApisInnerScopesInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + + return new GetApisResponseApisInnerScopesInner( + id: id != null ? new Option(id) : default, key: key != null ? new Option(key) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetApisResponseApisInnerScopesInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetApisResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetApisResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..2f88f93 --- /dev/null +++ b/Kinde.Api/Converters/GetApisResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetApisResponse that handles the Option<> structure + /// + public class GetApisResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetApisResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetApisResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + string? nextToken = default(string?); + if (jsonObject["next_token"] != null) + { + nextToken = jsonObject["next_token"].ToObject(); + } + List apis = default(List); + if (jsonObject["apis"] != null) + { + apis = jsonObject["apis"].ToObject>(serializer); + } + + return new GetApisResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, nextToken: nextToken != null ? new Option(nextToken) : default, apis: apis != null ? new Option?>(apis) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetApisResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.NextTokenOption.IsSet && value.NextToken != null) + { + writer.WritePropertyName("next_token"); + serializer.Serialize(writer, value.NextToken); + } + if (value.ApisOption.IsSet) + { + writer.WritePropertyName("apis"); + serializer.Serialize(writer, value.Apis); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetApplicationResponseApplicationNewtonsoftConverter.cs b/Kinde.Api/Converters/GetApplicationResponseApplicationNewtonsoftConverter.cs new file mode 100644 index 0000000..0afabfe --- /dev/null +++ b/Kinde.Api/Converters/GetApplicationResponseApplicationNewtonsoftConverter.cs @@ -0,0 +1,125 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetApplicationResponseApplication that handles the Option<> structure + /// + public class GetApplicationResponseApplicationNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetApplicationResponseApplication ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetApplicationResponseApplication existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + GetApplicationResponseApplication.TypeEnum? type = default(GetApplicationResponseApplication.TypeEnum?); + if (jsonObject["type"] != null) + { + var typeStr = jsonObject["type"].ToObject(); + if (!string.IsNullOrEmpty(typeStr)) + { + type = GetApplicationResponseApplication.TypeEnumFromString(typeStr); + } + } + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? clientId = default(string?); + if (jsonObject["client_id"] != null) + { + clientId = jsonObject["client_id"].ToObject(); + } + string? clientSecret = default(string?); + if (jsonObject["client_secret"] != null) + { + clientSecret = jsonObject["client_secret"].ToObject(); + } + string? loginUri = default(string?); + if (jsonObject["login_uri"] != null) + { + loginUri = jsonObject["login_uri"].ToObject(); + } + string? homepageUri = default(string?); + if (jsonObject["homepage_uri"] != null) + { + homepageUri = jsonObject["homepage_uri"].ToObject(); + } + bool? hasCancelButton = default(bool?); + if (jsonObject["has_cancel_button"] != null) + { + hasCancelButton = jsonObject["has_cancel_button"].ToObject(serializer); + } + + return new GetApplicationResponseApplication( + type: type != null ? new Option(type) : default, id: id != null ? new Option(id) : default, name: name != null ? new Option(name) : default, clientId: clientId != null ? new Option(clientId) : default, clientSecret: clientSecret != null ? new Option(clientSecret) : default, loginUri: loginUri != null ? new Option(loginUri) : default, homepageUri: homepageUri != null ? new Option(homepageUri) : default, hasCancelButton: hasCancelButton != null ? new Option(hasCancelButton) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetApplicationResponseApplication value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.TypeOption.IsSet && value.Type != null) + { + writer.WritePropertyName("type"); + var typeStr = GetApplicationResponseApplication.TypeEnumToJsonValue(value.Type.Value); + writer.WriteValue(typeStr); + } + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.ClientIdOption.IsSet && value.ClientId != null) + { + writer.WritePropertyName("client_id"); + serializer.Serialize(writer, value.ClientId); + } + if (value.ClientSecretOption.IsSet && value.ClientSecret != null) + { + writer.WritePropertyName("client_secret"); + serializer.Serialize(writer, value.ClientSecret); + } + if (value.LoginUriOption.IsSet && value.LoginUri != null) + { + writer.WritePropertyName("login_uri"); + serializer.Serialize(writer, value.LoginUri); + } + if (value.HomepageUriOption.IsSet && value.HomepageUri != null) + { + writer.WritePropertyName("homepage_uri"); + serializer.Serialize(writer, value.HomepageUri); + } + if (value.HasCancelButtonOption.IsSet && value.HasCancelButton != null) + { + writer.WritePropertyName("has_cancel_button"); + serializer.Serialize(writer, value.HasCancelButton); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetApplicationResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetApplicationResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..53bde54 --- /dev/null +++ b/Kinde.Api/Converters/GetApplicationResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetApplicationResponse that handles the Option<> structure + /// + public class GetApplicationResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetApplicationResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetApplicationResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + GetApplicationResponseApplication? application = default(GetApplicationResponseApplication?); + if (jsonObject["application"] != null) + { + application = jsonObject["application"].ToObject(serializer); + } + + return new GetApplicationResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, application: application != null ? new Option(application) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetApplicationResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.ApplicationOption.IsSet && value.Application != null) + { + writer.WritePropertyName("application"); + serializer.Serialize(writer, value.Application); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetApplicationsResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetApplicationsResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..c9e466e --- /dev/null +++ b/Kinde.Api/Converters/GetApplicationsResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetApplicationsResponse that handles the Option<> structure + /// + public class GetApplicationsResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetApplicationsResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetApplicationsResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List applications = default(List); + if (jsonObject["applications"] != null) + { + applications = jsonObject["applications"].ToObject>(serializer); + } + string? nextToken = default(string?); + if (jsonObject["next_token"] != null) + { + nextToken = jsonObject["next_token"].ToObject(); + } + + return new GetApplicationsResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, applications: applications != null ? new Option?>(applications) : default, nextToken: nextToken != null ? new Option(nextToken) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetApplicationsResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.ApplicationsOption.IsSet) + { + writer.WritePropertyName("applications"); + serializer.Serialize(writer, value.Applications); + } + if (value.NextTokenOption.IsSet && value.NextToken != null) + { + writer.WritePropertyName("next_token"); + serializer.Serialize(writer, value.NextToken); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetBillingAgreementsResponseAgreementsInnerEntitlementsInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/GetBillingAgreementsResponseAgreementsInnerEntitlementsInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..eda8e8c --- /dev/null +++ b/Kinde.Api/Converters/GetBillingAgreementsResponseAgreementsInnerEntitlementsInnerNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetBillingAgreementsResponseAgreementsInnerEntitlementsInner that handles the Option<> structure + /// + public class GetBillingAgreementsResponseAgreementsInnerEntitlementsInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetBillingAgreementsResponseAgreementsInnerEntitlementsInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetBillingAgreementsResponseAgreementsInnerEntitlementsInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? featureCode = default(string?); + if (jsonObject["feature_code"] != null) + { + featureCode = jsonObject["feature_code"].ToObject(); + } + string? entitlementId = default(string?); + if (jsonObject["entitlement_id"] != null) + { + entitlementId = jsonObject["entitlement_id"].ToObject(); + } + + return new GetBillingAgreementsResponseAgreementsInnerEntitlementsInner( + featureCode: featureCode != null ? new Option(featureCode) : default, entitlementId: entitlementId != null ? new Option(entitlementId) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetBillingAgreementsResponseAgreementsInnerEntitlementsInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.FeatureCodeOption.IsSet && value.FeatureCode != null) + { + writer.WritePropertyName("feature_code"); + serializer.Serialize(writer, value.FeatureCode); + } + if (value.EntitlementIdOption.IsSet && value.EntitlementId != null) + { + writer.WritePropertyName("entitlement_id"); + serializer.Serialize(writer, value.EntitlementId); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetBillingAgreementsResponseAgreementsInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/GetBillingAgreementsResponseAgreementsInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..0fc6d6c --- /dev/null +++ b/Kinde.Api/Converters/GetBillingAgreementsResponseAgreementsInnerNewtonsoftConverter.cs @@ -0,0 +1,90 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetBillingAgreementsResponseAgreementsInner that handles the Option<> structure + /// + public class GetBillingAgreementsResponseAgreementsInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetBillingAgreementsResponseAgreementsInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetBillingAgreementsResponseAgreementsInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? planCode = default(string?); + if (jsonObject["plan_code"] != null) + { + planCode = jsonObject["plan_code"].ToObject(); + } + DateTimeOffset? expiresOn = default(DateTimeOffset?); + if (jsonObject["expires_on"] != null) + { + expiresOn = jsonObject["expires_on"].ToObject(serializer); + } + string? billingGroupId = default(string?); + if (jsonObject["billing_group_id"] != null) + { + billingGroupId = jsonObject["billing_group_id"].ToObject(); + } + List entitlements = default(List); + if (jsonObject["entitlements"] != null) + { + entitlements = jsonObject["entitlements"].ToObject>(serializer); + } + + return new GetBillingAgreementsResponseAgreementsInner( + id: id != null ? new Option(id) : default, planCode: planCode != null ? new Option(planCode) : default, expiresOn: expiresOn != null ? new Option(expiresOn) : default, billingGroupId: billingGroupId != null ? new Option(billingGroupId) : default, entitlements: entitlements != null ? new Option?>(entitlements) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetBillingAgreementsResponseAgreementsInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.PlanCodeOption.IsSet && value.PlanCode != null) + { + writer.WritePropertyName("plan_code"); + serializer.Serialize(writer, value.PlanCode); + } + if (value.ExpiresOnOption.IsSet && value.ExpiresOn != null) + { + writer.WritePropertyName("expires_on"); + serializer.Serialize(writer, value.ExpiresOn); + } + if (value.BillingGroupIdOption.IsSet && value.BillingGroupId != null) + { + writer.WritePropertyName("billing_group_id"); + serializer.Serialize(writer, value.BillingGroupId); + } + if (value.EntitlementsOption.IsSet) + { + writer.WritePropertyName("entitlements"); + serializer.Serialize(writer, value.Entitlements); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetBillingAgreementsResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetBillingAgreementsResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..a2c8b3c --- /dev/null +++ b/Kinde.Api/Converters/GetBillingAgreementsResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetBillingAgreementsResponse that handles the Option<> structure + /// + public class GetBillingAgreementsResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetBillingAgreementsResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetBillingAgreementsResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + bool? hasMore = default(bool?); + if (jsonObject["has_more"] != null) + { + hasMore = jsonObject["has_more"].ToObject(serializer); + } + List agreements = default(List); + if (jsonObject["agreements"] != null) + { + agreements = jsonObject["agreements"].ToObject>(serializer); + } + + return new GetBillingAgreementsResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, hasMore: hasMore != null ? new Option(hasMore) : default, agreements: agreements != null ? new Option?>(agreements) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetBillingAgreementsResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.HasMoreOption.IsSet && value.HasMore != null) + { + writer.WritePropertyName("has_more"); + serializer.Serialize(writer, value.HasMore); + } + if (value.AgreementsOption.IsSet) + { + writer.WritePropertyName("agreements"); + serializer.Serialize(writer, value.Agreements); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetBillingEntitlementsResponseEntitlementsInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/GetBillingEntitlementsResponseEntitlementsInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..52c7403 --- /dev/null +++ b/Kinde.Api/Converters/GetBillingEntitlementsResponseEntitlementsInnerNewtonsoftConverter.cs @@ -0,0 +1,120 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetBillingEntitlementsResponseEntitlementsInner that handles the Option<> structure + /// + public class GetBillingEntitlementsResponseEntitlementsInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetBillingEntitlementsResponseEntitlementsInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetBillingEntitlementsResponseEntitlementsInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + int? fixedCharge = default(int?); + if (jsonObject["fixed_charge"] != null) + { + fixedCharge = jsonObject["fixed_charge"].ToObject(serializer); + } + string? priceName = default(string?); + if (jsonObject["price_name"] != null) + { + priceName = jsonObject["price_name"].ToObject(); + } + int? unitAmount = default(int?); + if (jsonObject["unit_amount"] != null) + { + unitAmount = jsonObject["unit_amount"].ToObject(serializer); + } + string? featureCode = default(string?); + if (jsonObject["feature_code"] != null) + { + featureCode = jsonObject["feature_code"].ToObject(); + } + string? featureName = default(string?); + if (jsonObject["feature_name"] != null) + { + featureName = jsonObject["feature_name"].ToObject(); + } + int? entitlementLimitMax = default(int?); + if (jsonObject["entitlement_limit_max"] != null) + { + entitlementLimitMax = jsonObject["entitlement_limit_max"].ToObject(serializer); + } + int? entitlementLimitMin = default(int?); + if (jsonObject["entitlement_limit_min"] != null) + { + entitlementLimitMin = jsonObject["entitlement_limit_min"].ToObject(serializer); + } + + return new GetBillingEntitlementsResponseEntitlementsInner( + id: id != null ? new Option(id) : default, fixedCharge: fixedCharge != null ? new Option(fixedCharge) : default, priceName: priceName != null ? new Option(priceName) : default, unitAmount: unitAmount != null ? new Option(unitAmount) : default, featureCode: featureCode != null ? new Option(featureCode) : default, featureName: featureName != null ? new Option(featureName) : default, entitlementLimitMax: entitlementLimitMax != null ? new Option(entitlementLimitMax) : default, entitlementLimitMin: entitlementLimitMin != null ? new Option(entitlementLimitMin) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetBillingEntitlementsResponseEntitlementsInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.FixedChargeOption.IsSet && value.FixedCharge != null) + { + writer.WritePropertyName("fixed_charge"); + serializer.Serialize(writer, value.FixedCharge); + } + if (value.PriceNameOption.IsSet && value.PriceName != null) + { + writer.WritePropertyName("price_name"); + serializer.Serialize(writer, value.PriceName); + } + if (value.UnitAmountOption.IsSet && value.UnitAmount != null) + { + writer.WritePropertyName("unit_amount"); + serializer.Serialize(writer, value.UnitAmount); + } + if (value.FeatureCodeOption.IsSet && value.FeatureCode != null) + { + writer.WritePropertyName("feature_code"); + serializer.Serialize(writer, value.FeatureCode); + } + if (value.FeatureNameOption.IsSet && value.FeatureName != null) + { + writer.WritePropertyName("feature_name"); + serializer.Serialize(writer, value.FeatureName); + } + if (value.EntitlementLimitMaxOption.IsSet && value.EntitlementLimitMax != null) + { + writer.WritePropertyName("entitlement_limit_max"); + serializer.Serialize(writer, value.EntitlementLimitMax); + } + if (value.EntitlementLimitMinOption.IsSet && value.EntitlementLimitMin != null) + { + writer.WritePropertyName("entitlement_limit_min"); + serializer.Serialize(writer, value.EntitlementLimitMin); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetBillingEntitlementsResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetBillingEntitlementsResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..dcf2e73 --- /dev/null +++ b/Kinde.Api/Converters/GetBillingEntitlementsResponseNewtonsoftConverter.cs @@ -0,0 +1,90 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetBillingEntitlementsResponse that handles the Option<> structure + /// + public class GetBillingEntitlementsResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetBillingEntitlementsResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetBillingEntitlementsResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + bool? hasMore = default(bool?); + if (jsonObject["has_more"] != null) + { + hasMore = jsonObject["has_more"].ToObject(serializer); + } + List entitlements = default(List); + if (jsonObject["entitlements"] != null) + { + entitlements = jsonObject["entitlements"].ToObject>(serializer); + } + List plans = default(List); + if (jsonObject["plans"] != null) + { + plans = jsonObject["plans"].ToObject>(serializer); + } + + return new GetBillingEntitlementsResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, hasMore: hasMore != null ? new Option(hasMore) : default, entitlements: entitlements != null ? new Option?>(entitlements) : default, plans: plans != null ? new Option?>(plans) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetBillingEntitlementsResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.HasMoreOption.IsSet && value.HasMore != null) + { + writer.WritePropertyName("has_more"); + serializer.Serialize(writer, value.HasMore); + } + if (value.EntitlementsOption.IsSet) + { + writer.WritePropertyName("entitlements"); + serializer.Serialize(writer, value.Entitlements); + } + if (value.PlansOption.IsSet) + { + writer.WritePropertyName("plans"); + serializer.Serialize(writer, value.Plans); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetBillingEntitlementsResponsePlansInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/GetBillingEntitlementsResponsePlansInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..91e5b77 --- /dev/null +++ b/Kinde.Api/Converters/GetBillingEntitlementsResponsePlansInnerNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetBillingEntitlementsResponsePlansInner that handles the Option<> structure + /// + public class GetBillingEntitlementsResponsePlansInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetBillingEntitlementsResponsePlansInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetBillingEntitlementsResponsePlansInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + DateTimeOffset? subscribedOn = default(DateTimeOffset?); + if (jsonObject["subscribed_on"] != null) + { + subscribedOn = jsonObject["subscribed_on"].ToObject(serializer); + } + + return new GetBillingEntitlementsResponsePlansInner( + code: code != null ? new Option(code) : default, subscribedOn: subscribedOn != null ? new Option(subscribedOn) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetBillingEntitlementsResponsePlansInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.SubscribedOnOption.IsSet && value.SubscribedOn != null) + { + writer.WritePropertyName("subscribed_on"); + serializer.Serialize(writer, value.SubscribedOn); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetBusinessResponseBusinessNewtonsoftConverter.cs b/Kinde.Api/Converters/GetBusinessResponseBusinessNewtonsoftConverter.cs new file mode 100644 index 0000000..b27b7e7 --- /dev/null +++ b/Kinde.Api/Converters/GetBusinessResponseBusinessNewtonsoftConverter.cs @@ -0,0 +1,150 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetBusinessResponseBusiness that handles the Option<> structure + /// + public class GetBusinessResponseBusinessNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetBusinessResponseBusiness ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetBusinessResponseBusiness existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? phone = default(string?); + if (jsonObject["phone"] != null) + { + phone = jsonObject["phone"].ToObject(); + } + string? email = default(string?); + if (jsonObject["email"] != null) + { + email = jsonObject["email"].ToObject(); + } + string? industry = default(string?); + if (jsonObject["industry"] != null) + { + industry = jsonObject["industry"].ToObject(); + } + string? timezone = default(string?); + if (jsonObject["timezone"] != null) + { + timezone = jsonObject["timezone"].ToObject(); + } + string? privacyUrl = default(string?); + if (jsonObject["privacy_url"] != null) + { + privacyUrl = jsonObject["privacy_url"].ToObject(); + } + string? termsUrl = default(string?); + if (jsonObject["terms_url"] != null) + { + termsUrl = jsonObject["terms_url"].ToObject(); + } + bool? hasClickwrap = default(bool?); + if (jsonObject["has_clickwrap"] != null) + { + hasClickwrap = jsonObject["has_clickwrap"].ToObject(serializer); + } + bool? hasKindeBranding = default(bool?); + if (jsonObject["has_kinde_branding"] != null) + { + hasKindeBranding = jsonObject["has_kinde_branding"].ToObject(serializer); + } + string? createdOn = default(string?); + if (jsonObject["created_on"] != null) + { + createdOn = jsonObject["created_on"].ToObject(); + } + + return new GetBusinessResponseBusiness( + code: code != null ? new Option(code) : default, name: name != null ? new Option(name) : default, phone: phone != null ? new Option(phone) : default, email: email != null ? new Option(email) : default, industry: industry != null ? new Option(industry) : default, timezone: timezone != null ? new Option(timezone) : default, privacyUrl: privacyUrl != null ? new Option(privacyUrl) : default, termsUrl: termsUrl != null ? new Option(termsUrl) : default, hasClickwrap: hasClickwrap != null ? new Option(hasClickwrap) : default, hasKindeBranding: hasKindeBranding != null ? new Option(hasKindeBranding) : default, createdOn: createdOn != null ? new Option(createdOn) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetBusinessResponseBusiness value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.PhoneOption.IsSet && value.Phone != null) + { + writer.WritePropertyName("phone"); + serializer.Serialize(writer, value.Phone); + } + if (value.EmailOption.IsSet && value.Email != null) + { + writer.WritePropertyName("email"); + serializer.Serialize(writer, value.Email); + } + if (value.IndustryOption.IsSet && value.Industry != null) + { + writer.WritePropertyName("industry"); + serializer.Serialize(writer, value.Industry); + } + if (value.TimezoneOption.IsSet && value.Timezone != null) + { + writer.WritePropertyName("timezone"); + serializer.Serialize(writer, value.Timezone); + } + if (value.PrivacyUrlOption.IsSet && value.PrivacyUrl != null) + { + writer.WritePropertyName("privacy_url"); + serializer.Serialize(writer, value.PrivacyUrl); + } + if (value.TermsUrlOption.IsSet && value.TermsUrl != null) + { + writer.WritePropertyName("terms_url"); + serializer.Serialize(writer, value.TermsUrl); + } + if (value.HasClickwrapOption.IsSet && value.HasClickwrap != null) + { + writer.WritePropertyName("has_clickwrap"); + serializer.Serialize(writer, value.HasClickwrap); + } + if (value.HasKindeBrandingOption.IsSet && value.HasKindeBranding != null) + { + writer.WritePropertyName("has_kinde_branding"); + serializer.Serialize(writer, value.HasKindeBranding); + } + if (value.CreatedOnOption.IsSet && value.CreatedOn != null) + { + writer.WritePropertyName("created_on"); + serializer.Serialize(writer, value.CreatedOn); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetBusinessResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetBusinessResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..318fdba --- /dev/null +++ b/Kinde.Api/Converters/GetBusinessResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetBusinessResponse that handles the Option<> structure + /// + public class GetBusinessResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetBusinessResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetBusinessResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + GetBusinessResponseBusiness? business = default(GetBusinessResponseBusiness?); + if (jsonObject["business"] != null) + { + business = jsonObject["business"].ToObject(serializer); + } + + return new GetBusinessResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, business: business != null ? new Option(business) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetBusinessResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.BusinessOption.IsSet && value.Business != null) + { + writer.WritePropertyName("business"); + serializer.Serialize(writer, value.Business); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetCategoriesResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetCategoriesResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..31f17ef --- /dev/null +++ b/Kinde.Api/Converters/GetCategoriesResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetCategoriesResponse that handles the Option<> structure + /// + public class GetCategoriesResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetCategoriesResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetCategoriesResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List categories = default(List); + if (jsonObject["categories"] != null) + { + categories = jsonObject["categories"].ToObject>(serializer); + } + bool? hasMore = default(bool?); + if (jsonObject["has_more"] != null) + { + hasMore = jsonObject["has_more"].ToObject(serializer); + } + + return new GetCategoriesResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, categories: categories != null ? new Option?>(categories) : default, hasMore: hasMore != null ? new Option(hasMore) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetCategoriesResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.CategoriesOption.IsSet) + { + writer.WritePropertyName("categories"); + serializer.Serialize(writer, value.Categories); + } + if (value.HasMoreOption.IsSet && value.HasMore != null) + { + writer.WritePropertyName("has_more"); + serializer.Serialize(writer, value.HasMore); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetConnectionsResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetConnectionsResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..adf4e10 --- /dev/null +++ b/Kinde.Api/Converters/GetConnectionsResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetConnectionsResponse that handles the Option<> structure + /// + public class GetConnectionsResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetConnectionsResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetConnectionsResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List connections = default(List); + if (jsonObject["connections"] != null) + { + connections = jsonObject["connections"].ToObject>(serializer); + } + bool? hasMore = default(bool?); + if (jsonObject["has_more"] != null) + { + hasMore = jsonObject["has_more"].ToObject(serializer); + } + + return new GetConnectionsResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, connections: connections != null ? new Option?>(connections) : default, hasMore: hasMore != null ? new Option(hasMore) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetConnectionsResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.ConnectionsOption.IsSet) + { + writer.WritePropertyName("connections"); + serializer.Serialize(writer, value.Connections); + } + if (value.HasMoreOption.IsSet && value.HasMore != null) + { + writer.WritePropertyName("has_more"); + serializer.Serialize(writer, value.HasMore); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetEnvironmentFeatureFlagsResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetEnvironmentFeatureFlagsResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..2ee1054 --- /dev/null +++ b/Kinde.Api/Converters/GetEnvironmentFeatureFlagsResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetEnvironmentFeatureFlagsResponse that handles the Option<> structure + /// + public class GetEnvironmentFeatureFlagsResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetEnvironmentFeatureFlagsResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetEnvironmentFeatureFlagsResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + Dictionary featureFlags = default(Dictionary); + if (jsonObject["feature_flags"] != null) + { + featureFlags = jsonObject["feature_flags"].ToObject>(serializer); + } + string? nextToken = default(string?); + if (jsonObject["next_token"] != null) + { + nextToken = jsonObject["next_token"].ToObject(); + } + + return new GetEnvironmentFeatureFlagsResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, featureFlags: featureFlags != null ? new Option>(featureFlags) : default, nextToken: nextToken != null ? new Option(nextToken) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetEnvironmentFeatureFlagsResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.FeatureFlagsOption.IsSet) + { + writer.WritePropertyName("feature_flags"); + serializer.Serialize(writer, value.FeatureFlags); + } + if (value.NextTokenOption.IsSet && value.NextToken != null) + { + writer.WritePropertyName("next_token"); + serializer.Serialize(writer, value.NextToken); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetEnvironmentResponseEnvironmentBackgroundColorNewtonsoftConverter.cs b/Kinde.Api/Converters/GetEnvironmentResponseEnvironmentBackgroundColorNewtonsoftConverter.cs new file mode 100644 index 0000000..d3c9cc0 --- /dev/null +++ b/Kinde.Api/Converters/GetEnvironmentResponseEnvironmentBackgroundColorNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetEnvironmentResponseEnvironmentBackgroundColor that handles the Option<> structure + /// + public class GetEnvironmentResponseEnvironmentBackgroundColorNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetEnvironmentResponseEnvironmentBackgroundColor ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetEnvironmentResponseEnvironmentBackgroundColor existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? raw = default(string?); + if (jsonObject["raw"] != null) + { + raw = jsonObject["raw"].ToObject(); + } + string? hex = default(string?); + if (jsonObject["hex"] != null) + { + hex = jsonObject["hex"].ToObject(); + } + string? hsl = default(string?); + if (jsonObject["hsl"] != null) + { + hsl = jsonObject["hsl"].ToObject(); + } + + return new GetEnvironmentResponseEnvironmentBackgroundColor( + raw: raw != null ? new Option(raw) : default, hex: hex != null ? new Option(hex) : default, hsl: hsl != null ? new Option(hsl) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetEnvironmentResponseEnvironmentBackgroundColor value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.RawOption.IsSet && value.Raw != null) + { + writer.WritePropertyName("raw"); + serializer.Serialize(writer, value.Raw); + } + if (value.HexOption.IsSet && value.Hex != null) + { + writer.WritePropertyName("hex"); + serializer.Serialize(writer, value.Hex); + } + if (value.HslOption.IsSet && value.Hsl != null) + { + writer.WritePropertyName("hsl"); + serializer.Serialize(writer, value.Hsl); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetEnvironmentResponseEnvironmentLinkColorNewtonsoftConverter.cs b/Kinde.Api/Converters/GetEnvironmentResponseEnvironmentLinkColorNewtonsoftConverter.cs new file mode 100644 index 0000000..c0c830a --- /dev/null +++ b/Kinde.Api/Converters/GetEnvironmentResponseEnvironmentLinkColorNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetEnvironmentResponseEnvironmentLinkColor that handles the Option<> structure + /// + public class GetEnvironmentResponseEnvironmentLinkColorNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetEnvironmentResponseEnvironmentLinkColor ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetEnvironmentResponseEnvironmentLinkColor existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? raw = default(string?); + if (jsonObject["raw"] != null) + { + raw = jsonObject["raw"].ToObject(); + } + string? hex = default(string?); + if (jsonObject["hex"] != null) + { + hex = jsonObject["hex"].ToObject(); + } + string? hsl = default(string?); + if (jsonObject["hsl"] != null) + { + hsl = jsonObject["hsl"].ToObject(); + } + + return new GetEnvironmentResponseEnvironmentLinkColor( + raw: raw != null ? new Option(raw) : default, hex: hex != null ? new Option(hex) : default, hsl: hsl != null ? new Option(hsl) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetEnvironmentResponseEnvironmentLinkColor value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.RawOption.IsSet && value.Raw != null) + { + writer.WritePropertyName("raw"); + serializer.Serialize(writer, value.Raw); + } + if (value.HexOption.IsSet && value.Hex != null) + { + writer.WritePropertyName("hex"); + serializer.Serialize(writer, value.Hex); + } + if (value.HslOption.IsSet && value.Hsl != null) + { + writer.WritePropertyName("hsl"); + serializer.Serialize(writer, value.Hsl); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetEnvironmentResponseEnvironmentNewtonsoftConverter.cs b/Kinde.Api/Converters/GetEnvironmentResponseEnvironmentNewtonsoftConverter.cs new file mode 100644 index 0000000..1ecb50b --- /dev/null +++ b/Kinde.Api/Converters/GetEnvironmentResponseEnvironmentNewtonsoftConverter.cs @@ -0,0 +1,310 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetEnvironmentResponseEnvironment that handles the Option<> structure + /// + public class GetEnvironmentResponseEnvironmentNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetEnvironmentResponseEnvironment ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetEnvironmentResponseEnvironment existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + GetEnvironmentResponseEnvironment.ThemeCodeEnum? themeCode = default(GetEnvironmentResponseEnvironment.ThemeCodeEnum?); + if (jsonObject["theme_code"] != null) + { + var themeCodeStr = jsonObject["theme_code"].ToObject(); + if (!string.IsNullOrEmpty(themeCodeStr)) + { + themeCode = GetEnvironmentResponseEnvironment.ThemeCodeEnumFromString(themeCodeStr); + } + } + GetEnvironmentResponseEnvironment.ColorSchemeEnum? colorScheme = default(GetEnvironmentResponseEnvironment.ColorSchemeEnum?); + if (jsonObject["color_scheme"] != null) + { + var colorSchemeStr = jsonObject["color_scheme"].ToObject(); + if (!string.IsNullOrEmpty(colorSchemeStr)) + { + colorScheme = GetEnvironmentResponseEnvironment.ColorSchemeEnumFromString(colorSchemeStr); + } + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? hotjarSiteId = default(string?); + if (jsonObject["hotjar_site_id"] != null) + { + hotjarSiteId = jsonObject["hotjar_site_id"].ToObject(); + } + string? googleAnalyticsTag = default(string?); + if (jsonObject["google_analytics_tag"] != null) + { + googleAnalyticsTag = jsonObject["google_analytics_tag"].ToObject(); + } + bool? isDefault = default(bool?); + if (jsonObject["is_default"] != null) + { + isDefault = jsonObject["is_default"].ToObject(serializer); + } + bool? isLive = default(bool?); + if (jsonObject["is_live"] != null) + { + isLive = jsonObject["is_live"].ToObject(serializer); + } + string? kindeDomain = default(string?); + if (jsonObject["kinde_domain"] != null) + { + kindeDomain = jsonObject["kinde_domain"].ToObject(); + } + string? customDomain = default(string?); + if (jsonObject["custom_domain"] != null) + { + customDomain = jsonObject["custom_domain"].ToObject(); + } + string? logo = default(string?); + if (jsonObject["logo"] != null) + { + logo = jsonObject["logo"].ToObject(); + } + string? logoDark = default(string?); + if (jsonObject["logo_dark"] != null) + { + logoDark = jsonObject["logo_dark"].ToObject(); + } + string? faviconSvg = default(string?); + if (jsonObject["favicon_svg"] != null) + { + faviconSvg = jsonObject["favicon_svg"].ToObject(); + } + string? faviconFallback = default(string?); + if (jsonObject["favicon_fallback"] != null) + { + faviconFallback = jsonObject["favicon_fallback"].ToObject(); + } + GetEnvironmentResponseEnvironmentLinkColor? linkColor = default(GetEnvironmentResponseEnvironmentLinkColor?); + if (jsonObject["link_color"] != null) + { + linkColor = jsonObject["link_color"].ToObject(serializer); + } + GetEnvironmentResponseEnvironmentBackgroundColor? backgroundColor = default(GetEnvironmentResponseEnvironmentBackgroundColor?); + if (jsonObject["background_color"] != null) + { + backgroundColor = jsonObject["background_color"].ToObject(serializer); + } + GetEnvironmentResponseEnvironmentLinkColor? buttonColor = default(GetEnvironmentResponseEnvironmentLinkColor?); + if (jsonObject["button_color"] != null) + { + buttonColor = jsonObject["button_color"].ToObject(serializer); + } + GetEnvironmentResponseEnvironmentBackgroundColor? buttonTextColor = default(GetEnvironmentResponseEnvironmentBackgroundColor?); + if (jsonObject["button_text_color"] != null) + { + buttonTextColor = jsonObject["button_text_color"].ToObject(serializer); + } + GetEnvironmentResponseEnvironmentLinkColor? linkColorDark = default(GetEnvironmentResponseEnvironmentLinkColor?); + if (jsonObject["link_color_dark"] != null) + { + linkColorDark = jsonObject["link_color_dark"].ToObject(serializer); + } + GetEnvironmentResponseEnvironmentLinkColor? backgroundColorDark = default(GetEnvironmentResponseEnvironmentLinkColor?); + if (jsonObject["background_color_dark"] != null) + { + backgroundColorDark = jsonObject["background_color_dark"].ToObject(serializer); + } + GetEnvironmentResponseEnvironmentLinkColor? buttonTextColorDark = default(GetEnvironmentResponseEnvironmentLinkColor?); + if (jsonObject["button_text_color_dark"] != null) + { + buttonTextColorDark = jsonObject["button_text_color_dark"].ToObject(serializer); + } + GetEnvironmentResponseEnvironmentLinkColor? buttonColorDark = default(GetEnvironmentResponseEnvironmentLinkColor?); + if (jsonObject["button_color_dark"] != null) + { + buttonColorDark = jsonObject["button_color_dark"].ToObject(serializer); + } + int? buttonBorderRadius = default(int?); + if (jsonObject["button_border_radius"] != null) + { + buttonBorderRadius = jsonObject["button_border_radius"].ToObject(serializer); + } + int? cardBorderRadius = default(int?); + if (jsonObject["card_border_radius"] != null) + { + cardBorderRadius = jsonObject["card_border_radius"].ToObject(serializer); + } + int? inputBorderRadius = default(int?); + if (jsonObject["input_border_radius"] != null) + { + inputBorderRadius = jsonObject["input_border_radius"].ToObject(serializer); + } + string? createdOn = default(string?); + if (jsonObject["created_on"] != null) + { + createdOn = jsonObject["created_on"].ToObject(); + } + + return new GetEnvironmentResponseEnvironment( + themeCode: themeCode != null ? new Option(themeCode) : default, colorScheme: colorScheme != null ? new Option(colorScheme) : default, code: code != null ? new Option(code) : default, name: name != null ? new Option(name) : default, hotjarSiteId: hotjarSiteId != null ? new Option(hotjarSiteId) : default, googleAnalyticsTag: googleAnalyticsTag != null ? new Option(googleAnalyticsTag) : default, isDefault: isDefault != null ? new Option(isDefault) : default, isLive: isLive != null ? new Option(isLive) : default, kindeDomain: kindeDomain != null ? new Option(kindeDomain) : default, customDomain: customDomain != null ? new Option(customDomain) : default, logo: logo != null ? new Option(logo) : default, logoDark: logoDark != null ? new Option(logoDark) : default, faviconSvg: faviconSvg != null ? new Option(faviconSvg) : default, faviconFallback: faviconFallback != null ? new Option(faviconFallback) : default, linkColor: linkColor != null ? new Option(linkColor) : default, backgroundColor: backgroundColor != null ? new Option(backgroundColor) : default, buttonColor: buttonColor != null ? new Option(buttonColor) : default, buttonTextColor: buttonTextColor != null ? new Option(buttonTextColor) : default, linkColorDark: linkColorDark != null ? new Option(linkColorDark) : default, backgroundColorDark: backgroundColorDark != null ? new Option(backgroundColorDark) : default, buttonTextColorDark: buttonTextColorDark != null ? new Option(buttonTextColorDark) : default, buttonColorDark: buttonColorDark != null ? new Option(buttonColorDark) : default, buttonBorderRadius: buttonBorderRadius != null ? new Option(buttonBorderRadius) : default, cardBorderRadius: cardBorderRadius != null ? new Option(cardBorderRadius) : default, inputBorderRadius: inputBorderRadius != null ? new Option(inputBorderRadius) : default, createdOn: createdOn != null ? new Option(createdOn) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetEnvironmentResponseEnvironment value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.ThemeCodeOption.IsSet && value.ThemeCode != null) + { + writer.WritePropertyName("theme_code"); + var themeCodeStr = GetEnvironmentResponseEnvironment.ThemeCodeEnumToJsonValue(value.ThemeCode.Value); + writer.WriteValue(themeCodeStr); + } + if (value.ColorSchemeOption.IsSet && value.ColorScheme != null) + { + writer.WritePropertyName("color_scheme"); + var colorSchemeStr = GetEnvironmentResponseEnvironment.ColorSchemeEnumToJsonValue(value.ColorScheme.Value); + writer.WriteValue(colorSchemeStr); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.HotjarSiteIdOption.IsSet && value.HotjarSiteId != null) + { + writer.WritePropertyName("hotjar_site_id"); + serializer.Serialize(writer, value.HotjarSiteId); + } + if (value.GoogleAnalyticsTagOption.IsSet && value.GoogleAnalyticsTag != null) + { + writer.WritePropertyName("google_analytics_tag"); + serializer.Serialize(writer, value.GoogleAnalyticsTag); + } + if (value.IsDefaultOption.IsSet && value.IsDefault != null) + { + writer.WritePropertyName("is_default"); + serializer.Serialize(writer, value.IsDefault); + } + if (value.IsLiveOption.IsSet && value.IsLive != null) + { + writer.WritePropertyName("is_live"); + serializer.Serialize(writer, value.IsLive); + } + if (value.KindeDomainOption.IsSet && value.KindeDomain != null) + { + writer.WritePropertyName("kinde_domain"); + serializer.Serialize(writer, value.KindeDomain); + } + if (value.CustomDomainOption.IsSet && value.CustomDomain != null) + { + writer.WritePropertyName("custom_domain"); + serializer.Serialize(writer, value.CustomDomain); + } + if (value.LogoOption.IsSet && value.Logo != null) + { + writer.WritePropertyName("logo"); + serializer.Serialize(writer, value.Logo); + } + if (value.LogoDarkOption.IsSet && value.LogoDark != null) + { + writer.WritePropertyName("logo_dark"); + serializer.Serialize(writer, value.LogoDark); + } + if (value.FaviconSvgOption.IsSet && value.FaviconSvg != null) + { + writer.WritePropertyName("favicon_svg"); + serializer.Serialize(writer, value.FaviconSvg); + } + if (value.FaviconFallbackOption.IsSet && value.FaviconFallback != null) + { + writer.WritePropertyName("favicon_fallback"); + serializer.Serialize(writer, value.FaviconFallback); + } + if (value.LinkColorOption.IsSet && value.LinkColor != null) + { + writer.WritePropertyName("link_color"); + serializer.Serialize(writer, value.LinkColor); + } + if (value.BackgroundColorOption.IsSet && value.BackgroundColor != null) + { + writer.WritePropertyName("background_color"); + serializer.Serialize(writer, value.BackgroundColor); + } + if (value.ButtonColorOption.IsSet && value.ButtonColor != null) + { + writer.WritePropertyName("button_color"); + serializer.Serialize(writer, value.ButtonColor); + } + if (value.ButtonTextColorOption.IsSet && value.ButtonTextColor != null) + { + writer.WritePropertyName("button_text_color"); + serializer.Serialize(writer, value.ButtonTextColor); + } + if (value.LinkColorDarkOption.IsSet && value.LinkColorDark != null) + { + writer.WritePropertyName("link_color_dark"); + serializer.Serialize(writer, value.LinkColorDark); + } + if (value.BackgroundColorDarkOption.IsSet && value.BackgroundColorDark != null) + { + writer.WritePropertyName("background_color_dark"); + serializer.Serialize(writer, value.BackgroundColorDark); + } + if (value.ButtonTextColorDarkOption.IsSet && value.ButtonTextColorDark != null) + { + writer.WritePropertyName("button_text_color_dark"); + serializer.Serialize(writer, value.ButtonTextColorDark); + } + if (value.ButtonColorDarkOption.IsSet && value.ButtonColorDark != null) + { + writer.WritePropertyName("button_color_dark"); + serializer.Serialize(writer, value.ButtonColorDark); + } + if (value.ButtonBorderRadiusOption.IsSet && value.ButtonBorderRadius != null) + { + writer.WritePropertyName("button_border_radius"); + serializer.Serialize(writer, value.ButtonBorderRadius); + } + if (value.CardBorderRadiusOption.IsSet && value.CardBorderRadius != null) + { + writer.WritePropertyName("card_border_radius"); + serializer.Serialize(writer, value.CardBorderRadius); + } + if (value.InputBorderRadiusOption.IsSet && value.InputBorderRadius != null) + { + writer.WritePropertyName("input_border_radius"); + serializer.Serialize(writer, value.InputBorderRadius); + } + if (value.CreatedOnOption.IsSet && value.CreatedOn != null) + { + writer.WritePropertyName("created_on"); + serializer.Serialize(writer, value.CreatedOn); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetEnvironmentResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetEnvironmentResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..6f64835 --- /dev/null +++ b/Kinde.Api/Converters/GetEnvironmentResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetEnvironmentResponse that handles the Option<> structure + /// + public class GetEnvironmentResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetEnvironmentResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetEnvironmentResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + GetEnvironmentResponseEnvironment? varEnvironment = default(GetEnvironmentResponseEnvironment?); + if (jsonObject["environment"] != null) + { + varEnvironment = jsonObject["environment"].ToObject(serializer); + } + + return new GetEnvironmentResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, varEnvironment: varEnvironment != null ? new Option(varEnvironment) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetEnvironmentResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.VarEnvironmentOption.IsSet && value.VarEnvironment != null) + { + writer.WritePropertyName("environment"); + serializer.Serialize(writer, value.VarEnvironment); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetEnvironmentVariableResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetEnvironmentVariableResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..83e2122 --- /dev/null +++ b/Kinde.Api/Converters/GetEnvironmentVariableResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetEnvironmentVariableResponse that handles the Option<> structure + /// + public class GetEnvironmentVariableResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetEnvironmentVariableResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetEnvironmentVariableResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + EnvironmentVariable? environmentVariable = default(EnvironmentVariable?); + if (jsonObject["environment_variable"] != null) + { + environmentVariable = jsonObject["environment_variable"].ToObject(serializer); + } + + return new GetEnvironmentVariableResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, environmentVariable: environmentVariable != null ? new Option(environmentVariable) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetEnvironmentVariableResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.EnvironmentVariableOption.IsSet && value.EnvironmentVariable != null) + { + writer.WritePropertyName("environment_variable"); + serializer.Serialize(writer, value.EnvironmentVariable); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetEnvironmentVariablesResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetEnvironmentVariablesResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..343a279 --- /dev/null +++ b/Kinde.Api/Converters/GetEnvironmentVariablesResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetEnvironmentVariablesResponse that handles the Option<> structure + /// + public class GetEnvironmentVariablesResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetEnvironmentVariablesResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetEnvironmentVariablesResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + bool? hasMore = default(bool?); + if (jsonObject["has_more"] != null) + { + hasMore = jsonObject["has_more"].ToObject(serializer); + } + List environmentVariables = default(List); + if (jsonObject["environment_variables"] != null) + { + environmentVariables = jsonObject["environment_variables"].ToObject>(serializer); + } + + return new GetEnvironmentVariablesResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, hasMore: hasMore != null ? new Option(hasMore) : default, environmentVariables: environmentVariables != null ? new Option?>(environmentVariables) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetEnvironmentVariablesResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.HasMoreOption.IsSet && value.HasMore != null) + { + writer.WritePropertyName("has_more"); + serializer.Serialize(writer, value.HasMore); + } + if (value.EnvironmentVariablesOption.IsSet) + { + writer.WritePropertyName("environment_variables"); + serializer.Serialize(writer, value.EnvironmentVariables); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetEventResponseEventNewtonsoftConverter.cs b/Kinde.Api/Converters/GetEventResponseEventNewtonsoftConverter.cs new file mode 100644 index 0000000..f23d62e --- /dev/null +++ b/Kinde.Api/Converters/GetEventResponseEventNewtonsoftConverter.cs @@ -0,0 +1,90 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetEventResponseEvent that handles the Option<> structure + /// + public class GetEventResponseEventNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetEventResponseEvent ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetEventResponseEvent existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? type = default(string?); + if (jsonObject["type"] != null) + { + type = jsonObject["type"].ToObject(); + } + string? source = default(string?); + if (jsonObject["source"] != null) + { + source = jsonObject["source"].ToObject(); + } + string? eventId = default(string?); + if (jsonObject["event_id"] != null) + { + eventId = jsonObject["event_id"].ToObject(); + } + int? timestamp = default(int?); + if (jsonObject["timestamp"] != null) + { + timestamp = jsonObject["timestamp"].ToObject(serializer); + } + Object? data = default(Object?); + if (jsonObject["data"] != null) + { + data = jsonObject["data"].ToObject(serializer); + } + + return new GetEventResponseEvent( + type: type != null ? new Option(type) : default, source: source != null ? new Option(source) : default, eventId: eventId != null ? new Option(eventId) : default, timestamp: timestamp != null ? new Option(timestamp) : default, data: data != null ? new Option(data) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetEventResponseEvent value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.TypeOption.IsSet && value.Type != null) + { + writer.WritePropertyName("type"); + serializer.Serialize(writer, value.Type); + } + if (value.SourceOption.IsSet && value.Source != null) + { + writer.WritePropertyName("source"); + serializer.Serialize(writer, value.Source); + } + if (value.EventIdOption.IsSet && value.EventId != null) + { + writer.WritePropertyName("event_id"); + serializer.Serialize(writer, value.EventId); + } + if (value.TimestampOption.IsSet && value.Timestamp != null) + { + writer.WritePropertyName("timestamp"); + serializer.Serialize(writer, value.Timestamp); + } + if (value.DataOption.IsSet && value.Data != null) + { + writer.WritePropertyName("data"); + serializer.Serialize(writer, value.Data); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetEventResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetEventResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..29c2c0c --- /dev/null +++ b/Kinde.Api/Converters/GetEventResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetEventResponse that handles the Option<> structure + /// + public class GetEventResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetEventResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetEventResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + GetEventResponseEvent @event = default(GetEventResponseEvent); + if (jsonObject["event"] != null) + { + @event = jsonObject["event"].ToObject(serializer); + } + + return new GetEventResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, @event: @event != null ? new Option(@event) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetEventResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.EventOption.IsSet) + { + writer.WritePropertyName("event"); + serializer.Serialize(writer, value.Event); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetEventTypesResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetEventTypesResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..c11a275 --- /dev/null +++ b/Kinde.Api/Converters/GetEventTypesResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetEventTypesResponse that handles the Option<> structure + /// + public class GetEventTypesResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetEventTypesResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetEventTypesResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List eventTypes = default(List); + if (jsonObject["event_types"] != null) + { + eventTypes = jsonObject["event_types"].ToObject>(serializer); + } + + return new GetEventTypesResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, eventTypes: eventTypes != null ? new Option?>(eventTypes) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetEventTypesResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.EventTypesOption.IsSet) + { + writer.WritePropertyName("event_types"); + serializer.Serialize(writer, value.EventTypes); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetIdentitiesResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetIdentitiesResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..94523c9 --- /dev/null +++ b/Kinde.Api/Converters/GetIdentitiesResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetIdentitiesResponse that handles the Option<> structure + /// + public class GetIdentitiesResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetIdentitiesResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetIdentitiesResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List identities = default(List); + if (jsonObject["identities"] != null) + { + identities = jsonObject["identities"].ToObject>(serializer); + } + bool? hasMore = default(bool?); + if (jsonObject["has_more"] != null) + { + hasMore = jsonObject["has_more"].ToObject(serializer); + } + + return new GetIdentitiesResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, identities: identities != null ? new Option?>(identities) : default, hasMore: hasMore != null ? new Option(hasMore) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetIdentitiesResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.IdentitiesOption.IsSet) + { + writer.WritePropertyName("identities"); + serializer.Serialize(writer, value.Identities); + } + if (value.HasMoreOption.IsSet && value.HasMore != null) + { + writer.WritePropertyName("has_more"); + serializer.Serialize(writer, value.HasMore); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetIndustriesResponseIndustriesInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/GetIndustriesResponseIndustriesInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..dec6c8e --- /dev/null +++ b/Kinde.Api/Converters/GetIndustriesResponseIndustriesInnerNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetIndustriesResponseIndustriesInner that handles the Option<> structure + /// + public class GetIndustriesResponseIndustriesInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetIndustriesResponseIndustriesInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetIndustriesResponseIndustriesInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + + return new GetIndustriesResponseIndustriesInner( + key: key != null ? new Option(key) : default, name: name != null ? new Option(name) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetIndustriesResponseIndustriesInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetIndustriesResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetIndustriesResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..e23116d --- /dev/null +++ b/Kinde.Api/Converters/GetIndustriesResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetIndustriesResponse that handles the Option<> structure + /// + public class GetIndustriesResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetIndustriesResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetIndustriesResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List industries = default(List); + if (jsonObject["industries"] != null) + { + industries = jsonObject["industries"].ToObject>(serializer); + } + + return new GetIndustriesResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, industries: industries != null ? new Option?>(industries) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetIndustriesResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.IndustriesOption.IsSet) + { + writer.WritePropertyName("industries"); + serializer.Serialize(writer, value.Industries); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetOrganizationFeatureFlagsResponseFeatureFlagsValueNewtonsoftConverter.cs b/Kinde.Api/Converters/GetOrganizationFeatureFlagsResponseFeatureFlagsValueNewtonsoftConverter.cs new file mode 100644 index 0000000..e39c5b8 --- /dev/null +++ b/Kinde.Api/Converters/GetOrganizationFeatureFlagsResponseFeatureFlagsValueNewtonsoftConverter.cs @@ -0,0 +1,65 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetOrganizationFeatureFlagsResponseFeatureFlagsValue that handles the Option<> structure + /// + public class GetOrganizationFeatureFlagsResponseFeatureFlagsValueNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetOrganizationFeatureFlagsResponseFeatureFlagsValue ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetOrganizationFeatureFlagsResponseFeatureFlagsValue existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + GetOrganizationFeatureFlagsResponseFeatureFlagsValue.TypeEnum? type = default(GetOrganizationFeatureFlagsResponseFeatureFlagsValue.TypeEnum?); + if (jsonObject["type"] != null) + { + var typeStr = jsonObject["type"].ToObject(); + if (!string.IsNullOrEmpty(typeStr)) + { + type = GetOrganizationFeatureFlagsResponseFeatureFlagsValue.TypeEnumFromString(typeStr); + } + } + string? value = default(string?); + if (jsonObject["value"] != null) + { + value = jsonObject["value"].ToObject(); + } + + return new GetOrganizationFeatureFlagsResponseFeatureFlagsValue( + type: type != null ? new Option(type) : default, value: value != null ? new Option(value) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetOrganizationFeatureFlagsResponseFeatureFlagsValue value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.TypeOption.IsSet && value.Type != null) + { + writer.WritePropertyName("type"); + var typeStr = GetOrganizationFeatureFlagsResponseFeatureFlagsValue.TypeEnumToJsonValue(value.Type.Value); + writer.WriteValue(typeStr); + } + if (value.ValueOption.IsSet && value.Value != null) + { + writer.WritePropertyName("value"); + serializer.Serialize(writer, value.Value); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetOrganizationFeatureFlagsResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetOrganizationFeatureFlagsResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..e367794 --- /dev/null +++ b/Kinde.Api/Converters/GetOrganizationFeatureFlagsResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetOrganizationFeatureFlagsResponse that handles the Option<> structure + /// + public class GetOrganizationFeatureFlagsResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetOrganizationFeatureFlagsResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetOrganizationFeatureFlagsResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + Dictionary featureFlags = default(Dictionary); + if (jsonObject["feature_flags"] != null) + { + featureFlags = jsonObject["feature_flags"].ToObject>(serializer); + } + + return new GetOrganizationFeatureFlagsResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, featureFlags: featureFlags != null ? new Option>(featureFlags) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetOrganizationFeatureFlagsResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.FeatureFlagsOption.IsSet) + { + writer.WritePropertyName("feature_flags"); + serializer.Serialize(writer, value.FeatureFlags); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetOrganizationResponseBillingAgreementsInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/GetOrganizationResponseBillingAgreementsInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..59cede5 --- /dev/null +++ b/Kinde.Api/Converters/GetOrganizationResponseBillingAgreementsInnerNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetOrganizationResponseBillingAgreementsInner that handles the Option<> structure + /// + public class GetOrganizationResponseBillingAgreementsInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetOrganizationResponseBillingAgreementsInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetOrganizationResponseBillingAgreementsInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? planCode = default(string?); + if (jsonObject["plan_code"] != null) + { + planCode = jsonObject["plan_code"].ToObject(); + } + string? agreementId = default(string?); + if (jsonObject["agreement_id"] != null) + { + agreementId = jsonObject["agreement_id"].ToObject(); + } + + return new GetOrganizationResponseBillingAgreementsInner( + planCode: planCode != null ? new Option(planCode) : default, agreementId: agreementId != null ? new Option(agreementId) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetOrganizationResponseBillingAgreementsInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.PlanCodeOption.IsSet && value.PlanCode != null) + { + writer.WritePropertyName("plan_code"); + serializer.Serialize(writer, value.PlanCode); + } + if (value.AgreementIdOption.IsSet && value.AgreementId != null) + { + writer.WritePropertyName("agreement_id"); + serializer.Serialize(writer, value.AgreementId); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetOrganizationResponseBillingNewtonsoftConverter.cs b/Kinde.Api/Converters/GetOrganizationResponseBillingNewtonsoftConverter.cs new file mode 100644 index 0000000..7563e84 --- /dev/null +++ b/Kinde.Api/Converters/GetOrganizationResponseBillingNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetOrganizationResponseBilling that handles the Option<> structure + /// + public class GetOrganizationResponseBillingNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetOrganizationResponseBilling ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetOrganizationResponseBilling existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? billingCustomerId = default(string?); + if (jsonObject["billing_customer_id"] != null) + { + billingCustomerId = jsonObject["billing_customer_id"].ToObject(); + } + List agreements = default(List); + if (jsonObject["agreements"] != null) + { + agreements = jsonObject["agreements"].ToObject>(serializer); + } + + return new GetOrganizationResponseBilling( + billingCustomerId: billingCustomerId != null ? new Option(billingCustomerId) : default, agreements: agreements != null ? new Option?>(agreements) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetOrganizationResponseBilling value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.BillingCustomerIdOption.IsSet && value.BillingCustomerId != null) + { + writer.WritePropertyName("billing_customer_id"); + serializer.Serialize(writer, value.BillingCustomerId); + } + if (value.AgreementsOption.IsSet) + { + writer.WritePropertyName("agreements"); + serializer.Serialize(writer, value.Agreements); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetOrganizationResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetOrganizationResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..6933be7 --- /dev/null +++ b/Kinde.Api/Converters/GetOrganizationResponseNewtonsoftConverter.cs @@ -0,0 +1,320 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetOrganizationResponse that handles the Option<> structure + /// + public class GetOrganizationResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetOrganizationResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetOrganizationResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + GetOrganizationResponse.ThemeCodeEnum? themeCode = default(GetOrganizationResponse.ThemeCodeEnum?); + if (jsonObject["theme_code"] != null) + { + var themeCodeStr = jsonObject["theme_code"].ToObject(); + if (!string.IsNullOrEmpty(themeCodeStr)) + { + themeCode = GetOrganizationResponse.ThemeCodeEnumFromString(themeCodeStr); + } + } + GetOrganizationResponse.ColorSchemeEnum? colorScheme = default(GetOrganizationResponse.ColorSchemeEnum?); + if (jsonObject["color_scheme"] != null) + { + var colorSchemeStr = jsonObject["color_scheme"].ToObject(); + if (!string.IsNullOrEmpty(colorSchemeStr)) + { + colorScheme = GetOrganizationResponse.ColorSchemeEnumFromString(colorSchemeStr); + } + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? handle = default(string?); + if (jsonObject["handle"] != null) + { + handle = jsonObject["handle"].ToObject(); + } + bool? isDefault = default(bool?); + if (jsonObject["is_default"] != null) + { + isDefault = jsonObject["is_default"].ToObject(serializer); + } + string? externalId = default(string?); + if (jsonObject["external_id"] != null) + { + externalId = jsonObject["external_id"].ToObject(); + } + bool? isAutoMembershipEnabled = default(bool?); + if (jsonObject["is_auto_membership_enabled"] != null) + { + isAutoMembershipEnabled = jsonObject["is_auto_membership_enabled"].ToObject(serializer); + } + string? logo = default(string?); + if (jsonObject["logo"] != null) + { + logo = jsonObject["logo"].ToObject(); + } + string? logoDark = default(string?); + if (jsonObject["logo_dark"] != null) + { + logoDark = jsonObject["logo_dark"].ToObject(); + } + string? faviconSvg = default(string?); + if (jsonObject["favicon_svg"] != null) + { + faviconSvg = jsonObject["favicon_svg"].ToObject(); + } + string? faviconFallback = default(string?); + if (jsonObject["favicon_fallback"] != null) + { + faviconFallback = jsonObject["favicon_fallback"].ToObject(); + } + GetEnvironmentResponseEnvironmentLinkColor? linkColor = default(GetEnvironmentResponseEnvironmentLinkColor?); + if (jsonObject["link_color"] != null) + { + linkColor = jsonObject["link_color"].ToObject(serializer); + } + GetEnvironmentResponseEnvironmentBackgroundColor? backgroundColor = default(GetEnvironmentResponseEnvironmentBackgroundColor?); + if (jsonObject["background_color"] != null) + { + backgroundColor = jsonObject["background_color"].ToObject(serializer); + } + GetEnvironmentResponseEnvironmentLinkColor? buttonColor = default(GetEnvironmentResponseEnvironmentLinkColor?); + if (jsonObject["button_color"] != null) + { + buttonColor = jsonObject["button_color"].ToObject(serializer); + } + GetEnvironmentResponseEnvironmentBackgroundColor? buttonTextColor = default(GetEnvironmentResponseEnvironmentBackgroundColor?); + if (jsonObject["button_text_color"] != null) + { + buttonTextColor = jsonObject["button_text_color"].ToObject(serializer); + } + GetEnvironmentResponseEnvironmentLinkColor? linkColorDark = default(GetEnvironmentResponseEnvironmentLinkColor?); + if (jsonObject["link_color_dark"] != null) + { + linkColorDark = jsonObject["link_color_dark"].ToObject(serializer); + } + GetEnvironmentResponseEnvironmentLinkColor? backgroundColorDark = default(GetEnvironmentResponseEnvironmentLinkColor?); + if (jsonObject["background_color_dark"] != null) + { + backgroundColorDark = jsonObject["background_color_dark"].ToObject(serializer); + } + GetEnvironmentResponseEnvironmentLinkColor? buttonTextColorDark = default(GetEnvironmentResponseEnvironmentLinkColor?); + if (jsonObject["button_text_color_dark"] != null) + { + buttonTextColorDark = jsonObject["button_text_color_dark"].ToObject(serializer); + } + GetEnvironmentResponseEnvironmentLinkColor? buttonColorDark = default(GetEnvironmentResponseEnvironmentLinkColor?); + if (jsonObject["button_color_dark"] != null) + { + buttonColorDark = jsonObject["button_color_dark"].ToObject(serializer); + } + int? buttonBorderRadius = default(int?); + if (jsonObject["button_border_radius"] != null) + { + buttonBorderRadius = jsonObject["button_border_radius"].ToObject(serializer); + } + int? cardBorderRadius = default(int?); + if (jsonObject["card_border_radius"] != null) + { + cardBorderRadius = jsonObject["card_border_radius"].ToObject(serializer); + } + int? inputBorderRadius = default(int?); + if (jsonObject["input_border_radius"] != null) + { + inputBorderRadius = jsonObject["input_border_radius"].ToObject(serializer); + } + string? createdOn = default(string?); + if (jsonObject["created_on"] != null) + { + createdOn = jsonObject["created_on"].ToObject(); + } + string? senderName = default(string?); + if (jsonObject["sender_name"] != null) + { + senderName = jsonObject["sender_name"].ToObject(); + } + string? senderEmail = default(string?); + if (jsonObject["sender_email"] != null) + { + senderEmail = jsonObject["sender_email"].ToObject(); + } + GetOrganizationResponseBilling? billing = default(GetOrganizationResponseBilling?); + if (jsonObject["billing"] != null) + { + billing = jsonObject["billing"].ToObject(serializer); + } + + return new GetOrganizationResponse( + themeCode: themeCode != null ? new Option(themeCode) : default, colorScheme: colorScheme != null ? new Option(colorScheme) : default, code: code != null ? new Option(code) : default, name: name != null ? new Option(name) : default, handle: handle != null ? new Option(handle) : default, isDefault: isDefault != null ? new Option(isDefault) : default, externalId: externalId != null ? new Option(externalId) : default, isAutoMembershipEnabled: isAutoMembershipEnabled != null ? new Option(isAutoMembershipEnabled) : default, logo: logo != null ? new Option(logo) : default, logoDark: logoDark != null ? new Option(logoDark) : default, faviconSvg: faviconSvg != null ? new Option(faviconSvg) : default, faviconFallback: faviconFallback != null ? new Option(faviconFallback) : default, linkColor: linkColor != null ? new Option(linkColor) : default, backgroundColor: backgroundColor != null ? new Option(backgroundColor) : default, buttonColor: buttonColor != null ? new Option(buttonColor) : default, buttonTextColor: buttonTextColor != null ? new Option(buttonTextColor) : default, linkColorDark: linkColorDark != null ? new Option(linkColorDark) : default, backgroundColorDark: backgroundColorDark != null ? new Option(backgroundColorDark) : default, buttonTextColorDark: buttonTextColorDark != null ? new Option(buttonTextColorDark) : default, buttonColorDark: buttonColorDark != null ? new Option(buttonColorDark) : default, buttonBorderRadius: buttonBorderRadius != null ? new Option(buttonBorderRadius) : default, cardBorderRadius: cardBorderRadius != null ? new Option(cardBorderRadius) : default, inputBorderRadius: inputBorderRadius != null ? new Option(inputBorderRadius) : default, createdOn: createdOn != null ? new Option(createdOn) : default, senderName: senderName != null ? new Option(senderName) : default, senderEmail: senderEmail != null ? new Option(senderEmail) : default, billing: billing != null ? new Option(billing) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetOrganizationResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.ThemeCodeOption.IsSet && value.ThemeCode != null) + { + writer.WritePropertyName("theme_code"); + var themeCodeStr = GetOrganizationResponse.ThemeCodeEnumToJsonValue(value.ThemeCode.Value); + writer.WriteValue(themeCodeStr); + } + if (value.ColorSchemeOption.IsSet && value.ColorScheme != null) + { + writer.WritePropertyName("color_scheme"); + var colorSchemeStr = GetOrganizationResponse.ColorSchemeEnumToJsonValue(value.ColorScheme.Value); + writer.WriteValue(colorSchemeStr); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.HandleOption.IsSet && value.Handle != null) + { + writer.WritePropertyName("handle"); + serializer.Serialize(writer, value.Handle); + } + if (value.IsDefaultOption.IsSet && value.IsDefault != null) + { + writer.WritePropertyName("is_default"); + serializer.Serialize(writer, value.IsDefault); + } + if (value.ExternalIdOption.IsSet && value.ExternalId != null) + { + writer.WritePropertyName("external_id"); + serializer.Serialize(writer, value.ExternalId); + } + if (value.IsAutoMembershipEnabledOption.IsSet && value.IsAutoMembershipEnabled != null) + { + writer.WritePropertyName("is_auto_membership_enabled"); + serializer.Serialize(writer, value.IsAutoMembershipEnabled); + } + if (value.LogoOption.IsSet && value.Logo != null) + { + writer.WritePropertyName("logo"); + serializer.Serialize(writer, value.Logo); + } + if (value.LogoDarkOption.IsSet && value.LogoDark != null) + { + writer.WritePropertyName("logo_dark"); + serializer.Serialize(writer, value.LogoDark); + } + if (value.FaviconSvgOption.IsSet && value.FaviconSvg != null) + { + writer.WritePropertyName("favicon_svg"); + serializer.Serialize(writer, value.FaviconSvg); + } + if (value.FaviconFallbackOption.IsSet && value.FaviconFallback != null) + { + writer.WritePropertyName("favicon_fallback"); + serializer.Serialize(writer, value.FaviconFallback); + } + if (value.LinkColorOption.IsSet && value.LinkColor != null) + { + writer.WritePropertyName("link_color"); + serializer.Serialize(writer, value.LinkColor); + } + if (value.BackgroundColorOption.IsSet && value.BackgroundColor != null) + { + writer.WritePropertyName("background_color"); + serializer.Serialize(writer, value.BackgroundColor); + } + if (value.ButtonColorOption.IsSet && value.ButtonColor != null) + { + writer.WritePropertyName("button_color"); + serializer.Serialize(writer, value.ButtonColor); + } + if (value.ButtonTextColorOption.IsSet && value.ButtonTextColor != null) + { + writer.WritePropertyName("button_text_color"); + serializer.Serialize(writer, value.ButtonTextColor); + } + if (value.LinkColorDarkOption.IsSet && value.LinkColorDark != null) + { + writer.WritePropertyName("link_color_dark"); + serializer.Serialize(writer, value.LinkColorDark); + } + if (value.BackgroundColorDarkOption.IsSet && value.BackgroundColorDark != null) + { + writer.WritePropertyName("background_color_dark"); + serializer.Serialize(writer, value.BackgroundColorDark); + } + if (value.ButtonTextColorDarkOption.IsSet && value.ButtonTextColorDark != null) + { + writer.WritePropertyName("button_text_color_dark"); + serializer.Serialize(writer, value.ButtonTextColorDark); + } + if (value.ButtonColorDarkOption.IsSet && value.ButtonColorDark != null) + { + writer.WritePropertyName("button_color_dark"); + serializer.Serialize(writer, value.ButtonColorDark); + } + if (value.ButtonBorderRadiusOption.IsSet && value.ButtonBorderRadius != null) + { + writer.WritePropertyName("button_border_radius"); + serializer.Serialize(writer, value.ButtonBorderRadius); + } + if (value.CardBorderRadiusOption.IsSet && value.CardBorderRadius != null) + { + writer.WritePropertyName("card_border_radius"); + serializer.Serialize(writer, value.CardBorderRadius); + } + if (value.InputBorderRadiusOption.IsSet && value.InputBorderRadius != null) + { + writer.WritePropertyName("input_border_radius"); + serializer.Serialize(writer, value.InputBorderRadius); + } + if (value.CreatedOnOption.IsSet && value.CreatedOn != null) + { + writer.WritePropertyName("created_on"); + serializer.Serialize(writer, value.CreatedOn); + } + if (value.SenderNameOption.IsSet && value.SenderName != null) + { + writer.WritePropertyName("sender_name"); + serializer.Serialize(writer, value.SenderName); + } + if (value.SenderEmailOption.IsSet && value.SenderEmail != null) + { + writer.WritePropertyName("sender_email"); + serializer.Serialize(writer, value.SenderEmail); + } + if (value.BillingOption.IsSet && value.Billing != null) + { + writer.WritePropertyName("billing"); + serializer.Serialize(writer, value.Billing); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetOrganizationUsersResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetOrganizationUsersResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..6eaa03e --- /dev/null +++ b/Kinde.Api/Converters/GetOrganizationUsersResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetOrganizationUsersResponse that handles the Option<> structure + /// + public class GetOrganizationUsersResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetOrganizationUsersResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetOrganizationUsersResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List organizationUsers = default(List); + if (jsonObject["organization_users"] != null) + { + organizationUsers = jsonObject["organization_users"].ToObject>(serializer); + } + string? nextToken = default(string?); + if (jsonObject["next_token"] != null) + { + nextToken = jsonObject["next_token"].ToObject(); + } + + return new GetOrganizationUsersResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, organizationUsers: organizationUsers != null ? new Option?>(organizationUsers) : default, nextToken: nextToken != null ? new Option(nextToken) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetOrganizationUsersResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.OrganizationUsersOption.IsSet) + { + writer.WritePropertyName("organization_users"); + serializer.Serialize(writer, value.OrganizationUsers); + } + if (value.NextTokenOption.IsSet && value.NextToken != null) + { + writer.WritePropertyName("next_token"); + serializer.Serialize(writer, value.NextToken); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetOrganizationsResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetOrganizationsResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..6174b49 --- /dev/null +++ b/Kinde.Api/Converters/GetOrganizationsResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetOrganizationsResponse that handles the Option<> structure + /// + public class GetOrganizationsResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetOrganizationsResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetOrganizationsResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List organizations = default(List); + if (jsonObject["organizations"] != null) + { + organizations = jsonObject["organizations"].ToObject>(serializer); + } + string? nextToken = default(string?); + if (jsonObject["next_token"] != null) + { + nextToken = jsonObject["next_token"].ToObject(); + } + + return new GetOrganizationsResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, organizations: organizations != null ? new Option?>(organizations) : default, nextToken: nextToken != null ? new Option(nextToken) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetOrganizationsResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.OrganizationsOption.IsSet) + { + writer.WritePropertyName("organizations"); + serializer.Serialize(writer, value.Organizations); + } + if (value.NextTokenOption.IsSet && value.NextToken != null) + { + writer.WritePropertyName("next_token"); + serializer.Serialize(writer, value.NextToken); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetOrganizationsUserPermissionsResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetOrganizationsUserPermissionsResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..c445cd8 --- /dev/null +++ b/Kinde.Api/Converters/GetOrganizationsUserPermissionsResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetOrganizationsUserPermissionsResponse that handles the Option<> structure + /// + public class GetOrganizationsUserPermissionsResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetOrganizationsUserPermissionsResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetOrganizationsUserPermissionsResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List permissions = default(List); + if (jsonObject["permissions"] != null) + { + permissions = jsonObject["permissions"].ToObject>(serializer); + } + + return new GetOrganizationsUserPermissionsResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, permissions: permissions != null ? new Option?>(permissions) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetOrganizationsUserPermissionsResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.PermissionsOption.IsSet) + { + writer.WritePropertyName("permissions"); + serializer.Serialize(writer, value.Permissions); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetOrganizationsUserRolesResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetOrganizationsUserRolesResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..01f81a4 --- /dev/null +++ b/Kinde.Api/Converters/GetOrganizationsUserRolesResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetOrganizationsUserRolesResponse that handles the Option<> structure + /// + public class GetOrganizationsUserRolesResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetOrganizationsUserRolesResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetOrganizationsUserRolesResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List roles = default(List); + if (jsonObject["roles"] != null) + { + roles = jsonObject["roles"].ToObject>(serializer); + } + string? nextToken = default(string?); + if (jsonObject["next_token"] != null) + { + nextToken = jsonObject["next_token"].ToObject(); + } + + return new GetOrganizationsUserRolesResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, roles: roles != null ? new Option?>(roles) : default, nextToken: nextToken != null ? new Option(nextToken) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetOrganizationsUserRolesResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.RolesOption.IsSet) + { + writer.WritePropertyName("roles"); + serializer.Serialize(writer, value.Roles); + } + if (value.NextTokenOption.IsSet && value.NextToken != null) + { + writer.WritePropertyName("next_token"); + serializer.Serialize(writer, value.NextToken); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetPermissionsResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetPermissionsResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..0dd66c2 --- /dev/null +++ b/Kinde.Api/Converters/GetPermissionsResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetPermissionsResponse that handles the Option<> structure + /// + public class GetPermissionsResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetPermissionsResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetPermissionsResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List permissions = default(List); + if (jsonObject["permissions"] != null) + { + permissions = jsonObject["permissions"].ToObject>(serializer); + } + string? nextToken = default(string?); + if (jsonObject["next_token"] != null) + { + nextToken = jsonObject["next_token"].ToObject(); + } + + return new GetPermissionsResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, permissions: permissions != null ? new Option?>(permissions) : default, nextToken: nextToken != null ? new Option(nextToken) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetPermissionsResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.PermissionsOption.IsSet) + { + writer.WritePropertyName("permissions"); + serializer.Serialize(writer, value.Permissions); + } + if (value.NextTokenOption.IsSet && value.NextToken != null) + { + writer.WritePropertyName("next_token"); + serializer.Serialize(writer, value.NextToken); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetPropertiesResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetPropertiesResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..14c5e84 --- /dev/null +++ b/Kinde.Api/Converters/GetPropertiesResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetPropertiesResponse that handles the Option<> structure + /// + public class GetPropertiesResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetPropertiesResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetPropertiesResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List properties = default(List); + if (jsonObject["properties"] != null) + { + properties = jsonObject["properties"].ToObject>(serializer); + } + bool? hasMore = default(bool?); + if (jsonObject["has_more"] != null) + { + hasMore = jsonObject["has_more"].ToObject(serializer); + } + + return new GetPropertiesResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, properties: properties != null ? new Option?>(properties) : default, hasMore: hasMore != null ? new Option(hasMore) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetPropertiesResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.PropertiesOption.IsSet) + { + writer.WritePropertyName("properties"); + serializer.Serialize(writer, value.Properties); + } + if (value.HasMoreOption.IsSet && value.HasMore != null) + { + writer.WritePropertyName("has_more"); + serializer.Serialize(writer, value.HasMore); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetPropertyValuesResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetPropertyValuesResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..605acc0 --- /dev/null +++ b/Kinde.Api/Converters/GetPropertyValuesResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetPropertyValuesResponse that handles the Option<> structure + /// + public class GetPropertyValuesResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetPropertyValuesResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetPropertyValuesResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List properties = default(List); + if (jsonObject["properties"] != null) + { + properties = jsonObject["properties"].ToObject>(serializer); + } + string? nextToken = default(string?); + if (jsonObject["next_token"] != null) + { + nextToken = jsonObject["next_token"].ToObject(); + } + + return new GetPropertyValuesResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, properties: properties != null ? new Option?>(properties) : default, nextToken: nextToken != null ? new Option(nextToken) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetPropertyValuesResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.PropertiesOption.IsSet) + { + writer.WritePropertyName("properties"); + serializer.Serialize(writer, value.Properties); + } + if (value.NextTokenOption.IsSet && value.NextToken != null) + { + writer.WritePropertyName("next_token"); + serializer.Serialize(writer, value.NextToken); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetRedirectCallbackUrlsResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetRedirectCallbackUrlsResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..b816968 --- /dev/null +++ b/Kinde.Api/Converters/GetRedirectCallbackUrlsResponseNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetRedirectCallbackUrlsResponse that handles the Option<> structure + /// + public class GetRedirectCallbackUrlsResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetRedirectCallbackUrlsResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetRedirectCallbackUrlsResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + List redirectUrls = default(List); + if (jsonObject["redirect_urls"] != null) + { + redirectUrls = jsonObject["redirect_urls"].ToObject>(serializer); + } + + return new GetRedirectCallbackUrlsResponse( + redirectUrls: redirectUrls != null ? new Option?>(redirectUrls) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetRedirectCallbackUrlsResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.RedirectUrlsOption.IsSet) + { + writer.WritePropertyName("redirect_urls"); + serializer.Serialize(writer, value.RedirectUrls); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetRoleResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetRoleResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..a677466 --- /dev/null +++ b/Kinde.Api/Converters/GetRoleResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetRoleResponse that handles the Option<> structure + /// + public class GetRoleResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetRoleResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetRoleResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + GetRoleResponseRole? role = default(GetRoleResponseRole?); + if (jsonObject["role"] != null) + { + role = jsonObject["role"].ToObject(serializer); + } + + return new GetRoleResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, role: role != null ? new Option(role) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetRoleResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.RoleOption.IsSet && value.Role != null) + { + writer.WritePropertyName("role"); + serializer.Serialize(writer, value.Role); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetRoleResponseRoleNewtonsoftConverter.cs b/Kinde.Api/Converters/GetRoleResponseRoleNewtonsoftConverter.cs new file mode 100644 index 0000000..26672ce --- /dev/null +++ b/Kinde.Api/Converters/GetRoleResponseRoleNewtonsoftConverter.cs @@ -0,0 +1,90 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetRoleResponseRole that handles the Option<> structure + /// + public class GetRoleResponseRoleNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetRoleResponseRole ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetRoleResponseRole existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? description = default(string?); + if (jsonObject["description"] != null) + { + description = jsonObject["description"].ToObject(); + } + bool? isDefaultRole = default(bool?); + if (jsonObject["is_default_role"] != null) + { + isDefaultRole = jsonObject["is_default_role"].ToObject(serializer); + } + + return new GetRoleResponseRole( + id: id != null ? new Option(id) : default, key: key != null ? new Option(key) : default, name: name != null ? new Option(name) : default, description: description != null ? new Option(description) : default, isDefaultRole: isDefaultRole != null ? new Option(isDefaultRole) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetRoleResponseRole value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.DescriptionOption.IsSet && value.Description != null) + { + writer.WritePropertyName("description"); + serializer.Serialize(writer, value.Description); + } + if (value.IsDefaultRoleOption.IsSet && value.IsDefaultRole != null) + { + writer.WritePropertyName("is_default_role"); + serializer.Serialize(writer, value.IsDefaultRole); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetRolesResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetRolesResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..1ce1984 --- /dev/null +++ b/Kinde.Api/Converters/GetRolesResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetRolesResponse that handles the Option<> structure + /// + public class GetRolesResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetRolesResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetRolesResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List roles = default(List); + if (jsonObject["roles"] != null) + { + roles = jsonObject["roles"].ToObject>(serializer); + } + string? nextToken = default(string?); + if (jsonObject["next_token"] != null) + { + nextToken = jsonObject["next_token"].ToObject(); + } + + return new GetRolesResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, roles: roles != null ? new Option?>(roles) : default, nextToken: nextToken != null ? new Option(nextToken) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetRolesResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.RolesOption.IsSet) + { + writer.WritePropertyName("roles"); + serializer.Serialize(writer, value.Roles); + } + if (value.NextTokenOption.IsSet && value.NextToken != null) + { + writer.WritePropertyName("next_token"); + serializer.Serialize(writer, value.NextToken); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetSubscriberResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetSubscriberResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..0d1c418 --- /dev/null +++ b/Kinde.Api/Converters/GetSubscriberResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetSubscriberResponse that handles the Option<> structure + /// + public class GetSubscriberResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetSubscriberResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetSubscriberResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List subscribers = default(List); + if (jsonObject["subscribers"] != null) + { + subscribers = jsonObject["subscribers"].ToObject>(serializer); + } + + return new GetSubscriberResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, subscribers: subscribers != null ? new Option?>(subscribers) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetSubscriberResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.SubscribersOption.IsSet) + { + writer.WritePropertyName("subscribers"); + serializer.Serialize(writer, value.Subscribers); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetSubscribersResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetSubscribersResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..7a7b736 --- /dev/null +++ b/Kinde.Api/Converters/GetSubscribersResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetSubscribersResponse that handles the Option<> structure + /// + public class GetSubscribersResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetSubscribersResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetSubscribersResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List subscribers = default(List); + if (jsonObject["subscribers"] != null) + { + subscribers = jsonObject["subscribers"].ToObject>(serializer); + } + string? nextToken = default(string?); + if (jsonObject["next_token"] != null) + { + nextToken = jsonObject["next_token"].ToObject(); + } + + return new GetSubscribersResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, subscribers: subscribers != null ? new Option?>(subscribers) : default, nextToken: nextToken != null ? new Option(nextToken) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetSubscribersResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.SubscribersOption.IsSet) + { + writer.WritePropertyName("subscribers"); + serializer.Serialize(writer, value.Subscribers); + } + if (value.NextTokenOption.IsSet && value.NextToken != null) + { + writer.WritePropertyName("next_token"); + serializer.Serialize(writer, value.NextToken); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetTimezonesResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetTimezonesResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..b56b1a8 --- /dev/null +++ b/Kinde.Api/Converters/GetTimezonesResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetTimezonesResponse that handles the Option<> structure + /// + public class GetTimezonesResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetTimezonesResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetTimezonesResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List timezones = default(List); + if (jsonObject["timezones"] != null) + { + timezones = jsonObject["timezones"].ToObject>(serializer); + } + + return new GetTimezonesResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, timezones: timezones != null ? new Option?>(timezones) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetTimezonesResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.TimezonesOption.IsSet) + { + writer.WritePropertyName("timezones"); + serializer.Serialize(writer, value.Timezones); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetTimezonesResponseTimezonesInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/GetTimezonesResponseTimezonesInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..a69ae1e --- /dev/null +++ b/Kinde.Api/Converters/GetTimezonesResponseTimezonesInnerNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetTimezonesResponseTimezonesInner that handles the Option<> structure + /// + public class GetTimezonesResponseTimezonesInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetTimezonesResponseTimezonesInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetTimezonesResponseTimezonesInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + + return new GetTimezonesResponseTimezonesInner( + key: key != null ? new Option(key) : default, name: name != null ? new Option(name) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetTimezonesResponseTimezonesInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetUserMfaResponseMfaNewtonsoftConverter.cs b/Kinde.Api/Converters/GetUserMfaResponseMfaNewtonsoftConverter.cs new file mode 100644 index 0000000..1e07f75 --- /dev/null +++ b/Kinde.Api/Converters/GetUserMfaResponseMfaNewtonsoftConverter.cs @@ -0,0 +1,110 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetUserMfaResponseMfa that handles the Option<> structure + /// + public class GetUserMfaResponseMfaNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetUserMfaResponseMfa ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetUserMfaResponseMfa existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? type = default(string?); + if (jsonObject["type"] != null) + { + type = jsonObject["type"].ToObject(); + } + DateTimeOffset? createdOn = default(DateTimeOffset?); + if (jsonObject["created_on"] != null) + { + createdOn = jsonObject["created_on"].ToObject(serializer); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + bool? isVerified = default(bool?); + if (jsonObject["is_verified"] != null) + { + isVerified = jsonObject["is_verified"].ToObject(serializer); + } + int? usageCount = default(int?); + if (jsonObject["usage_count"] != null) + { + usageCount = jsonObject["usage_count"].ToObject(serializer); + } + DateTimeOffset? lastUsedOn = default(DateTimeOffset?); + if (jsonObject["last_used_on"] != null) + { + lastUsedOn = jsonObject["last_used_on"].ToObject(serializer); + } + + return new GetUserMfaResponseMfa( + id: id != null ? new Option(id) : default, type: type != null ? new Option(type) : default, createdOn: createdOn != null ? new Option(createdOn) : default, name: name != null ? new Option(name) : default, isVerified: isVerified != null ? new Option(isVerified) : default, usageCount: usageCount != null ? new Option(usageCount) : default, lastUsedOn: lastUsedOn != null ? new Option(lastUsedOn) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetUserMfaResponseMfa value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.TypeOption.IsSet && value.Type != null) + { + writer.WritePropertyName("type"); + serializer.Serialize(writer, value.Type); + } + if (value.CreatedOnOption.IsSet && value.CreatedOn != null) + { + writer.WritePropertyName("created_on"); + serializer.Serialize(writer, value.CreatedOn); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.IsVerifiedOption.IsSet && value.IsVerified != null) + { + writer.WritePropertyName("is_verified"); + serializer.Serialize(writer, value.IsVerified); + } + if (value.UsageCountOption.IsSet && value.UsageCount != null) + { + writer.WritePropertyName("usage_count"); + serializer.Serialize(writer, value.UsageCount); + } + if (value.LastUsedOnOption.IsSet && value.LastUsedOn != null) + { + writer.WritePropertyName("last_used_on"); + serializer.Serialize(writer, value.LastUsedOn); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetUserMfaResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetUserMfaResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..1a80b71 --- /dev/null +++ b/Kinde.Api/Converters/GetUserMfaResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetUserMfaResponse that handles the Option<> structure + /// + public class GetUserMfaResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetUserMfaResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetUserMfaResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + GetUserMfaResponseMfa? mfa = default(GetUserMfaResponseMfa?); + if (jsonObject["mfa"] != null) + { + mfa = jsonObject["mfa"].ToObject(serializer); + } + + return new GetUserMfaResponse( + message: message != null ? new Option(message) : default, code: code != null ? new Option(code) : default, mfa: mfa != null ? new Option(mfa) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetUserMfaResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MfaOption.IsSet && value.Mfa != null) + { + writer.WritePropertyName("mfa"); + serializer.Serialize(writer, value.Mfa); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetUserSessionsResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetUserSessionsResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..9c0a0dc --- /dev/null +++ b/Kinde.Api/Converters/GetUserSessionsResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetUserSessionsResponse that handles the Option<> structure + /// + public class GetUserSessionsResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetUserSessionsResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetUserSessionsResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + bool? hasMore = default(bool?); + if (jsonObject["has_more"] != null) + { + hasMore = jsonObject["has_more"].ToObject(serializer); + } + List sessions = default(List); + if (jsonObject["sessions"] != null) + { + sessions = jsonObject["sessions"].ToObject>(serializer); + } + + return new GetUserSessionsResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, hasMore: hasMore != null ? new Option(hasMore) : default, sessions: sessions != null ? new Option?>(sessions) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetUserSessionsResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.HasMoreOption.IsSet && value.HasMore != null) + { + writer.WritePropertyName("has_more"); + serializer.Serialize(writer, value.HasMore); + } + if (value.SessionsOption.IsSet) + { + writer.WritePropertyName("sessions"); + serializer.Serialize(writer, value.Sessions); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetUserSessionsResponseSessionsInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/GetUserSessionsResponseSessionsInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..4e5c93d --- /dev/null +++ b/Kinde.Api/Converters/GetUserSessionsResponseSessionsInnerNewtonsoftConverter.cs @@ -0,0 +1,160 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetUserSessionsResponseSessionsInner that handles the Option<> structure + /// + public class GetUserSessionsResponseSessionsInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetUserSessionsResponseSessionsInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetUserSessionsResponseSessionsInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? userId = default(string?); + if (jsonObject["user_id"] != null) + { + userId = jsonObject["user_id"].ToObject(); + } + string? orgCode = default(string?); + if (jsonObject["org_code"] != null) + { + orgCode = jsonObject["org_code"].ToObject(); + } + string? clientId = default(string?); + if (jsonObject["client_id"] != null) + { + clientId = jsonObject["client_id"].ToObject(); + } + DateTimeOffset? expiresOn = default(DateTimeOffset?); + if (jsonObject["expires_on"] != null) + { + expiresOn = jsonObject["expires_on"].ToObject(serializer); + } + string? sessionId = default(string?); + if (jsonObject["session_id"] != null) + { + sessionId = jsonObject["session_id"].ToObject(); + } + DateTimeOffset? startedOn = default(DateTimeOffset?); + if (jsonObject["started_on"] != null) + { + startedOn = jsonObject["started_on"].ToObject(serializer); + } + DateTimeOffset? updatedOn = default(DateTimeOffset?); + if (jsonObject["updated_on"] != null) + { + updatedOn = jsonObject["updated_on"].ToObject(serializer); + } + string? connectionId = default(string?); + if (jsonObject["connection_id"] != null) + { + connectionId = jsonObject["connection_id"].ToObject(); + } + string? lastIpAddress = default(string?); + if (jsonObject["last_ip_address"] != null) + { + lastIpAddress = jsonObject["last_ip_address"].ToObject(); + } + string? lastUserAgent = default(string?); + if (jsonObject["last_user_agent"] != null) + { + lastUserAgent = jsonObject["last_user_agent"].ToObject(); + } + string? initialIpAddress = default(string?); + if (jsonObject["initial_ip_address"] != null) + { + initialIpAddress = jsonObject["initial_ip_address"].ToObject(); + } + string? initialUserAgent = default(string?); + if (jsonObject["initial_user_agent"] != null) + { + initialUserAgent = jsonObject["initial_user_agent"].ToObject(); + } + + return new GetUserSessionsResponseSessionsInner( + userId: userId != null ? new Option(userId) : default, orgCode: orgCode != null ? new Option(orgCode) : default, clientId: clientId != null ? new Option(clientId) : default, expiresOn: expiresOn != null ? new Option(expiresOn) : default, sessionId: sessionId != null ? new Option(sessionId) : default, startedOn: startedOn != null ? new Option(startedOn) : default, updatedOn: updatedOn != null ? new Option(updatedOn) : default, connectionId: connectionId != null ? new Option(connectionId) : default, lastIpAddress: lastIpAddress != null ? new Option(lastIpAddress) : default, lastUserAgent: lastUserAgent != null ? new Option(lastUserAgent) : default, initialIpAddress: initialIpAddress != null ? new Option(initialIpAddress) : default, initialUserAgent: initialUserAgent != null ? new Option(initialUserAgent) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetUserSessionsResponseSessionsInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.UserIdOption.IsSet && value.UserId != null) + { + writer.WritePropertyName("user_id"); + serializer.Serialize(writer, value.UserId); + } + if (value.OrgCodeOption.IsSet && value.OrgCode != null) + { + writer.WritePropertyName("org_code"); + serializer.Serialize(writer, value.OrgCode); + } + if (value.ClientIdOption.IsSet && value.ClientId != null) + { + writer.WritePropertyName("client_id"); + serializer.Serialize(writer, value.ClientId); + } + if (value.ExpiresOnOption.IsSet && value.ExpiresOn != null) + { + writer.WritePropertyName("expires_on"); + serializer.Serialize(writer, value.ExpiresOn); + } + if (value.SessionIdOption.IsSet && value.SessionId != null) + { + writer.WritePropertyName("session_id"); + serializer.Serialize(writer, value.SessionId); + } + if (value.StartedOnOption.IsSet && value.StartedOn != null) + { + writer.WritePropertyName("started_on"); + serializer.Serialize(writer, value.StartedOn); + } + if (value.UpdatedOnOption.IsSet && value.UpdatedOn != null) + { + writer.WritePropertyName("updated_on"); + serializer.Serialize(writer, value.UpdatedOn); + } + if (value.ConnectionIdOption.IsSet && value.ConnectionId != null) + { + writer.WritePropertyName("connection_id"); + serializer.Serialize(writer, value.ConnectionId); + } + if (value.LastIpAddressOption.IsSet && value.LastIpAddress != null) + { + writer.WritePropertyName("last_ip_address"); + serializer.Serialize(writer, value.LastIpAddress); + } + if (value.LastUserAgentOption.IsSet && value.LastUserAgent != null) + { + writer.WritePropertyName("last_user_agent"); + serializer.Serialize(writer, value.LastUserAgent); + } + if (value.InitialIpAddressOption.IsSet && value.InitialIpAddress != null) + { + writer.WritePropertyName("initial_ip_address"); + serializer.Serialize(writer, value.InitialIpAddress); + } + if (value.InitialUserAgentOption.IsSet && value.InitialUserAgent != null) + { + writer.WritePropertyName("initial_user_agent"); + serializer.Serialize(writer, value.InitialUserAgent); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/GetWebhooksResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/GetWebhooksResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..20b14e7 --- /dev/null +++ b/Kinde.Api/Converters/GetWebhooksResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for GetWebhooksResponse that handles the Option<> structure + /// + public class GetWebhooksResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override GetWebhooksResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, GetWebhooksResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List webhooks = default(List); + if (jsonObject["webhooks"] != null) + { + webhooks = jsonObject["webhooks"].ToObject>(serializer); + } + + return new GetWebhooksResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, webhooks: webhooks != null ? new Option?>(webhooks) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, GetWebhooksResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.WebhooksOption.IsSet) + { + writer.WritePropertyName("webhooks"); + serializer.Serialize(writer, value.Webhooks); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/IdentityNewtonsoftConverter.cs b/Kinde.Api/Converters/IdentityNewtonsoftConverter.cs new file mode 100644 index 0000000..327540f --- /dev/null +++ b/Kinde.Api/Converters/IdentityNewtonsoftConverter.cs @@ -0,0 +1,130 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for Identity that handles the Option<> structure + /// + public class IdentityNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override Identity ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, Identity existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? type = default(string?); + if (jsonObject["type"] != null) + { + type = jsonObject["type"].ToObject(); + } + bool? isConfirmed = default(bool?); + if (jsonObject["is_confirmed"] != null) + { + isConfirmed = jsonObject["is_confirmed"].ToObject(serializer); + } + string? createdOn = default(string?); + if (jsonObject["created_on"] != null) + { + createdOn = jsonObject["created_on"].ToObject(); + } + string? lastLoginOn = default(string?); + if (jsonObject["last_login_on"] != null) + { + lastLoginOn = jsonObject["last_login_on"].ToObject(); + } + int? totalLogins = default(int?); + if (jsonObject["total_logins"] != null) + { + totalLogins = jsonObject["total_logins"].ToObject(serializer); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? email = default(string?); + if (jsonObject["email"] != null) + { + email = jsonObject["email"].ToObject(); + } + bool? isPrimary = default(bool?); + if (jsonObject["is_primary"] != null) + { + isPrimary = jsonObject["is_primary"].ToObject(serializer); + } + + return new Identity( + id: id != null ? new Option(id) : default, type: type != null ? new Option(type) : default, isConfirmed: isConfirmed != null ? new Option(isConfirmed) : default, createdOn: createdOn != null ? new Option(createdOn) : default, lastLoginOn: lastLoginOn != null ? new Option(lastLoginOn) : default, totalLogins: totalLogins != null ? new Option(totalLogins) : default, name: name != null ? new Option(name) : default, email: email != null ? new Option(email) : default, isPrimary: isPrimary != null ? new Option(isPrimary) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, Identity value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.TypeOption.IsSet && value.Type != null) + { + writer.WritePropertyName("type"); + serializer.Serialize(writer, value.Type); + } + if (value.IsConfirmedOption.IsSet && value.IsConfirmed != null) + { + writer.WritePropertyName("is_confirmed"); + serializer.Serialize(writer, value.IsConfirmed); + } + if (value.CreatedOnOption.IsSet && value.CreatedOn != null) + { + writer.WritePropertyName("created_on"); + serializer.Serialize(writer, value.CreatedOn); + } + if (value.LastLoginOnOption.IsSet && value.LastLoginOn != null) + { + writer.WritePropertyName("last_login_on"); + serializer.Serialize(writer, value.LastLoginOn); + } + if (value.TotalLoginsOption.IsSet && value.TotalLogins != null) + { + writer.WritePropertyName("total_logins"); + serializer.Serialize(writer, value.TotalLogins); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.EmailOption.IsSet && value.Email != null) + { + writer.WritePropertyName("email"); + serializer.Serialize(writer, value.Email); + } + if (value.IsPrimaryOption.IsSet && value.IsPrimary != null) + { + writer.WritePropertyName("is_primary"); + serializer.Serialize(writer, value.IsPrimary); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/LogoutRedirectUrlsNewtonsoftConverter.cs b/Kinde.Api/Converters/LogoutRedirectUrlsNewtonsoftConverter.cs new file mode 100644 index 0000000..47f74de --- /dev/null +++ b/Kinde.Api/Converters/LogoutRedirectUrlsNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for LogoutRedirectUrls that handles the Option<> structure + /// + public class LogoutRedirectUrlsNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override LogoutRedirectUrls ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, LogoutRedirectUrls existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + List logoutUrls = default(List); + if (jsonObject["logout_urls"] != null) + { + logoutUrls = jsonObject["logout_urls"].ToObject>(serializer); + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + + return new LogoutRedirectUrls( + logoutUrls: logoutUrls != null ? new Option?>(logoutUrls) : default, code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, LogoutRedirectUrls value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.LogoutUrlsOption.IsSet) + { + writer.WritePropertyName("logout_urls"); + serializer.Serialize(writer, value.LogoutUrls); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/NotFoundResponseErrorsNewtonsoftConverter.cs b/Kinde.Api/Converters/NotFoundResponseErrorsNewtonsoftConverter.cs new file mode 100644 index 0000000..dcf8579 --- /dev/null +++ b/Kinde.Api/Converters/NotFoundResponseErrorsNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for NotFoundResponseErrors that handles the Option<> structure + /// + public class NotFoundResponseErrorsNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override NotFoundResponseErrors ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, NotFoundResponseErrors existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + + return new NotFoundResponseErrors( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, NotFoundResponseErrors value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/NotFoundResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/NotFoundResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..2d3f9b5 --- /dev/null +++ b/Kinde.Api/Converters/NotFoundResponseNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for NotFoundResponse that handles the Option<> structure + /// + public class NotFoundResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override NotFoundResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, NotFoundResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + NotFoundResponseErrors? errors = default(NotFoundResponseErrors?); + if (jsonObject["errors"] != null) + { + errors = jsonObject["errors"].ToObject(serializer); + } + + return new NotFoundResponse( + errors: errors != null ? new Option(errors) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, NotFoundResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.ErrorsOption.IsSet && value.Errors != null) + { + writer.WritePropertyName("errors"); + serializer.Serialize(writer, value.Errors); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/OrganizationItemSchemaNewtonsoftConverter.cs b/Kinde.Api/Converters/OrganizationItemSchemaNewtonsoftConverter.cs new file mode 100644 index 0000000..ea2461b --- /dev/null +++ b/Kinde.Api/Converters/OrganizationItemSchemaNewtonsoftConverter.cs @@ -0,0 +1,100 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for OrganizationItemSchema that handles the Option<> structure + /// + public class OrganizationItemSchemaNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override OrganizationItemSchema ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, OrganizationItemSchema existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? handle = default(string?); + if (jsonObject["handle"] != null) + { + handle = jsonObject["handle"].ToObject(); + } + bool? isDefault = default(bool?); + if (jsonObject["is_default"] != null) + { + isDefault = jsonObject["is_default"].ToObject(serializer); + } + string? externalId = default(string?); + if (jsonObject["external_id"] != null) + { + externalId = jsonObject["external_id"].ToObject(); + } + bool? isAutoMembershipEnabled = default(bool?); + if (jsonObject["is_auto_membership_enabled"] != null) + { + isAutoMembershipEnabled = jsonObject["is_auto_membership_enabled"].ToObject(serializer); + } + + return new OrganizationItemSchema( + code: code != null ? new Option(code) : default, name: name != null ? new Option(name) : default, handle: handle != null ? new Option(handle) : default, isDefault: isDefault != null ? new Option(isDefault) : default, externalId: externalId != null ? new Option(externalId) : default, isAutoMembershipEnabled: isAutoMembershipEnabled != null ? new Option(isAutoMembershipEnabled) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, OrganizationItemSchema value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.HandleOption.IsSet && value.Handle != null) + { + writer.WritePropertyName("handle"); + serializer.Serialize(writer, value.Handle); + } + if (value.IsDefaultOption.IsSet && value.IsDefault != null) + { + writer.WritePropertyName("is_default"); + serializer.Serialize(writer, value.IsDefault); + } + if (value.ExternalIdOption.IsSet && value.ExternalId != null) + { + writer.WritePropertyName("external_id"); + serializer.Serialize(writer, value.ExternalId); + } + if (value.IsAutoMembershipEnabledOption.IsSet && value.IsAutoMembershipEnabled != null) + { + writer.WritePropertyName("is_auto_membership_enabled"); + serializer.Serialize(writer, value.IsAutoMembershipEnabled); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/OrganizationUserNewtonsoftConverter.cs b/Kinde.Api/Converters/OrganizationUserNewtonsoftConverter.cs new file mode 100644 index 0000000..9d6e0a6 --- /dev/null +++ b/Kinde.Api/Converters/OrganizationUserNewtonsoftConverter.cs @@ -0,0 +1,130 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for OrganizationUser that handles the Option<> structure + /// + public class OrganizationUserNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override OrganizationUser ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, OrganizationUser existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? email = default(string?); + if (jsonObject["email"] != null) + { + email = jsonObject["email"].ToObject(); + } + string? fullName = default(string?); + if (jsonObject["full_name"] != null) + { + fullName = jsonObject["full_name"].ToObject(); + } + string? lastName = default(string?); + if (jsonObject["last_name"] != null) + { + lastName = jsonObject["last_name"].ToObject(); + } + string? firstName = default(string?); + if (jsonObject["first_name"] != null) + { + firstName = jsonObject["first_name"].ToObject(); + } + string? picture = default(string?); + if (jsonObject["picture"] != null) + { + picture = jsonObject["picture"].ToObject(); + } + string? joinedOn = default(string?); + if (jsonObject["joined_on"] != null) + { + joinedOn = jsonObject["joined_on"].ToObject(); + } + string? lastAccessedOn = default(string?); + if (jsonObject["last_accessed_on"] != null) + { + lastAccessedOn = jsonObject["last_accessed_on"].ToObject(); + } + List roles = default(List); + if (jsonObject["roles"] != null) + { + roles = jsonObject["roles"].ToObject>(serializer); + } + + return new OrganizationUser( + id: id != null ? new Option(id) : default, email: email != null ? new Option(email) : default, fullName: fullName != null ? new Option(fullName) : default, lastName: lastName != null ? new Option(lastName) : default, firstName: firstName != null ? new Option(firstName) : default, picture: picture != null ? new Option(picture) : default, joinedOn: joinedOn != null ? new Option(joinedOn) : default, lastAccessedOn: lastAccessedOn != null ? new Option(lastAccessedOn) : default, roles: roles != null ? new Option?>(roles) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, OrganizationUser value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.EmailOption.IsSet && value.Email != null) + { + writer.WritePropertyName("email"); + serializer.Serialize(writer, value.Email); + } + if (value.FullNameOption.IsSet && value.FullName != null) + { + writer.WritePropertyName("full_name"); + serializer.Serialize(writer, value.FullName); + } + if (value.LastNameOption.IsSet && value.LastName != null) + { + writer.WritePropertyName("last_name"); + serializer.Serialize(writer, value.LastName); + } + if (value.FirstNameOption.IsSet && value.FirstName != null) + { + writer.WritePropertyName("first_name"); + serializer.Serialize(writer, value.FirstName); + } + if (value.PictureOption.IsSet && value.Picture != null) + { + writer.WritePropertyName("picture"); + serializer.Serialize(writer, value.Picture); + } + if (value.JoinedOnOption.IsSet && value.JoinedOn != null) + { + writer.WritePropertyName("joined_on"); + serializer.Serialize(writer, value.JoinedOn); + } + if (value.LastAccessedOnOption.IsSet && value.LastAccessedOn != null) + { + writer.WritePropertyName("last_accessed_on"); + serializer.Serialize(writer, value.LastAccessedOn); + } + if (value.RolesOption.IsSet) + { + writer.WritePropertyName("roles"); + serializer.Serialize(writer, value.Roles); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/OrganizationUserPermissionNewtonsoftConverter.cs b/Kinde.Api/Converters/OrganizationUserPermissionNewtonsoftConverter.cs new file mode 100644 index 0000000..b5eda54 --- /dev/null +++ b/Kinde.Api/Converters/OrganizationUserPermissionNewtonsoftConverter.cs @@ -0,0 +1,90 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for OrganizationUserPermission that handles the Option<> structure + /// + public class OrganizationUserPermissionNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override OrganizationUserPermission ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, OrganizationUserPermission existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? description = default(string?); + if (jsonObject["description"] != null) + { + description = jsonObject["description"].ToObject(); + } + List roles = default(List); + if (jsonObject["roles"] != null) + { + roles = jsonObject["roles"].ToObject>(serializer); + } + + return new OrganizationUserPermission( + id: id != null ? new Option(id) : default, key: key != null ? new Option(key) : default, name: name != null ? new Option(name) : default, description: description != null ? new Option(description) : default, roles: roles != null ? new Option?>(roles) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, OrganizationUserPermission value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.DescriptionOption.IsSet && value.Description != null) + { + writer.WritePropertyName("description"); + serializer.Serialize(writer, value.Description); + } + if (value.RolesOption.IsSet) + { + writer.WritePropertyName("roles"); + serializer.Serialize(writer, value.Roles); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/OrganizationUserPermissionRolesInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/OrganizationUserPermissionRolesInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..8349679 --- /dev/null +++ b/Kinde.Api/Converters/OrganizationUserPermissionRolesInnerNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for OrganizationUserPermissionRolesInner that handles the Option<> structure + /// + public class OrganizationUserPermissionRolesInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override OrganizationUserPermissionRolesInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, OrganizationUserPermissionRolesInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + + return new OrganizationUserPermissionRolesInner( + id: id != null ? new Option(id) : default, key: key != null ? new Option(key) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, OrganizationUserPermissionRolesInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/OrganizationUserRoleNewtonsoftConverter.cs b/Kinde.Api/Converters/OrganizationUserRoleNewtonsoftConverter.cs new file mode 100644 index 0000000..f7781b2 --- /dev/null +++ b/Kinde.Api/Converters/OrganizationUserRoleNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for OrganizationUserRole that handles the Option<> structure + /// + public class OrganizationUserRoleNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override OrganizationUserRole ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, OrganizationUserRole existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + + return new OrganizationUserRole( + id: id != null ? new Option(id) : default, key: key != null ? new Option(key) : default, name: name != null ? new Option(name) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, OrganizationUserRole value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/OrganizationUserRolePermissionsNewtonsoftConverter.cs b/Kinde.Api/Converters/OrganizationUserRolePermissionsNewtonsoftConverter.cs new file mode 100644 index 0000000..4e46d40 --- /dev/null +++ b/Kinde.Api/Converters/OrganizationUserRolePermissionsNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for OrganizationUserRolePermissions that handles the Option<> structure + /// + public class OrganizationUserRolePermissionsNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override OrganizationUserRolePermissions ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, OrganizationUserRolePermissions existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? role = default(string?); + if (jsonObject["role"] != null) + { + role = jsonObject["role"].ToObject(); + } + OrganizationUserRolePermissionsPermissions? permissions = default(OrganizationUserRolePermissionsPermissions?); + if (jsonObject["permissions"] != null) + { + permissions = jsonObject["permissions"].ToObject(serializer); + } + + return new OrganizationUserRolePermissions( + id: id != null ? new Option(id) : default, role: role != null ? new Option(role) : default, permissions: permissions != null ? new Option(permissions) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, OrganizationUserRolePermissions value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.RoleOption.IsSet && value.Role != null) + { + writer.WritePropertyName("role"); + serializer.Serialize(writer, value.Role); + } + if (value.PermissionsOption.IsSet && value.Permissions != null) + { + writer.WritePropertyName("permissions"); + serializer.Serialize(writer, value.Permissions); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/OrganizationUserRolePermissionsPermissionsNewtonsoftConverter.cs b/Kinde.Api/Converters/OrganizationUserRolePermissionsPermissionsNewtonsoftConverter.cs new file mode 100644 index 0000000..61d17b4 --- /dev/null +++ b/Kinde.Api/Converters/OrganizationUserRolePermissionsPermissionsNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for OrganizationUserRolePermissionsPermissions that handles the Option<> structure + /// + public class OrganizationUserRolePermissionsPermissionsNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override OrganizationUserRolePermissionsPermissions ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, OrganizationUserRolePermissionsPermissions existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + + return new OrganizationUserRolePermissionsPermissions( + key: key != null ? new Option(key) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, OrganizationUserRolePermissionsPermissions value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/PermissionsNewtonsoftConverter.cs b/Kinde.Api/Converters/PermissionsNewtonsoftConverter.cs new file mode 100644 index 0000000..33151b0 --- /dev/null +++ b/Kinde.Api/Converters/PermissionsNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for Permissions that handles the Option<> structure + /// + public class PermissionsNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override Permissions ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, Permissions existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? description = default(string?); + if (jsonObject["description"] != null) + { + description = jsonObject["description"].ToObject(); + } + + return new Permissions( + id: id != null ? new Option(id) : default, key: key != null ? new Option(key) : default, name: name != null ? new Option(name) : default, description: description != null ? new Option(description) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, Permissions value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.DescriptionOption.IsSet && value.Description != null) + { + writer.WritePropertyName("description"); + serializer.Serialize(writer, value.Description); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/PropertyNewtonsoftConverter.cs b/Kinde.Api/Converters/PropertyNewtonsoftConverter.cs new file mode 100644 index 0000000..5fb4ec6 --- /dev/null +++ b/Kinde.Api/Converters/PropertyNewtonsoftConverter.cs @@ -0,0 +1,100 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for Property that handles the Option<> structure + /// + public class PropertyNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override Property ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, Property existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + bool? isPrivate = default(bool?); + if (jsonObject["is_private"] != null) + { + isPrivate = jsonObject["is_private"].ToObject(serializer); + } + string? description = default(string?); + if (jsonObject["description"] != null) + { + description = jsonObject["description"].ToObject(); + } + bool? isKindeProperty = default(bool?); + if (jsonObject["is_kinde_property"] != null) + { + isKindeProperty = jsonObject["is_kinde_property"].ToObject(serializer); + } + + return new Property( + id: id != null ? new Option(id) : default, key: key != null ? new Option(key) : default, name: name != null ? new Option(name) : default, isPrivate: isPrivate != null ? new Option(isPrivate) : default, description: description != null ? new Option(description) : default, isKindeProperty: isKindeProperty != null ? new Option(isKindeProperty) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, Property value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.IsPrivateOption.IsSet && value.IsPrivate != null) + { + writer.WritePropertyName("is_private"); + serializer.Serialize(writer, value.IsPrivate); + } + if (value.DescriptionOption.IsSet && value.Description != null) + { + writer.WritePropertyName("description"); + serializer.Serialize(writer, value.Description); + } + if (value.IsKindePropertyOption.IsSet && value.IsKindeProperty != null) + { + writer.WritePropertyName("is_kinde_property"); + serializer.Serialize(writer, value.IsKindeProperty); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/PropertyValueNewtonsoftConverter.cs b/Kinde.Api/Converters/PropertyValueNewtonsoftConverter.cs new file mode 100644 index 0000000..5bfd545 --- /dev/null +++ b/Kinde.Api/Converters/PropertyValueNewtonsoftConverter.cs @@ -0,0 +1,90 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for PropertyValue that handles the Option<> structure + /// + public class PropertyValueNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override PropertyValue ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, PropertyValue existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? description = default(string?); + if (jsonObject["description"] != null) + { + description = jsonObject["description"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + string? value = default(string?); + if (jsonObject["value"] != null) + { + value = jsonObject["value"].ToObject(); + } + + return new PropertyValue( + id: id != null ? new Option(id) : default, name: name != null ? new Option(name) : default, description: description != null ? new Option(description) : default, key: key != null ? new Option(key) : default, value: value != null ? new Option(value) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, PropertyValue value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.DescriptionOption.IsSet && value.Description != null) + { + writer.WritePropertyName("description"); + serializer.Serialize(writer, value.Description); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + if (value.ValueOption.IsSet && value.Value != null) + { + writer.WritePropertyName("value"); + serializer.Serialize(writer, value.Value); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/ReadEnvLogoResponseLogosInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/ReadEnvLogoResponseLogosInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..e44057c --- /dev/null +++ b/Kinde.Api/Converters/ReadEnvLogoResponseLogosInnerNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for ReadEnvLogoResponseLogosInner that handles the Option<> structure + /// + public class ReadEnvLogoResponseLogosInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override ReadEnvLogoResponseLogosInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, ReadEnvLogoResponseLogosInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? type = default(string?); + if (jsonObject["type"] != null) + { + type = jsonObject["type"].ToObject(); + } + string? fileName = default(string?); + if (jsonObject["file_name"] != null) + { + fileName = jsonObject["file_name"].ToObject(); + } + + return new ReadEnvLogoResponseLogosInner( + type: type != null ? new Option(type) : default, fileName: fileName != null ? new Option(fileName) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, ReadEnvLogoResponseLogosInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.TypeOption.IsSet && value.Type != null) + { + writer.WritePropertyName("type"); + serializer.Serialize(writer, value.Type); + } + if (value.FileNameOption.IsSet && value.FileName != null) + { + writer.WritePropertyName("file_name"); + serializer.Serialize(writer, value.FileName); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/ReadEnvLogoResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/ReadEnvLogoResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..9e29d0b --- /dev/null +++ b/Kinde.Api/Converters/ReadEnvLogoResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for ReadEnvLogoResponse that handles the Option<> structure + /// + public class ReadEnvLogoResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override ReadEnvLogoResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, ReadEnvLogoResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + List logos = default(List); + if (jsonObject["logos"] != null) + { + logos = jsonObject["logos"].ToObject>(serializer); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + + return new ReadEnvLogoResponse( + code: code != null ? new Option(code) : default, logos: logos != null ? new Option?>(logos) : default, message: message != null ? new Option(message) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, ReadEnvLogoResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.LogosOption.IsSet) + { + writer.WritePropertyName("logos"); + serializer.Serialize(writer, value.Logos); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/ReadLogoResponseLogosInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/ReadLogoResponseLogosInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..93ed16a --- /dev/null +++ b/Kinde.Api/Converters/ReadLogoResponseLogosInnerNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for ReadLogoResponseLogosInner that handles the Option<> structure + /// + public class ReadLogoResponseLogosInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override ReadLogoResponseLogosInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, ReadLogoResponseLogosInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? type = default(string?); + if (jsonObject["type"] != null) + { + type = jsonObject["type"].ToObject(); + } + string? fileName = default(string?); + if (jsonObject["file_name"] != null) + { + fileName = jsonObject["file_name"].ToObject(); + } + string? path = default(string?); + if (jsonObject["path"] != null) + { + path = jsonObject["path"].ToObject(); + } + + return new ReadLogoResponseLogosInner( + type: type != null ? new Option(type) : default, fileName: fileName != null ? new Option(fileName) : default, path: path != null ? new Option(path) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, ReadLogoResponseLogosInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.TypeOption.IsSet && value.Type != null) + { + writer.WritePropertyName("type"); + serializer.Serialize(writer, value.Type); + } + if (value.FileNameOption.IsSet && value.FileName != null) + { + writer.WritePropertyName("file_name"); + serializer.Serialize(writer, value.FileName); + } + if (value.PathOption.IsSet && value.Path != null) + { + writer.WritePropertyName("path"); + serializer.Serialize(writer, value.Path); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/ReadLogoResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/ReadLogoResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..5215b93 --- /dev/null +++ b/Kinde.Api/Converters/ReadLogoResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for ReadLogoResponse that handles the Option<> structure + /// + public class ReadLogoResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override ReadLogoResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, ReadLogoResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + List logos = default(List); + if (jsonObject["logos"] != null) + { + logos = jsonObject["logos"].ToObject>(serializer); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + + return new ReadLogoResponse( + code: code != null ? new Option(code) : default, logos: logos != null ? new Option?>(logos) : default, message: message != null ? new Option(message) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, ReadLogoResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.LogosOption.IsSet) + { + writer.WritePropertyName("logos"); + serializer.Serialize(writer, value.Logos); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/RedirectCallbackUrlsNewtonsoftConverter.cs b/Kinde.Api/Converters/RedirectCallbackUrlsNewtonsoftConverter.cs new file mode 100644 index 0000000..23355a5 --- /dev/null +++ b/Kinde.Api/Converters/RedirectCallbackUrlsNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for RedirectCallbackUrls that handles the Option<> structure + /// + public class RedirectCallbackUrlsNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override RedirectCallbackUrls ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, RedirectCallbackUrls existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + List redirectUrls = default(List); + if (jsonObject["redirect_urls"] != null) + { + redirectUrls = jsonObject["redirect_urls"].ToObject>(serializer); + } + + return new RedirectCallbackUrls( + redirectUrls: redirectUrls != null ? new Option?>(redirectUrls) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, RedirectCallbackUrls value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.RedirectUrlsOption.IsSet) + { + writer.WritePropertyName("redirect_urls"); + serializer.Serialize(writer, value.RedirectUrls); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/ReplaceConnectionRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/ReplaceConnectionRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..6842470 --- /dev/null +++ b/Kinde.Api/Converters/ReplaceConnectionRequestNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for ReplaceConnectionRequest that handles the Option<> structure + /// + public class ReplaceConnectionRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override ReplaceConnectionRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, ReplaceConnectionRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? displayName = default(string?); + if (jsonObject["display_name"] != null) + { + displayName = jsonObject["display_name"].ToObject(); + } + List enabledApplications = default(List); + if (jsonObject["enabled_applications"] != null) + { + enabledApplications = jsonObject["enabled_applications"].ToObject>(serializer); + } + ReplaceConnectionRequestOptions? options = default(ReplaceConnectionRequestOptions?); + if (jsonObject["options"] != null) + { + options = jsonObject["options"].ToObject(serializer); + } + + return new ReplaceConnectionRequest( + name: name != null ? new Option(name) : default, displayName: displayName != null ? new Option(displayName) : default, enabledApplications: enabledApplications != null ? new Option?>(enabledApplications) : default, options: options != null ? new Option(options) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, ReplaceConnectionRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.DisplayNameOption.IsSet && value.DisplayName != null) + { + writer.WritePropertyName("display_name"); + serializer.Serialize(writer, value.DisplayName); + } + if (value.EnabledApplicationsOption.IsSet) + { + writer.WritePropertyName("enabled_applications"); + serializer.Serialize(writer, value.EnabledApplications); + } + if (value.OptionsOption.IsSet && value.Options != null) + { + writer.WritePropertyName("options"); + serializer.Serialize(writer, value.Options); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/ReplaceConnectionRequestOptionsOneOf1NewtonsoftConverter.cs b/Kinde.Api/Converters/ReplaceConnectionRequestOptionsOneOf1NewtonsoftConverter.cs new file mode 100644 index 0000000..aae0f4d --- /dev/null +++ b/Kinde.Api/Converters/ReplaceConnectionRequestOptionsOneOf1NewtonsoftConverter.cs @@ -0,0 +1,160 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for ReplaceConnectionRequestOptionsOneOf1 that handles the Option<> structure + /// + public class ReplaceConnectionRequestOptionsOneOf1NewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override ReplaceConnectionRequestOptionsOneOf1 ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, ReplaceConnectionRequestOptionsOneOf1 existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + List homeRealmDomains = default(List); + if (jsonObject["home_realm_domains"] != null) + { + homeRealmDomains = jsonObject["home_realm_domains"].ToObject>(serializer); + } + string? samlEntityId = default(string?); + if (jsonObject["saml_entity_id"] != null) + { + samlEntityId = jsonObject["saml_entity_id"].ToObject(); + } + string? samlAcsUrl = default(string?); + if (jsonObject["saml_acs_url"] != null) + { + samlAcsUrl = jsonObject["saml_acs_url"].ToObject(); + } + string? samlIdpMetadataUrl = default(string?); + if (jsonObject["saml_idp_metadata_url"] != null) + { + samlIdpMetadataUrl = jsonObject["saml_idp_metadata_url"].ToObject(); + } + string? samlEmailKeyAttr = default(string?); + if (jsonObject["saml_email_key_attr"] != null) + { + samlEmailKeyAttr = jsonObject["saml_email_key_attr"].ToObject(); + } + string? samlFirstNameKeyAttr = default(string?); + if (jsonObject["saml_first_name_key_attr"] != null) + { + samlFirstNameKeyAttr = jsonObject["saml_first_name_key_attr"].ToObject(); + } + string? samlLastNameKeyAttr = default(string?); + if (jsonObject["saml_last_name_key_attr"] != null) + { + samlLastNameKeyAttr = jsonObject["saml_last_name_key_attr"].ToObject(); + } + bool? isCreateMissingUser = default(bool?); + if (jsonObject["is_create_missing_user"] != null) + { + isCreateMissingUser = jsonObject["is_create_missing_user"].ToObject(serializer); + } + bool? isForceShowSsoButton = default(bool?); + if (jsonObject["is_force_show_sso_button"] != null) + { + isForceShowSsoButton = jsonObject["is_force_show_sso_button"].ToObject(serializer); + } + Dictionary upstreamParams = default(Dictionary); + if (jsonObject["upstream_params"] != null) + { + upstreamParams = jsonObject["upstream_params"].ToObject>(serializer); + } + string? samlSigningCertificate = default(string?); + if (jsonObject["saml_signing_certificate"] != null) + { + samlSigningCertificate = jsonObject["saml_signing_certificate"].ToObject(); + } + string? samlSigningPrivateKey = default(string?); + if (jsonObject["saml_signing_private_key"] != null) + { + samlSigningPrivateKey = jsonObject["saml_signing_private_key"].ToObject(); + } + + return new ReplaceConnectionRequestOptionsOneOf1( + homeRealmDomains: homeRealmDomains != null ? new Option?>(homeRealmDomains) : default, samlEntityId: samlEntityId != null ? new Option(samlEntityId) : default, samlAcsUrl: samlAcsUrl != null ? new Option(samlAcsUrl) : default, samlIdpMetadataUrl: samlIdpMetadataUrl != null ? new Option(samlIdpMetadataUrl) : default, samlEmailKeyAttr: samlEmailKeyAttr != null ? new Option(samlEmailKeyAttr) : default, samlFirstNameKeyAttr: samlFirstNameKeyAttr != null ? new Option(samlFirstNameKeyAttr) : default, samlLastNameKeyAttr: samlLastNameKeyAttr != null ? new Option(samlLastNameKeyAttr) : default, isCreateMissingUser: isCreateMissingUser != null ? new Option(isCreateMissingUser) : default, isForceShowSsoButton: isForceShowSsoButton != null ? new Option(isForceShowSsoButton) : default, upstreamParams: upstreamParams != null ? new Option>(upstreamParams) : default, samlSigningCertificate: samlSigningCertificate != null ? new Option(samlSigningCertificate) : default, samlSigningPrivateKey: samlSigningPrivateKey != null ? new Option(samlSigningPrivateKey) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, ReplaceConnectionRequestOptionsOneOf1 value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.HomeRealmDomainsOption.IsSet) + { + writer.WritePropertyName("home_realm_domains"); + serializer.Serialize(writer, value.HomeRealmDomains); + } + if (value.SamlEntityIdOption.IsSet && value.SamlEntityId != null) + { + writer.WritePropertyName("saml_entity_id"); + serializer.Serialize(writer, value.SamlEntityId); + } + if (value.SamlAcsUrlOption.IsSet && value.SamlAcsUrl != null) + { + writer.WritePropertyName("saml_acs_url"); + serializer.Serialize(writer, value.SamlAcsUrl); + } + if (value.SamlIdpMetadataUrlOption.IsSet && value.SamlIdpMetadataUrl != null) + { + writer.WritePropertyName("saml_idp_metadata_url"); + serializer.Serialize(writer, value.SamlIdpMetadataUrl); + } + if (value.SamlEmailKeyAttrOption.IsSet && value.SamlEmailKeyAttr != null) + { + writer.WritePropertyName("saml_email_key_attr"); + serializer.Serialize(writer, value.SamlEmailKeyAttr); + } + if (value.SamlFirstNameKeyAttrOption.IsSet && value.SamlFirstNameKeyAttr != null) + { + writer.WritePropertyName("saml_first_name_key_attr"); + serializer.Serialize(writer, value.SamlFirstNameKeyAttr); + } + if (value.SamlLastNameKeyAttrOption.IsSet && value.SamlLastNameKeyAttr != null) + { + writer.WritePropertyName("saml_last_name_key_attr"); + serializer.Serialize(writer, value.SamlLastNameKeyAttr); + } + if (value.IsCreateMissingUserOption.IsSet && value.IsCreateMissingUser != null) + { + writer.WritePropertyName("is_create_missing_user"); + serializer.Serialize(writer, value.IsCreateMissingUser); + } + if (value.IsForceShowSsoButtonOption.IsSet && value.IsForceShowSsoButton != null) + { + writer.WritePropertyName("is_force_show_sso_button"); + serializer.Serialize(writer, value.IsForceShowSsoButton); + } + if (value.UpstreamParamsOption.IsSet) + { + writer.WritePropertyName("upstream_params"); + serializer.Serialize(writer, value.UpstreamParams); + } + if (value.SamlSigningCertificateOption.IsSet && value.SamlSigningCertificate != null) + { + writer.WritePropertyName("saml_signing_certificate"); + serializer.Serialize(writer, value.SamlSigningCertificate); + } + if (value.SamlSigningPrivateKeyOption.IsSet && value.SamlSigningPrivateKey != null) + { + writer.WritePropertyName("saml_signing_private_key"); + serializer.Serialize(writer, value.SamlSigningPrivateKey); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/ReplaceConnectionRequestOptionsOneOfNewtonsoftConverter.cs b/Kinde.Api/Converters/ReplaceConnectionRequestOptionsOneOfNewtonsoftConverter.cs new file mode 100644 index 0000000..e7df6f2 --- /dev/null +++ b/Kinde.Api/Converters/ReplaceConnectionRequestOptionsOneOfNewtonsoftConverter.cs @@ -0,0 +1,150 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for ReplaceConnectionRequestOptionsOneOf that handles the Option<> structure + /// + public class ReplaceConnectionRequestOptionsOneOfNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override ReplaceConnectionRequestOptionsOneOf ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, ReplaceConnectionRequestOptionsOneOf existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? clientId = default(string?); + if (jsonObject["client_id"] != null) + { + clientId = jsonObject["client_id"].ToObject(); + } + string? clientSecret = default(string?); + if (jsonObject["client_secret"] != null) + { + clientSecret = jsonObject["client_secret"].ToObject(); + } + List homeRealmDomains = default(List); + if (jsonObject["home_realm_domains"] != null) + { + homeRealmDomains = jsonObject["home_realm_domains"].ToObject>(serializer); + } + string? entraIdDomain = default(string?); + if (jsonObject["entra_id_domain"] != null) + { + entraIdDomain = jsonObject["entra_id_domain"].ToObject(); + } + bool? isUseCommonEndpoint = default(bool?); + if (jsonObject["is_use_common_endpoint"] != null) + { + isUseCommonEndpoint = jsonObject["is_use_common_endpoint"].ToObject(serializer); + } + bool? isSyncUserProfileOnLogin = default(bool?); + if (jsonObject["is_sync_user_profile_on_login"] != null) + { + isSyncUserProfileOnLogin = jsonObject["is_sync_user_profile_on_login"].ToObject(serializer); + } + bool? isRetrieveProviderUserGroups = default(bool?); + if (jsonObject["is_retrieve_provider_user_groups"] != null) + { + isRetrieveProviderUserGroups = jsonObject["is_retrieve_provider_user_groups"].ToObject(serializer); + } + bool? isExtendedAttributesRequired = default(bool?); + if (jsonObject["is_extended_attributes_required"] != null) + { + isExtendedAttributesRequired = jsonObject["is_extended_attributes_required"].ToObject(serializer); + } + bool? isCreateMissingUser = default(bool?); + if (jsonObject["is_create_missing_user"] != null) + { + isCreateMissingUser = jsonObject["is_create_missing_user"].ToObject(serializer); + } + bool? isForceShowSsoButton = default(bool?); + if (jsonObject["is_force_show_sso_button"] != null) + { + isForceShowSsoButton = jsonObject["is_force_show_sso_button"].ToObject(serializer); + } + Dictionary upstreamParams = default(Dictionary); + if (jsonObject["upstream_params"] != null) + { + upstreamParams = jsonObject["upstream_params"].ToObject>(serializer); + } + + return new ReplaceConnectionRequestOptionsOneOf( + clientId: clientId != null ? new Option(clientId) : default, clientSecret: clientSecret != null ? new Option(clientSecret) : default, homeRealmDomains: homeRealmDomains != null ? new Option?>(homeRealmDomains) : default, entraIdDomain: entraIdDomain != null ? new Option(entraIdDomain) : default, isUseCommonEndpoint: isUseCommonEndpoint != null ? new Option(isUseCommonEndpoint) : default, isSyncUserProfileOnLogin: isSyncUserProfileOnLogin != null ? new Option(isSyncUserProfileOnLogin) : default, isRetrieveProviderUserGroups: isRetrieveProviderUserGroups != null ? new Option(isRetrieveProviderUserGroups) : default, isExtendedAttributesRequired: isExtendedAttributesRequired != null ? new Option(isExtendedAttributesRequired) : default, isCreateMissingUser: isCreateMissingUser != null ? new Option(isCreateMissingUser) : default, isForceShowSsoButton: isForceShowSsoButton != null ? new Option(isForceShowSsoButton) : default, upstreamParams: upstreamParams != null ? new Option>(upstreamParams) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, ReplaceConnectionRequestOptionsOneOf value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.ClientIdOption.IsSet && value.ClientId != null) + { + writer.WritePropertyName("client_id"); + serializer.Serialize(writer, value.ClientId); + } + if (value.ClientSecretOption.IsSet && value.ClientSecret != null) + { + writer.WritePropertyName("client_secret"); + serializer.Serialize(writer, value.ClientSecret); + } + if (value.HomeRealmDomainsOption.IsSet) + { + writer.WritePropertyName("home_realm_domains"); + serializer.Serialize(writer, value.HomeRealmDomains); + } + if (value.EntraIdDomainOption.IsSet && value.EntraIdDomain != null) + { + writer.WritePropertyName("entra_id_domain"); + serializer.Serialize(writer, value.EntraIdDomain); + } + if (value.IsUseCommonEndpointOption.IsSet && value.IsUseCommonEndpoint != null) + { + writer.WritePropertyName("is_use_common_endpoint"); + serializer.Serialize(writer, value.IsUseCommonEndpoint); + } + if (value.IsSyncUserProfileOnLoginOption.IsSet && value.IsSyncUserProfileOnLogin != null) + { + writer.WritePropertyName("is_sync_user_profile_on_login"); + serializer.Serialize(writer, value.IsSyncUserProfileOnLogin); + } + if (value.IsRetrieveProviderUserGroupsOption.IsSet && value.IsRetrieveProviderUserGroups != null) + { + writer.WritePropertyName("is_retrieve_provider_user_groups"); + serializer.Serialize(writer, value.IsRetrieveProviderUserGroups); + } + if (value.IsExtendedAttributesRequiredOption.IsSet && value.IsExtendedAttributesRequired != null) + { + writer.WritePropertyName("is_extended_attributes_required"); + serializer.Serialize(writer, value.IsExtendedAttributesRequired); + } + if (value.IsCreateMissingUserOption.IsSet && value.IsCreateMissingUser != null) + { + writer.WritePropertyName("is_create_missing_user"); + serializer.Serialize(writer, value.IsCreateMissingUser); + } + if (value.IsForceShowSsoButtonOption.IsSet && value.IsForceShowSsoButton != null) + { + writer.WritePropertyName("is_force_show_sso_button"); + serializer.Serialize(writer, value.IsForceShowSsoButton); + } + if (value.UpstreamParamsOption.IsSet) + { + writer.WritePropertyName("upstream_params"); + serializer.Serialize(writer, value.UpstreamParams); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/ReplaceLogoutRedirectURLsRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/ReplaceLogoutRedirectURLsRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..34e1049 --- /dev/null +++ b/Kinde.Api/Converters/ReplaceLogoutRedirectURLsRequestNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for ReplaceLogoutRedirectURLsRequest that handles the Option<> structure + /// + public class ReplaceLogoutRedirectURLsRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override ReplaceLogoutRedirectURLsRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, ReplaceLogoutRedirectURLsRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + List urls = default(List); + if (jsonObject["urls"] != null) + { + urls = jsonObject["urls"].ToObject>(serializer); + } + + return new ReplaceLogoutRedirectURLsRequest( + urls: urls != null ? new Option?>(urls) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, ReplaceLogoutRedirectURLsRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.UrlsOption.IsSet) + { + writer.WritePropertyName("urls"); + serializer.Serialize(writer, value.Urls); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/ReplaceMFARequestNewtonsoftConverter.cs b/Kinde.Api/Converters/ReplaceMFARequestNewtonsoftConverter.cs new file mode 100644 index 0000000..206a170 --- /dev/null +++ b/Kinde.Api/Converters/ReplaceMFARequestNewtonsoftConverter.cs @@ -0,0 +1,54 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for ReplaceMFARequest that handles the Option<> structure + /// + public class ReplaceMFARequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override ReplaceMFARequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, ReplaceMFARequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + ReplaceMFARequest.PolicyEnum policy = default(ReplaceMFARequest.PolicyEnum); + if (jsonObject["policy"] != null) + { + var policyStr = jsonObject["policy"].ToObject(); + if (!string.IsNullOrEmpty(policyStr)) + { + policy = ReplaceMFARequest.PolicyEnumFromString(policyStr); + } + } + List enabledFactors = default(List); + if (jsonObject["enabled_factors"] != null) + { + enabledFactors = jsonObject["enabled_factors"].ToObject>(serializer); + } + + return new ReplaceMFARequest( + policy: policy, enabledFactors: enabledFactors ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, ReplaceMFARequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/ReplaceOrganizationMFARequestNewtonsoftConverter.cs b/Kinde.Api/Converters/ReplaceOrganizationMFARequestNewtonsoftConverter.cs new file mode 100644 index 0000000..a6d894e --- /dev/null +++ b/Kinde.Api/Converters/ReplaceOrganizationMFARequestNewtonsoftConverter.cs @@ -0,0 +1,45 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for ReplaceOrganizationMFARequest that handles the Option<> structure + /// + public class ReplaceOrganizationMFARequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override ReplaceOrganizationMFARequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, ReplaceOrganizationMFARequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + List enabledFactors = default(List); + if (jsonObject["enabled_factors"] != null) + { + enabledFactors = jsonObject["enabled_factors"].ToObject>(serializer); + } + + return new ReplaceOrganizationMFARequest( + enabledFactors: enabledFactors ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, ReplaceOrganizationMFARequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/ReplaceRedirectCallbackURLsRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/ReplaceRedirectCallbackURLsRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..5f04bde --- /dev/null +++ b/Kinde.Api/Converters/ReplaceRedirectCallbackURLsRequestNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for ReplaceRedirectCallbackURLsRequest that handles the Option<> structure + /// + public class ReplaceRedirectCallbackURLsRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override ReplaceRedirectCallbackURLsRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, ReplaceRedirectCallbackURLsRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + List urls = default(List); + if (jsonObject["urls"] != null) + { + urls = jsonObject["urls"].ToObject>(serializer); + } + + return new ReplaceRedirectCallbackURLsRequest( + urls: urls != null ? new Option?>(urls) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, ReplaceRedirectCallbackURLsRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.UrlsOption.IsSet) + { + writer.WritePropertyName("urls"); + serializer.Serialize(writer, value.Urls); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/RoleNewtonsoftConverter.cs b/Kinde.Api/Converters/RoleNewtonsoftConverter.cs new file mode 100644 index 0000000..ec940b8 --- /dev/null +++ b/Kinde.Api/Converters/RoleNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for Role that handles the Option<> structure + /// + public class RoleNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override Role ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, Role existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? description = default(string?); + if (jsonObject["description"] != null) + { + description = jsonObject["description"].ToObject(); + } + + return new Role( + id: id != null ? new Option(id) : default, key: key != null ? new Option(key) : default, name: name != null ? new Option(name) : default, description: description != null ? new Option(description) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, Role value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.DescriptionOption.IsSet && value.Description != null) + { + writer.WritePropertyName("description"); + serializer.Serialize(writer, value.Description); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/RolePermissionsResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/RolePermissionsResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..40bfb7a --- /dev/null +++ b/Kinde.Api/Converters/RolePermissionsResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for RolePermissionsResponse that handles the Option<> structure + /// + public class RolePermissionsResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override RolePermissionsResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, RolePermissionsResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List permissions = default(List); + if (jsonObject["permissions"] != null) + { + permissions = jsonObject["permissions"].ToObject>(serializer); + } + string? nextToken = default(string?); + if (jsonObject["next_token"] != null) + { + nextToken = jsonObject["next_token"].ToObject(); + } + + return new RolePermissionsResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, permissions: permissions != null ? new Option?>(permissions) : default, nextToken: nextToken != null ? new Option(nextToken) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, RolePermissionsResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.PermissionsOption.IsSet) + { + writer.WritePropertyName("permissions"); + serializer.Serialize(writer, value.Permissions); + } + if (value.NextTokenOption.IsSet && value.NextToken != null) + { + writer.WritePropertyName("next_token"); + serializer.Serialize(writer, value.NextToken); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/RoleScopesResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/RoleScopesResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..44f9a63 --- /dev/null +++ b/Kinde.Api/Converters/RoleScopesResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for RoleScopesResponse that handles the Option<> structure + /// + public class RoleScopesResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override RoleScopesResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, RoleScopesResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List scopes = default(List); + if (jsonObject["scopes"] != null) + { + scopes = jsonObject["scopes"].ToObject>(serializer); + } + + return new RoleScopesResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, scopes: scopes != null ? new Option?>(scopes) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, RoleScopesResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.ScopesOption.IsSet) + { + writer.WritePropertyName("scopes"); + serializer.Serialize(writer, value.Scopes); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/RolesNewtonsoftConverter.cs b/Kinde.Api/Converters/RolesNewtonsoftConverter.cs new file mode 100644 index 0000000..7501d58 --- /dev/null +++ b/Kinde.Api/Converters/RolesNewtonsoftConverter.cs @@ -0,0 +1,90 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for Roles that handles the Option<> structure + /// + public class RolesNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override Roles ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, Roles existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? description = default(string?); + if (jsonObject["description"] != null) + { + description = jsonObject["description"].ToObject(); + } + bool? isDefaultRole = default(bool?); + if (jsonObject["is_default_role"] != null) + { + isDefaultRole = jsonObject["is_default_role"].ToObject(serializer); + } + + return new Roles( + id: id != null ? new Option(id) : default, key: key != null ? new Option(key) : default, name: name != null ? new Option(name) : default, description: description != null ? new Option(description) : default, isDefaultRole: isDefaultRole != null ? new Option(isDefaultRole) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, Roles value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.DescriptionOption.IsSet && value.Description != null) + { + writer.WritePropertyName("description"); + serializer.Serialize(writer, value.Description); + } + if (value.IsDefaultRoleOption.IsSet && value.IsDefaultRole != null) + { + writer.WritePropertyName("is_default_role"); + serializer.Serialize(writer, value.IsDefaultRole); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/RotateApiKeyResponseApiKeyNewtonsoftConverter.cs b/Kinde.Api/Converters/RotateApiKeyResponseApiKeyNewtonsoftConverter.cs new file mode 100644 index 0000000..5d1d1a6 --- /dev/null +++ b/Kinde.Api/Converters/RotateApiKeyResponseApiKeyNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for RotateApiKeyResponseApiKey that handles the Option<> structure + /// + public class RotateApiKeyResponseApiKeyNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override RotateApiKeyResponseApiKey ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, RotateApiKeyResponseApiKey existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + + return new RotateApiKeyResponseApiKey( + id: id != null ? new Option(id) : default, key: key != null ? new Option(key) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, RotateApiKeyResponseApiKey value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/RotateApiKeyResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/RotateApiKeyResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..2203649 --- /dev/null +++ b/Kinde.Api/Converters/RotateApiKeyResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for RotateApiKeyResponse that handles the Option<> structure + /// + public class RotateApiKeyResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override RotateApiKeyResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, RotateApiKeyResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + RotateApiKeyResponseApiKey? apiKey = default(RotateApiKeyResponseApiKey?); + if (jsonObject["api_key"] != null) + { + apiKey = jsonObject["api_key"].ToObject(serializer); + } + + return new RotateApiKeyResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, apiKey: apiKey != null ? new Option(apiKey) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, RotateApiKeyResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.ApiKeyOption.IsSet && value.ApiKey != null) + { + writer.WritePropertyName("api_key"); + serializer.Serialize(writer, value.ApiKey); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/ScopesNewtonsoftConverter.cs b/Kinde.Api/Converters/ScopesNewtonsoftConverter.cs new file mode 100644 index 0000000..5d01537 --- /dev/null +++ b/Kinde.Api/Converters/ScopesNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for Scopes that handles the Option<> structure + /// + public class ScopesNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override Scopes ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, Scopes existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + string? description = default(string?); + if (jsonObject["description"] != null) + { + description = jsonObject["description"].ToObject(); + } + string? apiId = default(string?); + if (jsonObject["api_id"] != null) + { + apiId = jsonObject["api_id"].ToObject(); + } + + return new Scopes( + id: id != null ? new Option(id) : default, key: key != null ? new Option(key) : default, description: description != null ? new Option(description) : default, apiId: apiId != null ? new Option(apiId) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, Scopes value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + if (value.DescriptionOption.IsSet && value.Description != null) + { + writer.WritePropertyName("description"); + serializer.Serialize(writer, value.Description); + } + if (value.ApiIdOption.IsSet && value.ApiId != null) + { + writer.WritePropertyName("api_id"); + serializer.Serialize(writer, value.ApiId); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/SearchUsersResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/SearchUsersResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..6939e1e --- /dev/null +++ b/Kinde.Api/Converters/SearchUsersResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for SearchUsersResponse that handles the Option<> structure + /// + public class SearchUsersResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override SearchUsersResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, SearchUsersResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List results = default(List); + if (jsonObject["results"] != null) + { + results = jsonObject["results"].ToObject>(serializer); + } + + return new SearchUsersResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, results: results != null ? new Option?>(results) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, SearchUsersResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.ResultsOption.IsSet) + { + writer.WritePropertyName("results"); + serializer.Serialize(writer, value.Results); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/SearchUsersResponseResultsInnerApiScopesInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/SearchUsersResponseResultsInnerApiScopesInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..a0618b7 --- /dev/null +++ b/Kinde.Api/Converters/SearchUsersResponseResultsInnerApiScopesInnerNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for SearchUsersResponseResultsInnerApiScopesInner that handles the Option<> structure + /// + public class SearchUsersResponseResultsInnerApiScopesInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override SearchUsersResponseResultsInnerApiScopesInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, SearchUsersResponseResultsInnerApiScopesInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? orgCode = default(string?); + if (jsonObject["org_code"] != null) + { + orgCode = jsonObject["org_code"].ToObject(); + } + string? scope = default(string?); + if (jsonObject["scope"] != null) + { + scope = jsonObject["scope"].ToObject(); + } + string? apiId = default(string?); + if (jsonObject["api_id"] != null) + { + apiId = jsonObject["api_id"].ToObject(); + } + + return new SearchUsersResponseResultsInnerApiScopesInner( + orgCode: orgCode != null ? new Option(orgCode) : default, scope: scope != null ? new Option(scope) : default, apiId: apiId != null ? new Option(apiId) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, SearchUsersResponseResultsInnerApiScopesInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.OrgCodeOption.IsSet && value.OrgCode != null) + { + writer.WritePropertyName("org_code"); + serializer.Serialize(writer, value.OrgCode); + } + if (value.ScopeOption.IsSet && value.Scope != null) + { + writer.WritePropertyName("scope"); + serializer.Serialize(writer, value.Scope); + } + if (value.ApiIdOption.IsSet && value.ApiId != null) + { + writer.WritePropertyName("api_id"); + serializer.Serialize(writer, value.ApiId); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/SearchUsersResponseResultsInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/SearchUsersResponseResultsInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..583fbe2 --- /dev/null +++ b/Kinde.Api/Converters/SearchUsersResponseResultsInnerNewtonsoftConverter.cs @@ -0,0 +1,200 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for SearchUsersResponseResultsInner that handles the Option<> structure + /// + public class SearchUsersResponseResultsInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override SearchUsersResponseResultsInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, SearchUsersResponseResultsInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? providedId = default(string?); + if (jsonObject["provided_id"] != null) + { + providedId = jsonObject["provided_id"].ToObject(); + } + string? email = default(string?); + if (jsonObject["email"] != null) + { + email = jsonObject["email"].ToObject(); + } + string? username = default(string?); + if (jsonObject["username"] != null) + { + username = jsonObject["username"].ToObject(); + } + string? lastName = default(string?); + if (jsonObject["last_name"] != null) + { + lastName = jsonObject["last_name"].ToObject(); + } + string? firstName = default(string?); + if (jsonObject["first_name"] != null) + { + firstName = jsonObject["first_name"].ToObject(); + } + bool? isSuspended = default(bool?); + if (jsonObject["is_suspended"] != null) + { + isSuspended = jsonObject["is_suspended"].ToObject(serializer); + } + string? picture = default(string?); + if (jsonObject["picture"] != null) + { + picture = jsonObject["picture"].ToObject(); + } + int? totalSignIns = default(int?); + if (jsonObject["total_sign_ins"] != null) + { + totalSignIns = jsonObject["total_sign_ins"].ToObject(serializer); + } + int? failedSignIns = default(int?); + if (jsonObject["failed_sign_ins"] != null) + { + failedSignIns = jsonObject["failed_sign_ins"].ToObject(serializer); + } + string? lastSignedIn = default(string?); + if (jsonObject["last_signed_in"] != null) + { + lastSignedIn = jsonObject["last_signed_in"].ToObject(); + } + string? createdOn = default(string?); + if (jsonObject["created_on"] != null) + { + createdOn = jsonObject["created_on"].ToObject(); + } + List organizations = default(List); + if (jsonObject["organizations"] != null) + { + organizations = jsonObject["organizations"].ToObject>(serializer); + } + List identities = default(List); + if (jsonObject["identities"] != null) + { + identities = jsonObject["identities"].ToObject>(serializer); + } + Dictionary properties = default(Dictionary); + if (jsonObject["properties"] != null) + { + properties = jsonObject["properties"].ToObject>(serializer); + } + List apiScopes = default(List); + if (jsonObject["api_scopes"] != null) + { + apiScopes = jsonObject["api_scopes"].ToObject>(serializer); + } + + return new SearchUsersResponseResultsInner( + id: id != null ? new Option(id) : default, providedId: providedId != null ? new Option(providedId) : default, email: email != null ? new Option(email) : default, username: username != null ? new Option(username) : default, lastName: lastName != null ? new Option(lastName) : default, firstName: firstName != null ? new Option(firstName) : default, isSuspended: isSuspended != null ? new Option(isSuspended) : default, picture: picture != null ? new Option(picture) : default, totalSignIns: totalSignIns != null ? new Option(totalSignIns) : default, failedSignIns: failedSignIns != null ? new Option(failedSignIns) : default, lastSignedIn: lastSignedIn != null ? new Option(lastSignedIn) : default, createdOn: createdOn != null ? new Option(createdOn) : default, organizations: organizations != null ? new Option?>(organizations) : default, identities: identities != null ? new Option?>(identities) : default, properties: properties != null ? new Option>(properties) : default, apiScopes: apiScopes != null ? new Option?>(apiScopes) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, SearchUsersResponseResultsInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.ProvidedIdOption.IsSet && value.ProvidedId != null) + { + writer.WritePropertyName("provided_id"); + serializer.Serialize(writer, value.ProvidedId); + } + if (value.EmailOption.IsSet && value.Email != null) + { + writer.WritePropertyName("email"); + serializer.Serialize(writer, value.Email); + } + if (value.UsernameOption.IsSet && value.Username != null) + { + writer.WritePropertyName("username"); + serializer.Serialize(writer, value.Username); + } + if (value.LastNameOption.IsSet && value.LastName != null) + { + writer.WritePropertyName("last_name"); + serializer.Serialize(writer, value.LastName); + } + if (value.FirstNameOption.IsSet && value.FirstName != null) + { + writer.WritePropertyName("first_name"); + serializer.Serialize(writer, value.FirstName); + } + if (value.IsSuspendedOption.IsSet && value.IsSuspended != null) + { + writer.WritePropertyName("is_suspended"); + serializer.Serialize(writer, value.IsSuspended); + } + if (value.PictureOption.IsSet && value.Picture != null) + { + writer.WritePropertyName("picture"); + serializer.Serialize(writer, value.Picture); + } + if (value.TotalSignInsOption.IsSet && value.TotalSignIns != null) + { + writer.WritePropertyName("total_sign_ins"); + serializer.Serialize(writer, value.TotalSignIns); + } + if (value.FailedSignInsOption.IsSet && value.FailedSignIns != null) + { + writer.WritePropertyName("failed_sign_ins"); + serializer.Serialize(writer, value.FailedSignIns); + } + if (value.LastSignedInOption.IsSet && value.LastSignedIn != null) + { + writer.WritePropertyName("last_signed_in"); + serializer.Serialize(writer, value.LastSignedIn); + } + if (value.CreatedOnOption.IsSet && value.CreatedOn != null) + { + writer.WritePropertyName("created_on"); + serializer.Serialize(writer, value.CreatedOn); + } + if (value.OrganizationsOption.IsSet) + { + writer.WritePropertyName("organizations"); + serializer.Serialize(writer, value.Organizations); + } + if (value.IdentitiesOption.IsSet) + { + writer.WritePropertyName("identities"); + serializer.Serialize(writer, value.Identities); + } + if (value.PropertiesOption.IsSet) + { + writer.WritePropertyName("properties"); + serializer.Serialize(writer, value.Properties); + } + if (value.ApiScopesOption.IsSet) + { + writer.WritePropertyName("api_scopes"); + serializer.Serialize(writer, value.ApiScopes); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/SetUserPasswordRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/SetUserPasswordRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..9265215 --- /dev/null +++ b/Kinde.Api/Converters/SetUserPasswordRequestNewtonsoftConverter.cs @@ -0,0 +1,95 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for SetUserPasswordRequest that handles the Option<> structure + /// + public class SetUserPasswordRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override SetUserPasswordRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, SetUserPasswordRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + SetUserPasswordRequest.HashingMethodEnum? hashingMethod = default(SetUserPasswordRequest.HashingMethodEnum?); + if (jsonObject["hashing_method"] != null) + { + var hashingMethodStr = jsonObject["hashing_method"].ToObject(); + if (!string.IsNullOrEmpty(hashingMethodStr)) + { + hashingMethod = SetUserPasswordRequest.HashingMethodEnumFromString(hashingMethodStr); + } + } + SetUserPasswordRequest.SaltPositionEnum? saltPosition = default(SetUserPasswordRequest.SaltPositionEnum?); + if (jsonObject["salt_position"] != null) + { + var saltPositionStr = jsonObject["salt_position"].ToObject(); + if (!string.IsNullOrEmpty(saltPositionStr)) + { + saltPosition = SetUserPasswordRequest.SaltPositionEnumFromString(saltPositionStr); + } + } + string? salt = default(string?); + if (jsonObject["salt"] != null) + { + salt = jsonObject["salt"].ToObject(); + } + bool? isTemporaryPassword = default(bool?); + if (jsonObject["is_temporary_password"] != null) + { + isTemporaryPassword = jsonObject["is_temporary_password"].ToObject(serializer); + } + string hashedPassword = default(string); + if (jsonObject["hashed_password"] != null) + { + hashedPassword = jsonObject["hashed_password"].ToObject(); + } + + return new SetUserPasswordRequest( + hashingMethod: hashingMethod != null ? new Option(hashingMethod) : default, saltPosition: saltPosition != null ? new Option(saltPosition) : default, salt: salt != null ? new Option(salt) : default, isTemporaryPassword: isTemporaryPassword != null ? new Option(isTemporaryPassword) : default, hashedPassword: hashedPassword ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, SetUserPasswordRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.HashingMethodOption.IsSet && value.HashingMethod != null) + { + writer.WritePropertyName("hashing_method"); + var hashingMethodStr = SetUserPasswordRequest.HashingMethodEnumToJsonValue(value.HashingMethod.Value); + writer.WriteValue(hashingMethodStr); + } + if (value.SaltPositionOption.IsSet && value.SaltPosition != null) + { + writer.WritePropertyName("salt_position"); + var saltPositionStr = SetUserPasswordRequest.SaltPositionEnumToJsonValue(value.SaltPosition.Value); + writer.WriteValue(saltPositionStr); + } + if (value.SaltOption.IsSet && value.Salt != null) + { + writer.WritePropertyName("salt"); + serializer.Serialize(writer, value.Salt); + } + if (value.IsTemporaryPasswordOption.IsSet && value.IsTemporaryPassword != null) + { + writer.WritePropertyName("is_temporary_password"); + serializer.Serialize(writer, value.IsTemporaryPassword); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/SubscriberNewtonsoftConverter.cs b/Kinde.Api/Converters/SubscriberNewtonsoftConverter.cs new file mode 100644 index 0000000..659bf25 --- /dev/null +++ b/Kinde.Api/Converters/SubscriberNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for Subscriber that handles the Option<> structure + /// + public class SubscriberNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override Subscriber ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, Subscriber existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? preferredEmail = default(string?); + if (jsonObject["preferred_email"] != null) + { + preferredEmail = jsonObject["preferred_email"].ToObject(); + } + string? firstName = default(string?); + if (jsonObject["first_name"] != null) + { + firstName = jsonObject["first_name"].ToObject(); + } + string? lastName = default(string?); + if (jsonObject["last_name"] != null) + { + lastName = jsonObject["last_name"].ToObject(); + } + + return new Subscriber( + id: id != null ? new Option(id) : default, preferredEmail: preferredEmail != null ? new Option(preferredEmail) : default, firstName: firstName != null ? new Option(firstName) : default, lastName: lastName != null ? new Option(lastName) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, Subscriber value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.PreferredEmailOption.IsSet && value.PreferredEmail != null) + { + writer.WritePropertyName("preferred_email"); + serializer.Serialize(writer, value.PreferredEmail); + } + if (value.FirstNameOption.IsSet && value.FirstName != null) + { + writer.WritePropertyName("first_name"); + serializer.Serialize(writer, value.FirstName); + } + if (value.LastNameOption.IsSet && value.LastName != null) + { + writer.WritePropertyName("last_name"); + serializer.Serialize(writer, value.LastName); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/SubscribersSubscriberNewtonsoftConverter.cs b/Kinde.Api/Converters/SubscribersSubscriberNewtonsoftConverter.cs new file mode 100644 index 0000000..bedb961 --- /dev/null +++ b/Kinde.Api/Converters/SubscribersSubscriberNewtonsoftConverter.cs @@ -0,0 +1,90 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for SubscribersSubscriber that handles the Option<> structure + /// + public class SubscribersSubscriberNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override SubscribersSubscriber ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, SubscribersSubscriber existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? email = default(string?); + if (jsonObject["email"] != null) + { + email = jsonObject["email"].ToObject(); + } + string? fullName = default(string?); + if (jsonObject["full_name"] != null) + { + fullName = jsonObject["full_name"].ToObject(); + } + string? firstName = default(string?); + if (jsonObject["first_name"] != null) + { + firstName = jsonObject["first_name"].ToObject(); + } + string? lastName = default(string?); + if (jsonObject["last_name"] != null) + { + lastName = jsonObject["last_name"].ToObject(); + } + + return new SubscribersSubscriber( + id: id != null ? new Option(id) : default, email: email != null ? new Option(email) : default, fullName: fullName != null ? new Option(fullName) : default, firstName: firstName != null ? new Option(firstName) : default, lastName: lastName != null ? new Option(lastName) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, SubscribersSubscriber value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.EmailOption.IsSet && value.Email != null) + { + writer.WritePropertyName("email"); + serializer.Serialize(writer, value.Email); + } + if (value.FullNameOption.IsSet && value.FullName != null) + { + writer.WritePropertyName("full_name"); + serializer.Serialize(writer, value.FullName); + } + if (value.FirstNameOption.IsSet && value.FirstName != null) + { + writer.WritePropertyName("first_name"); + serializer.Serialize(writer, value.FirstName); + } + if (value.LastNameOption.IsSet && value.LastName != null) + { + writer.WritePropertyName("last_name"); + serializer.Serialize(writer, value.LastName); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/SuccessResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/SuccessResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..96727d6 --- /dev/null +++ b/Kinde.Api/Converters/SuccessResponseNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for SuccessResponse that handles the Option<> structure + /// + public class SuccessResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override SuccessResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, SuccessResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + + return new SuccessResponse( + message: message != null ? new Option(message) : default, code: code != null ? new Option(code) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, SuccessResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateAPIApplicationsRequestApplicationsInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateAPIApplicationsRequestApplicationsInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..5284e03 --- /dev/null +++ b/Kinde.Api/Converters/UpdateAPIApplicationsRequestApplicationsInnerNewtonsoftConverter.cs @@ -0,0 +1,55 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateAPIApplicationsRequestApplicationsInner that handles the Option<> structure + /// + public class UpdateAPIApplicationsRequestApplicationsInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateAPIApplicationsRequestApplicationsInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateAPIApplicationsRequestApplicationsInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? operation = default(string?); + if (jsonObject["operation"] != null) + { + operation = jsonObject["operation"].ToObject(); + } + string id = default(string); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + + return new UpdateAPIApplicationsRequestApplicationsInner( + operation: operation != null ? new Option(operation) : default, id: id ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateAPIApplicationsRequestApplicationsInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.OperationOption.IsSet && value.Operation != null) + { + writer.WritePropertyName("operation"); + serializer.Serialize(writer, value.Operation); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateAPIApplicationsRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateAPIApplicationsRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..9fb4b9b --- /dev/null +++ b/Kinde.Api/Converters/UpdateAPIApplicationsRequestNewtonsoftConverter.cs @@ -0,0 +1,45 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateAPIApplicationsRequest that handles the Option<> structure + /// + public class UpdateAPIApplicationsRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateAPIApplicationsRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateAPIApplicationsRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + List applications = default(List); + if (jsonObject["applications"] != null) + { + applications = jsonObject["applications"].ToObject>(serializer); + } + + return new UpdateAPIApplicationsRequest( + applications: applications ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateAPIApplicationsRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateAPIScopeRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateAPIScopeRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..1ec91a9 --- /dev/null +++ b/Kinde.Api/Converters/UpdateAPIScopeRequestNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateAPIScopeRequest that handles the Option<> structure + /// + public class UpdateAPIScopeRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateAPIScopeRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateAPIScopeRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? description = default(string?); + if (jsonObject["description"] != null) + { + description = jsonObject["description"].ToObject(); + } + + return new UpdateAPIScopeRequest( + description: description != null ? new Option(description) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateAPIScopeRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.DescriptionOption.IsSet && value.Description != null) + { + writer.WritePropertyName("description"); + serializer.Serialize(writer, value.Description); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateApplicationRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateApplicationRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..d946120 --- /dev/null +++ b/Kinde.Api/Converters/UpdateApplicationRequestNewtonsoftConverter.cs @@ -0,0 +1,100 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateApplicationRequest that handles the Option<> structure + /// + public class UpdateApplicationRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateApplicationRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateApplicationRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? languageKey = default(string?); + if (jsonObject["language_key"] != null) + { + languageKey = jsonObject["language_key"].ToObject(); + } + List logoutUris = default(List); + if (jsonObject["logout_uris"] != null) + { + logoutUris = jsonObject["logout_uris"].ToObject>(serializer); + } + List redirectUris = default(List); + if (jsonObject["redirect_uris"] != null) + { + redirectUris = jsonObject["redirect_uris"].ToObject>(serializer); + } + string? loginUri = default(string?); + if (jsonObject["login_uri"] != null) + { + loginUri = jsonObject["login_uri"].ToObject(); + } + string? homepageUri = default(string?); + if (jsonObject["homepage_uri"] != null) + { + homepageUri = jsonObject["homepage_uri"].ToObject(); + } + + return new UpdateApplicationRequest( + name: name != null ? new Option(name) : default, languageKey: languageKey != null ? new Option(languageKey) : default, logoutUris: logoutUris != null ? new Option?>(logoutUris) : default, redirectUris: redirectUris != null ? new Option?>(redirectUris) : default, loginUri: loginUri != null ? new Option(loginUri) : default, homepageUri: homepageUri != null ? new Option(homepageUri) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateApplicationRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.LanguageKeyOption.IsSet && value.LanguageKey != null) + { + writer.WritePropertyName("language_key"); + serializer.Serialize(writer, value.LanguageKey); + } + if (value.LogoutUrisOption.IsSet) + { + writer.WritePropertyName("logout_uris"); + serializer.Serialize(writer, value.LogoutUris); + } + if (value.RedirectUrisOption.IsSet) + { + writer.WritePropertyName("redirect_uris"); + serializer.Serialize(writer, value.RedirectUris); + } + if (value.LoginUriOption.IsSet && value.LoginUri != null) + { + writer.WritePropertyName("login_uri"); + serializer.Serialize(writer, value.LoginUri); + } + if (value.HomepageUriOption.IsSet && value.HomepageUri != null) + { + writer.WritePropertyName("homepage_uri"); + serializer.Serialize(writer, value.HomepageUri); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateApplicationTokensRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateApplicationTokensRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..2190c6e --- /dev/null +++ b/Kinde.Api/Converters/UpdateApplicationTokensRequestNewtonsoftConverter.cs @@ -0,0 +1,90 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateApplicationTokensRequest that handles the Option<> structure + /// + public class UpdateApplicationTokensRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateApplicationTokensRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateApplicationTokensRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + int? accessTokenLifetime = default(int?); + if (jsonObject["access_token_lifetime"] != null) + { + accessTokenLifetime = jsonObject["access_token_lifetime"].ToObject(serializer); + } + int? refreshTokenLifetime = default(int?); + if (jsonObject["refresh_token_lifetime"] != null) + { + refreshTokenLifetime = jsonObject["refresh_token_lifetime"].ToObject(serializer); + } + int? idTokenLifetime = default(int?); + if (jsonObject["id_token_lifetime"] != null) + { + idTokenLifetime = jsonObject["id_token_lifetime"].ToObject(serializer); + } + int? authenticatedSessionLifetime = default(int?); + if (jsonObject["authenticated_session_lifetime"] != null) + { + authenticatedSessionLifetime = jsonObject["authenticated_session_lifetime"].ToObject(serializer); + } + bool? isHasuraMappingEnabled = default(bool?); + if (jsonObject["is_hasura_mapping_enabled"] != null) + { + isHasuraMappingEnabled = jsonObject["is_hasura_mapping_enabled"].ToObject(serializer); + } + + return new UpdateApplicationTokensRequest( + accessTokenLifetime: accessTokenLifetime != null ? new Option(accessTokenLifetime) : default, refreshTokenLifetime: refreshTokenLifetime != null ? new Option(refreshTokenLifetime) : default, idTokenLifetime: idTokenLifetime != null ? new Option(idTokenLifetime) : default, authenticatedSessionLifetime: authenticatedSessionLifetime != null ? new Option(authenticatedSessionLifetime) : default, isHasuraMappingEnabled: isHasuraMappingEnabled != null ? new Option(isHasuraMappingEnabled) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateApplicationTokensRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.AccessTokenLifetimeOption.IsSet && value.AccessTokenLifetime != null) + { + writer.WritePropertyName("access_token_lifetime"); + serializer.Serialize(writer, value.AccessTokenLifetime); + } + if (value.RefreshTokenLifetimeOption.IsSet && value.RefreshTokenLifetime != null) + { + writer.WritePropertyName("refresh_token_lifetime"); + serializer.Serialize(writer, value.RefreshTokenLifetime); + } + if (value.IdTokenLifetimeOption.IsSet && value.IdTokenLifetime != null) + { + writer.WritePropertyName("id_token_lifetime"); + serializer.Serialize(writer, value.IdTokenLifetime); + } + if (value.AuthenticatedSessionLifetimeOption.IsSet && value.AuthenticatedSessionLifetime != null) + { + writer.WritePropertyName("authenticated_session_lifetime"); + serializer.Serialize(writer, value.AuthenticatedSessionLifetime); + } + if (value.IsHasuraMappingEnabledOption.IsSet && value.IsHasuraMappingEnabled != null) + { + writer.WritePropertyName("is_hasura_mapping_enabled"); + serializer.Serialize(writer, value.IsHasuraMappingEnabled); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateApplicationsPropertyRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateApplicationsPropertyRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..a6268cc --- /dev/null +++ b/Kinde.Api/Converters/UpdateApplicationsPropertyRequestNewtonsoftConverter.cs @@ -0,0 +1,45 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateApplicationsPropertyRequest that handles the Option<> structure + /// + public class UpdateApplicationsPropertyRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateApplicationsPropertyRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateApplicationsPropertyRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + UpdateApplicationsPropertyRequestValue value = default(UpdateApplicationsPropertyRequestValue); + if (jsonObject["value"] != null) + { + value = jsonObject["value"].ToObject(serializer); + } + + return new UpdateApplicationsPropertyRequest( + value: value ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateApplicationsPropertyRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateBusinessRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateBusinessRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..a8131a6 --- /dev/null +++ b/Kinde.Api/Converters/UpdateBusinessRequestNewtonsoftConverter.cs @@ -0,0 +1,140 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateBusinessRequest that handles the Option<> structure + /// + public class UpdateBusinessRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateBusinessRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateBusinessRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? businessName = default(string?); + if (jsonObject["business_name"] != null) + { + businessName = jsonObject["business_name"].ToObject(); + } + string? email = default(string?); + if (jsonObject["email"] != null) + { + email = jsonObject["email"].ToObject(); + } + string? industryKey = default(string?); + if (jsonObject["industry_key"] != null) + { + industryKey = jsonObject["industry_key"].ToObject(); + } + bool? isClickWrap = default(bool?); + if (jsonObject["is_click_wrap"] != null) + { + isClickWrap = jsonObject["is_click_wrap"].ToObject(serializer); + } + bool? isShowKindeBranding = default(bool?); + if (jsonObject["is_show_kinde_branding"] != null) + { + isShowKindeBranding = jsonObject["is_show_kinde_branding"].ToObject(serializer); + } + string? kindePerkCode = default(string?); + if (jsonObject["kinde_perk_code"] != null) + { + kindePerkCode = jsonObject["kinde_perk_code"].ToObject(); + } + string? phone = default(string?); + if (jsonObject["phone"] != null) + { + phone = jsonObject["phone"].ToObject(); + } + string? privacyUrl = default(string?); + if (jsonObject["privacy_url"] != null) + { + privacyUrl = jsonObject["privacy_url"].ToObject(); + } + string? termsUrl = default(string?); + if (jsonObject["terms_url"] != null) + { + termsUrl = jsonObject["terms_url"].ToObject(); + } + string? timezoneKey = default(string?); + if (jsonObject["timezone_key"] != null) + { + timezoneKey = jsonObject["timezone_key"].ToObject(); + } + + return new UpdateBusinessRequest( + businessName: businessName != null ? new Option(businessName) : default, email: email != null ? new Option(email) : default, industryKey: industryKey != null ? new Option(industryKey) : default, isClickWrap: isClickWrap != null ? new Option(isClickWrap) : default, isShowKindeBranding: isShowKindeBranding != null ? new Option(isShowKindeBranding) : default, kindePerkCode: kindePerkCode != null ? new Option(kindePerkCode) : default, phone: phone != null ? new Option(phone) : default, privacyUrl: privacyUrl != null ? new Option(privacyUrl) : default, termsUrl: termsUrl != null ? new Option(termsUrl) : default, timezoneKey: timezoneKey != null ? new Option(timezoneKey) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateBusinessRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.BusinessNameOption.IsSet && value.BusinessName != null) + { + writer.WritePropertyName("business_name"); + serializer.Serialize(writer, value.BusinessName); + } + if (value.EmailOption.IsSet && value.Email != null) + { + writer.WritePropertyName("email"); + serializer.Serialize(writer, value.Email); + } + if (value.IndustryKeyOption.IsSet && value.IndustryKey != null) + { + writer.WritePropertyName("industry_key"); + serializer.Serialize(writer, value.IndustryKey); + } + if (value.IsClickWrapOption.IsSet && value.IsClickWrap != null) + { + writer.WritePropertyName("is_click_wrap"); + serializer.Serialize(writer, value.IsClickWrap); + } + if (value.IsShowKindeBrandingOption.IsSet && value.IsShowKindeBranding != null) + { + writer.WritePropertyName("is_show_kinde_branding"); + serializer.Serialize(writer, value.IsShowKindeBranding); + } + if (value.KindePerkCodeOption.IsSet && value.KindePerkCode != null) + { + writer.WritePropertyName("kinde_perk_code"); + serializer.Serialize(writer, value.KindePerkCode); + } + if (value.PhoneOption.IsSet && value.Phone != null) + { + writer.WritePropertyName("phone"); + serializer.Serialize(writer, value.Phone); + } + if (value.PrivacyUrlOption.IsSet && value.PrivacyUrl != null) + { + writer.WritePropertyName("privacy_url"); + serializer.Serialize(writer, value.PrivacyUrl); + } + if (value.TermsUrlOption.IsSet && value.TermsUrl != null) + { + writer.WritePropertyName("terms_url"); + serializer.Serialize(writer, value.TermsUrl); + } + if (value.TimezoneKeyOption.IsSet && value.TimezoneKey != null) + { + writer.WritePropertyName("timezone_key"); + serializer.Serialize(writer, value.TimezoneKey); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateCategoryRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateCategoryRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..c027e2e --- /dev/null +++ b/Kinde.Api/Converters/UpdateCategoryRequestNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateCategoryRequest that handles the Option<> structure + /// + public class UpdateCategoryRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateCategoryRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateCategoryRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + + return new UpdateCategoryRequest( + name: name != null ? new Option(name) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateCategoryRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateConnectionRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateConnectionRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..bdd4409 --- /dev/null +++ b/Kinde.Api/Converters/UpdateConnectionRequestNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateConnectionRequest that handles the Option<> structure + /// + public class UpdateConnectionRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateConnectionRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateConnectionRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? displayName = default(string?); + if (jsonObject["display_name"] != null) + { + displayName = jsonObject["display_name"].ToObject(); + } + List enabledApplications = default(List); + if (jsonObject["enabled_applications"] != null) + { + enabledApplications = jsonObject["enabled_applications"].ToObject>(serializer); + } + UpdateConnectionRequestOptions? options = default(UpdateConnectionRequestOptions?); + if (jsonObject["options"] != null) + { + options = jsonObject["options"].ToObject(serializer); + } + + return new UpdateConnectionRequest( + name: name != null ? new Option(name) : default, displayName: displayName != null ? new Option(displayName) : default, enabledApplications: enabledApplications != null ? new Option?>(enabledApplications) : default, options: options != null ? new Option(options) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateConnectionRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.DisplayNameOption.IsSet && value.DisplayName != null) + { + writer.WritePropertyName("display_name"); + serializer.Serialize(writer, value.DisplayName); + } + if (value.EnabledApplicationsOption.IsSet) + { + writer.WritePropertyName("enabled_applications"); + serializer.Serialize(writer, value.EnabledApplications); + } + if (value.OptionsOption.IsSet && value.Options != null) + { + writer.WritePropertyName("options"); + serializer.Serialize(writer, value.Options); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateConnectionRequestOptionsOneOfNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateConnectionRequestOptionsOneOfNewtonsoftConverter.cs new file mode 100644 index 0000000..ffb0a37 --- /dev/null +++ b/Kinde.Api/Converters/UpdateConnectionRequestOptionsOneOfNewtonsoftConverter.cs @@ -0,0 +1,150 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateConnectionRequestOptionsOneOf that handles the Option<> structure + /// + public class UpdateConnectionRequestOptionsOneOfNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateConnectionRequestOptionsOneOf ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateConnectionRequestOptionsOneOf existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? clientId = default(string?); + if (jsonObject["client_id"] != null) + { + clientId = jsonObject["client_id"].ToObject(); + } + string? clientSecret = default(string?); + if (jsonObject["client_secret"] != null) + { + clientSecret = jsonObject["client_secret"].ToObject(); + } + List homeRealmDomains = default(List); + if (jsonObject["home_realm_domains"] != null) + { + homeRealmDomains = jsonObject["home_realm_domains"].ToObject>(serializer); + } + string? entraIdDomain = default(string?); + if (jsonObject["entra_id_domain"] != null) + { + entraIdDomain = jsonObject["entra_id_domain"].ToObject(); + } + bool? isUseCommonEndpoint = default(bool?); + if (jsonObject["is_use_common_endpoint"] != null) + { + isUseCommonEndpoint = jsonObject["is_use_common_endpoint"].ToObject(serializer); + } + bool? isSyncUserProfileOnLogin = default(bool?); + if (jsonObject["is_sync_user_profile_on_login"] != null) + { + isSyncUserProfileOnLogin = jsonObject["is_sync_user_profile_on_login"].ToObject(serializer); + } + bool? isRetrieveProviderUserGroups = default(bool?); + if (jsonObject["is_retrieve_provider_user_groups"] != null) + { + isRetrieveProviderUserGroups = jsonObject["is_retrieve_provider_user_groups"].ToObject(serializer); + } + bool? isExtendedAttributesRequired = default(bool?); + if (jsonObject["is_extended_attributes_required"] != null) + { + isExtendedAttributesRequired = jsonObject["is_extended_attributes_required"].ToObject(serializer); + } + bool? isCreateMissingUser = default(bool?); + if (jsonObject["is_create_missing_user"] != null) + { + isCreateMissingUser = jsonObject["is_create_missing_user"].ToObject(serializer); + } + bool? isForceShowSsoButton = default(bool?); + if (jsonObject["is_force_show_sso_button"] != null) + { + isForceShowSsoButton = jsonObject["is_force_show_sso_button"].ToObject(serializer); + } + Dictionary upstreamParams = default(Dictionary); + if (jsonObject["upstream_params"] != null) + { + upstreamParams = jsonObject["upstream_params"].ToObject>(serializer); + } + + return new UpdateConnectionRequestOptionsOneOf( + clientId: clientId != null ? new Option(clientId) : default, clientSecret: clientSecret != null ? new Option(clientSecret) : default, homeRealmDomains: homeRealmDomains != null ? new Option?>(homeRealmDomains) : default, entraIdDomain: entraIdDomain != null ? new Option(entraIdDomain) : default, isUseCommonEndpoint: isUseCommonEndpoint != null ? new Option(isUseCommonEndpoint) : default, isSyncUserProfileOnLogin: isSyncUserProfileOnLogin != null ? new Option(isSyncUserProfileOnLogin) : default, isRetrieveProviderUserGroups: isRetrieveProviderUserGroups != null ? new Option(isRetrieveProviderUserGroups) : default, isExtendedAttributesRequired: isExtendedAttributesRequired != null ? new Option(isExtendedAttributesRequired) : default, isCreateMissingUser: isCreateMissingUser != null ? new Option(isCreateMissingUser) : default, isForceShowSsoButton: isForceShowSsoButton != null ? new Option(isForceShowSsoButton) : default, upstreamParams: upstreamParams != null ? new Option>(upstreamParams) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateConnectionRequestOptionsOneOf value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.ClientIdOption.IsSet && value.ClientId != null) + { + writer.WritePropertyName("client_id"); + serializer.Serialize(writer, value.ClientId); + } + if (value.ClientSecretOption.IsSet && value.ClientSecret != null) + { + writer.WritePropertyName("client_secret"); + serializer.Serialize(writer, value.ClientSecret); + } + if (value.HomeRealmDomainsOption.IsSet) + { + writer.WritePropertyName("home_realm_domains"); + serializer.Serialize(writer, value.HomeRealmDomains); + } + if (value.EntraIdDomainOption.IsSet && value.EntraIdDomain != null) + { + writer.WritePropertyName("entra_id_domain"); + serializer.Serialize(writer, value.EntraIdDomain); + } + if (value.IsUseCommonEndpointOption.IsSet && value.IsUseCommonEndpoint != null) + { + writer.WritePropertyName("is_use_common_endpoint"); + serializer.Serialize(writer, value.IsUseCommonEndpoint); + } + if (value.IsSyncUserProfileOnLoginOption.IsSet && value.IsSyncUserProfileOnLogin != null) + { + writer.WritePropertyName("is_sync_user_profile_on_login"); + serializer.Serialize(writer, value.IsSyncUserProfileOnLogin); + } + if (value.IsRetrieveProviderUserGroupsOption.IsSet && value.IsRetrieveProviderUserGroups != null) + { + writer.WritePropertyName("is_retrieve_provider_user_groups"); + serializer.Serialize(writer, value.IsRetrieveProviderUserGroups); + } + if (value.IsExtendedAttributesRequiredOption.IsSet && value.IsExtendedAttributesRequired != null) + { + writer.WritePropertyName("is_extended_attributes_required"); + serializer.Serialize(writer, value.IsExtendedAttributesRequired); + } + if (value.IsCreateMissingUserOption.IsSet && value.IsCreateMissingUser != null) + { + writer.WritePropertyName("is_create_missing_user"); + serializer.Serialize(writer, value.IsCreateMissingUser); + } + if (value.IsForceShowSsoButtonOption.IsSet && value.IsForceShowSsoButton != null) + { + writer.WritePropertyName("is_force_show_sso_button"); + serializer.Serialize(writer, value.IsForceShowSsoButton); + } + if (value.UpstreamParamsOption.IsSet) + { + writer.WritePropertyName("upstream_params"); + serializer.Serialize(writer, value.UpstreamParams); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateEnvironementFeatureFlagOverrideRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateEnvironementFeatureFlagOverrideRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..d625cc8 --- /dev/null +++ b/Kinde.Api/Converters/UpdateEnvironementFeatureFlagOverrideRequestNewtonsoftConverter.cs @@ -0,0 +1,45 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateEnvironementFeatureFlagOverrideRequest that handles the Option<> structure + /// + public class UpdateEnvironementFeatureFlagOverrideRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateEnvironementFeatureFlagOverrideRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateEnvironementFeatureFlagOverrideRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string value = default(string); + if (jsonObject["value"] != null) + { + value = jsonObject["value"].ToObject(); + } + + return new UpdateEnvironementFeatureFlagOverrideRequest( + value: value ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateEnvironementFeatureFlagOverrideRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateEnvironmentVariableRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateEnvironmentVariableRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..24f39d8 --- /dev/null +++ b/Kinde.Api/Converters/UpdateEnvironmentVariableRequestNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateEnvironmentVariableRequest that handles the Option<> structure + /// + public class UpdateEnvironmentVariableRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateEnvironmentVariableRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateEnvironmentVariableRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? key = default(string?); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + string? value = default(string?); + if (jsonObject["value"] != null) + { + value = jsonObject["value"].ToObject(); + } + bool? isSecret = default(bool?); + if (jsonObject["is_secret"] != null) + { + isSecret = jsonObject["is_secret"].ToObject(serializer); + } + + return new UpdateEnvironmentVariableRequest( + key: key != null ? new Option(key) : default, value: value != null ? new Option(value) : default, isSecret: isSecret != null ? new Option(isSecret) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateEnvironmentVariableRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.KeyOption.IsSet && value.Key != null) + { + writer.WritePropertyName("key"); + serializer.Serialize(writer, value.Key); + } + if (value.ValueOption.IsSet && value.Value != null) + { + writer.WritePropertyName("value"); + serializer.Serialize(writer, value.Value); + } + if (value.IsSecretOption.IsSet && value.IsSecret != null) + { + writer.WritePropertyName("is_secret"); + serializer.Serialize(writer, value.IsSecret); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateEnvironmentVariableResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateEnvironmentVariableResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..0bedecb --- /dev/null +++ b/Kinde.Api/Converters/UpdateEnvironmentVariableResponseNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateEnvironmentVariableResponse that handles the Option<> structure + /// + public class UpdateEnvironmentVariableResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateEnvironmentVariableResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateEnvironmentVariableResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + + return new UpdateEnvironmentVariableResponse( + message: message != null ? new Option(message) : default, code: code != null ? new Option(code) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateEnvironmentVariableResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateIdentityRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateIdentityRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..3b34f3b --- /dev/null +++ b/Kinde.Api/Converters/UpdateIdentityRequestNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateIdentityRequest that handles the Option<> structure + /// + public class UpdateIdentityRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateIdentityRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateIdentityRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + bool? isPrimary = default(bool?); + if (jsonObject["is_primary"] != null) + { + isPrimary = jsonObject["is_primary"].ToObject(serializer); + } + + return new UpdateIdentityRequest( + isPrimary: isPrimary != null ? new Option(isPrimary) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateIdentityRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IsPrimaryOption.IsSet && value.IsPrimary != null) + { + writer.WritePropertyName("is_primary"); + serializer.Serialize(writer, value.IsPrimary); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateOrganizationPropertiesRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateOrganizationPropertiesRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..c0eba3d --- /dev/null +++ b/Kinde.Api/Converters/UpdateOrganizationPropertiesRequestNewtonsoftConverter.cs @@ -0,0 +1,45 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateOrganizationPropertiesRequest that handles the Option<> structure + /// + public class UpdateOrganizationPropertiesRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateOrganizationPropertiesRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateOrganizationPropertiesRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + Object properties = default(Object); + if (jsonObject["properties"] != null) + { + properties = jsonObject["properties"].ToObject(serializer); + } + + return new UpdateOrganizationPropertiesRequest( + properties: properties ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateOrganizationPropertiesRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateOrganizationRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateOrganizationRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..43fe2ea --- /dev/null +++ b/Kinde.Api/Converters/UpdateOrganizationRequestNewtonsoftConverter.cs @@ -0,0 +1,225 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateOrganizationRequest that handles the Option<> structure + /// + public class UpdateOrganizationRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateOrganizationRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateOrganizationRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + UpdateOrganizationRequest.ThemeCodeEnum? themeCode = default(UpdateOrganizationRequest.ThemeCodeEnum?); + if (jsonObject["theme_code"] != null) + { + var themeCodeStr = jsonObject["theme_code"].ToObject(); + if (!string.IsNullOrEmpty(themeCodeStr)) + { + themeCode = UpdateOrganizationRequest.ThemeCodeEnumFromString(themeCodeStr); + } + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? externalId = default(string?); + if (jsonObject["external_id"] != null) + { + externalId = jsonObject["external_id"].ToObject(); + } + string? backgroundColor = default(string?); + if (jsonObject["background_color"] != null) + { + backgroundColor = jsonObject["background_color"].ToObject(); + } + string? buttonColor = default(string?); + if (jsonObject["button_color"] != null) + { + buttonColor = jsonObject["button_color"].ToObject(); + } + string? buttonTextColor = default(string?); + if (jsonObject["button_text_color"] != null) + { + buttonTextColor = jsonObject["button_text_color"].ToObject(); + } + string? linkColor = default(string?); + if (jsonObject["link_color"] != null) + { + linkColor = jsonObject["link_color"].ToObject(); + } + string? backgroundColorDark = default(string?); + if (jsonObject["background_color_dark"] != null) + { + backgroundColorDark = jsonObject["background_color_dark"].ToObject(); + } + string? buttonColorDark = default(string?); + if (jsonObject["button_color_dark"] != null) + { + buttonColorDark = jsonObject["button_color_dark"].ToObject(); + } + string? buttonTextColorDark = default(string?); + if (jsonObject["button_text_color_dark"] != null) + { + buttonTextColorDark = jsonObject["button_text_color_dark"].ToObject(); + } + string? linkColorDark = default(string?); + if (jsonObject["link_color_dark"] != null) + { + linkColorDark = jsonObject["link_color_dark"].ToObject(); + } + string? handle = default(string?); + if (jsonObject["handle"] != null) + { + handle = jsonObject["handle"].ToObject(); + } + bool? isAutoJoinDomainList = default(bool?); + if (jsonObject["is_auto_join_domain_list"] != null) + { + isAutoJoinDomainList = jsonObject["is_auto_join_domain_list"].ToObject(serializer); + } + List allowedDomains = default(List); + if (jsonObject["allowed_domains"] != null) + { + allowedDomains = jsonObject["allowed_domains"].ToObject>(serializer); + } + bool? isEnableAdvancedOrgs = default(bool?); + if (jsonObject["is_enable_advanced_orgs"] != null) + { + isEnableAdvancedOrgs = jsonObject["is_enable_advanced_orgs"].ToObject(serializer); + } + bool? isEnforceMfa = default(bool?); + if (jsonObject["is_enforce_mfa"] != null) + { + isEnforceMfa = jsonObject["is_enforce_mfa"].ToObject(serializer); + } + string? senderName = default(string?); + if (jsonObject["sender_name"] != null) + { + senderName = jsonObject["sender_name"].ToObject(); + } + string? senderEmail = default(string?); + if (jsonObject["sender_email"] != null) + { + senderEmail = jsonObject["sender_email"].ToObject(); + } + + return new UpdateOrganizationRequest( + themeCode: themeCode != null ? new Option(themeCode) : default, name: name != null ? new Option(name) : default, externalId: externalId != null ? new Option(externalId) : default, backgroundColor: backgroundColor != null ? new Option(backgroundColor) : default, buttonColor: buttonColor != null ? new Option(buttonColor) : default, buttonTextColor: buttonTextColor != null ? new Option(buttonTextColor) : default, linkColor: linkColor != null ? new Option(linkColor) : default, backgroundColorDark: backgroundColorDark != null ? new Option(backgroundColorDark) : default, buttonColorDark: buttonColorDark != null ? new Option(buttonColorDark) : default, buttonTextColorDark: buttonTextColorDark != null ? new Option(buttonTextColorDark) : default, linkColorDark: linkColorDark != null ? new Option(linkColorDark) : default, handle: handle != null ? new Option(handle) : default, isAutoJoinDomainList: isAutoJoinDomainList != null ? new Option(isAutoJoinDomainList) : default, allowedDomains: allowedDomains != null ? new Option?>(allowedDomains) : default, isEnableAdvancedOrgs: isEnableAdvancedOrgs != null ? new Option(isEnableAdvancedOrgs) : default, isEnforceMfa: isEnforceMfa != null ? new Option(isEnforceMfa) : default, senderName: senderName != null ? new Option(senderName) : default, senderEmail: senderEmail != null ? new Option(senderEmail) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateOrganizationRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.ThemeCodeOption.IsSet && value.ThemeCode != null) + { + writer.WritePropertyName("theme_code"); + var themeCodeStr = UpdateOrganizationRequest.ThemeCodeEnumToJsonValue(value.ThemeCode.Value); + writer.WriteValue(themeCodeStr); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.ExternalIdOption.IsSet && value.ExternalId != null) + { + writer.WritePropertyName("external_id"); + serializer.Serialize(writer, value.ExternalId); + } + if (value.BackgroundColorOption.IsSet && value.BackgroundColor != null) + { + writer.WritePropertyName("background_color"); + serializer.Serialize(writer, value.BackgroundColor); + } + if (value.ButtonColorOption.IsSet && value.ButtonColor != null) + { + writer.WritePropertyName("button_color"); + serializer.Serialize(writer, value.ButtonColor); + } + if (value.ButtonTextColorOption.IsSet && value.ButtonTextColor != null) + { + writer.WritePropertyName("button_text_color"); + serializer.Serialize(writer, value.ButtonTextColor); + } + if (value.LinkColorOption.IsSet && value.LinkColor != null) + { + writer.WritePropertyName("link_color"); + serializer.Serialize(writer, value.LinkColor); + } + if (value.BackgroundColorDarkOption.IsSet && value.BackgroundColorDark != null) + { + writer.WritePropertyName("background_color_dark"); + serializer.Serialize(writer, value.BackgroundColorDark); + } + if (value.ButtonColorDarkOption.IsSet && value.ButtonColorDark != null) + { + writer.WritePropertyName("button_color_dark"); + serializer.Serialize(writer, value.ButtonColorDark); + } + if (value.ButtonTextColorDarkOption.IsSet && value.ButtonTextColorDark != null) + { + writer.WritePropertyName("button_text_color_dark"); + serializer.Serialize(writer, value.ButtonTextColorDark); + } + if (value.LinkColorDarkOption.IsSet && value.LinkColorDark != null) + { + writer.WritePropertyName("link_color_dark"); + serializer.Serialize(writer, value.LinkColorDark); + } + if (value.HandleOption.IsSet && value.Handle != null) + { + writer.WritePropertyName("handle"); + serializer.Serialize(writer, value.Handle); + } + if (value.IsAutoJoinDomainListOption.IsSet && value.IsAutoJoinDomainList != null) + { + writer.WritePropertyName("is_auto_join_domain_list"); + serializer.Serialize(writer, value.IsAutoJoinDomainList); + } + if (value.AllowedDomainsOption.IsSet) + { + writer.WritePropertyName("allowed_domains"); + serializer.Serialize(writer, value.AllowedDomains); + } + if (value.IsEnableAdvancedOrgsOption.IsSet && value.IsEnableAdvancedOrgs != null) + { + writer.WritePropertyName("is_enable_advanced_orgs"); + serializer.Serialize(writer, value.IsEnableAdvancedOrgs); + } + if (value.IsEnforceMfaOption.IsSet && value.IsEnforceMfa != null) + { + writer.WritePropertyName("is_enforce_mfa"); + serializer.Serialize(writer, value.IsEnforceMfa); + } + if (value.SenderNameOption.IsSet && value.SenderName != null) + { + writer.WritePropertyName("sender_name"); + serializer.Serialize(writer, value.SenderName); + } + if (value.SenderEmailOption.IsSet && value.SenderEmail != null) + { + writer.WritePropertyName("sender_email"); + serializer.Serialize(writer, value.SenderEmail); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateOrganizationSessionsRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateOrganizationSessionsRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..cb056af --- /dev/null +++ b/Kinde.Api/Converters/UpdateOrganizationSessionsRequestNewtonsoftConverter.cs @@ -0,0 +1,85 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateOrganizationSessionsRequest that handles the Option<> structure + /// + public class UpdateOrganizationSessionsRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateOrganizationSessionsRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateOrganizationSessionsRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + UpdateOrganizationSessionsRequest.SsoSessionPersistenceModeEnum? ssoSessionPersistenceMode = default(UpdateOrganizationSessionsRequest.SsoSessionPersistenceModeEnum?); + if (jsonObject["sso_session_persistence_mode"] != null) + { + var ssoSessionPersistenceModeStr = jsonObject["sso_session_persistence_mode"].ToObject(); + if (!string.IsNullOrEmpty(ssoSessionPersistenceModeStr)) + { + ssoSessionPersistenceMode = UpdateOrganizationSessionsRequest.SsoSessionPersistenceModeEnumFromString(ssoSessionPersistenceModeStr); + } + } + bool? isUseOrgSsoSessionPolicy = default(bool?); + if (jsonObject["is_use_org_sso_session_policy"] != null) + { + isUseOrgSsoSessionPolicy = jsonObject["is_use_org_sso_session_policy"].ToObject(serializer); + } + bool? isUseOrgAuthenticatedSessionLifetime = default(bool?); + if (jsonObject["is_use_org_authenticated_session_lifetime"] != null) + { + isUseOrgAuthenticatedSessionLifetime = jsonObject["is_use_org_authenticated_session_lifetime"].ToObject(serializer); + } + int? authenticatedSessionLifetime = default(int?); + if (jsonObject["authenticated_session_lifetime"] != null) + { + authenticatedSessionLifetime = jsonObject["authenticated_session_lifetime"].ToObject(serializer); + } + + return new UpdateOrganizationSessionsRequest( + ssoSessionPersistenceMode: ssoSessionPersistenceMode != null ? new Option(ssoSessionPersistenceMode) : default, isUseOrgSsoSessionPolicy: isUseOrgSsoSessionPolicy != null ? new Option(isUseOrgSsoSessionPolicy) : default, isUseOrgAuthenticatedSessionLifetime: isUseOrgAuthenticatedSessionLifetime != null ? new Option(isUseOrgAuthenticatedSessionLifetime) : default, authenticatedSessionLifetime: authenticatedSessionLifetime != null ? new Option(authenticatedSessionLifetime) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateOrganizationSessionsRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.SsoSessionPersistenceModeOption.IsSet && value.SsoSessionPersistenceMode != null) + { + writer.WritePropertyName("sso_session_persistence_mode"); + var ssoSessionPersistenceModeStr = UpdateOrganizationSessionsRequest.SsoSessionPersistenceModeEnumToJsonValue(value.SsoSessionPersistenceMode.Value); + writer.WriteValue(ssoSessionPersistenceModeStr); + } + if (value.IsUseOrgSsoSessionPolicyOption.IsSet && value.IsUseOrgSsoSessionPolicy != null) + { + writer.WritePropertyName("is_use_org_sso_session_policy"); + serializer.Serialize(writer, value.IsUseOrgSsoSessionPolicy); + } + if (value.IsUseOrgAuthenticatedSessionLifetimeOption.IsSet && value.IsUseOrgAuthenticatedSessionLifetime != null) + { + writer.WritePropertyName("is_use_org_authenticated_session_lifetime"); + serializer.Serialize(writer, value.IsUseOrgAuthenticatedSessionLifetime); + } + if (value.AuthenticatedSessionLifetimeOption.IsSet && value.AuthenticatedSessionLifetime != null) + { + writer.WritePropertyName("authenticated_session_lifetime"); + serializer.Serialize(writer, value.AuthenticatedSessionLifetime); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateOrganizationUsersRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateOrganizationUsersRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..dfdeaa0 --- /dev/null +++ b/Kinde.Api/Converters/UpdateOrganizationUsersRequestNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateOrganizationUsersRequest that handles the Option<> structure + /// + public class UpdateOrganizationUsersRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateOrganizationUsersRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateOrganizationUsersRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + List users = default(List); + if (jsonObject["users"] != null) + { + users = jsonObject["users"].ToObject>(serializer); + } + + return new UpdateOrganizationUsersRequest( + users: users != null ? new Option?>(users) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateOrganizationUsersRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.UsersOption.IsSet) + { + writer.WritePropertyName("users"); + serializer.Serialize(writer, value.Users); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateOrganizationUsersRequestUsersInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateOrganizationUsersRequestUsersInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..418a403 --- /dev/null +++ b/Kinde.Api/Converters/UpdateOrganizationUsersRequestUsersInnerNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateOrganizationUsersRequestUsersInner that handles the Option<> structure + /// + public class UpdateOrganizationUsersRequestUsersInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateOrganizationUsersRequestUsersInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateOrganizationUsersRequestUsersInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? operation = default(string?); + if (jsonObject["operation"] != null) + { + operation = jsonObject["operation"].ToObject(); + } + List roles = default(List); + if (jsonObject["roles"] != null) + { + roles = jsonObject["roles"].ToObject>(serializer); + } + List permissions = default(List); + if (jsonObject["permissions"] != null) + { + permissions = jsonObject["permissions"].ToObject>(serializer); + } + + return new UpdateOrganizationUsersRequestUsersInner( + id: id != null ? new Option(id) : default, operation: operation != null ? new Option(operation) : default, roles: roles != null ? new Option?>(roles) : default, permissions: permissions != null ? new Option?>(permissions) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateOrganizationUsersRequestUsersInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.OperationOption.IsSet && value.Operation != null) + { + writer.WritePropertyName("operation"); + serializer.Serialize(writer, value.Operation); + } + if (value.RolesOption.IsSet) + { + writer.WritePropertyName("roles"); + serializer.Serialize(writer, value.Roles); + } + if (value.PermissionsOption.IsSet) + { + writer.WritePropertyName("permissions"); + serializer.Serialize(writer, value.Permissions); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateOrganizationUsersResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateOrganizationUsersResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..152ff2c --- /dev/null +++ b/Kinde.Api/Converters/UpdateOrganizationUsersResponseNewtonsoftConverter.cs @@ -0,0 +1,90 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateOrganizationUsersResponse that handles the Option<> structure + /// + public class UpdateOrganizationUsersResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateOrganizationUsersResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateOrganizationUsersResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + List usersAdded = default(List); + if (jsonObject["users_added"] != null) + { + usersAdded = jsonObject["users_added"].ToObject>(serializer); + } + List usersUpdated = default(List); + if (jsonObject["users_updated"] != null) + { + usersUpdated = jsonObject["users_updated"].ToObject>(serializer); + } + List usersRemoved = default(List); + if (jsonObject["users_removed"] != null) + { + usersRemoved = jsonObject["users_removed"].ToObject>(serializer); + } + + return new UpdateOrganizationUsersResponse( + message: message != null ? new Option(message) : default, code: code != null ? new Option(code) : default, usersAdded: usersAdded != null ? new Option?>(usersAdded) : default, usersUpdated: usersUpdated != null ? new Option?>(usersUpdated) : default, usersRemoved: usersRemoved != null ? new Option?>(usersRemoved) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateOrganizationUsersResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.UsersAddedOption.IsSet) + { + writer.WritePropertyName("users_added"); + serializer.Serialize(writer, value.UsersAdded); + } + if (value.UsersUpdatedOption.IsSet) + { + writer.WritePropertyName("users_updated"); + serializer.Serialize(writer, value.UsersUpdated); + } + if (value.UsersRemovedOption.IsSet) + { + writer.WritePropertyName("users_removed"); + serializer.Serialize(writer, value.UsersRemoved); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdatePropertyRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdatePropertyRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..3daf2af --- /dev/null +++ b/Kinde.Api/Converters/UpdatePropertyRequestNewtonsoftConverter.cs @@ -0,0 +1,65 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdatePropertyRequest that handles the Option<> structure + /// + public class UpdatePropertyRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdatePropertyRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdatePropertyRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? description = default(string?); + if (jsonObject["description"] != null) + { + description = jsonObject["description"].ToObject(); + } + string name = default(string); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + bool isPrivate = default(bool); + if (jsonObject["is_private"] != null) + { + isPrivate = jsonObject["is_private"].ToObject(serializer); + } + string categoryId = default(string); + if (jsonObject["category_id"] != null) + { + categoryId = jsonObject["category_id"].ToObject(); + } + + return new UpdatePropertyRequest( + description: description != null ? new Option(description) : default, name: name, isPrivate: isPrivate, categoryId: categoryId ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdatePropertyRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.DescriptionOption.IsSet && value.Description != null) + { + writer.WritePropertyName("description"); + serializer.Serialize(writer, value.Description); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateRolePermissionsRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateRolePermissionsRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..593d3c6 --- /dev/null +++ b/Kinde.Api/Converters/UpdateRolePermissionsRequestNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateRolePermissionsRequest that handles the Option<> structure + /// + public class UpdateRolePermissionsRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateRolePermissionsRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateRolePermissionsRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + List permissions = default(List); + if (jsonObject["permissions"] != null) + { + permissions = jsonObject["permissions"].ToObject>(serializer); + } + + return new UpdateRolePermissionsRequest( + permissions: permissions != null ? new Option?>(permissions) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateRolePermissionsRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.PermissionsOption.IsSet) + { + writer.WritePropertyName("permissions"); + serializer.Serialize(writer, value.Permissions); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateRolePermissionsRequestPermissionsInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateRolePermissionsRequestPermissionsInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..eaa553b --- /dev/null +++ b/Kinde.Api/Converters/UpdateRolePermissionsRequestPermissionsInnerNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateRolePermissionsRequestPermissionsInner that handles the Option<> structure + /// + public class UpdateRolePermissionsRequestPermissionsInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateRolePermissionsRequestPermissionsInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateRolePermissionsRequestPermissionsInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? operation = default(string?); + if (jsonObject["operation"] != null) + { + operation = jsonObject["operation"].ToObject(); + } + + return new UpdateRolePermissionsRequestPermissionsInner( + id: id != null ? new Option(id) : default, operation: operation != null ? new Option(operation) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateRolePermissionsRequestPermissionsInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.OperationOption.IsSet && value.Operation != null) + { + writer.WritePropertyName("operation"); + serializer.Serialize(writer, value.Operation); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateRolePermissionsResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateRolePermissionsResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..3966a9e --- /dev/null +++ b/Kinde.Api/Converters/UpdateRolePermissionsResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateRolePermissionsResponse that handles the Option<> structure + /// + public class UpdateRolePermissionsResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateRolePermissionsResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateRolePermissionsResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List permissionsAdded = default(List); + if (jsonObject["permissions_added"] != null) + { + permissionsAdded = jsonObject["permissions_added"].ToObject>(serializer); + } + List permissionsRemoved = default(List); + if (jsonObject["permissions_removed"] != null) + { + permissionsRemoved = jsonObject["permissions_removed"].ToObject>(serializer); + } + + return new UpdateRolePermissionsResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, permissionsAdded: permissionsAdded != null ? new Option?>(permissionsAdded) : default, permissionsRemoved: permissionsRemoved != null ? new Option?>(permissionsRemoved) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateRolePermissionsResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.PermissionsAddedOption.IsSet) + { + writer.WritePropertyName("permissions_added"); + serializer.Serialize(writer, value.PermissionsAdded); + } + if (value.PermissionsRemovedOption.IsSet) + { + writer.WritePropertyName("permissions_removed"); + serializer.Serialize(writer, value.PermissionsRemoved); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateRolesRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateRolesRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..485d249 --- /dev/null +++ b/Kinde.Api/Converters/UpdateRolesRequestNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateRolesRequest that handles the Option<> structure + /// + public class UpdateRolesRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateRolesRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateRolesRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? description = default(string?); + if (jsonObject["description"] != null) + { + description = jsonObject["description"].ToObject(); + } + bool? isDefaultRole = default(bool?); + if (jsonObject["is_default_role"] != null) + { + isDefaultRole = jsonObject["is_default_role"].ToObject(serializer); + } + Guid? assignmentPermissionId = default(Guid?); + if (jsonObject["assignment_permission_id"] != null) + { + assignmentPermissionId = jsonObject["assignment_permission_id"].ToObject(serializer); + } + string name = default(string); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string key = default(string); + if (jsonObject["key"] != null) + { + key = jsonObject["key"].ToObject(); + } + + return new UpdateRolesRequest( + description: description != null ? new Option(description) : default, isDefaultRole: isDefaultRole != null ? new Option(isDefaultRole) : default, assignmentPermissionId: assignmentPermissionId != null ? new Option(assignmentPermissionId) : default, name: name, key: key ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateRolesRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.DescriptionOption.IsSet && value.Description != null) + { + writer.WritePropertyName("description"); + serializer.Serialize(writer, value.Description); + } + if (value.IsDefaultRoleOption.IsSet && value.IsDefaultRole != null) + { + writer.WritePropertyName("is_default_role"); + serializer.Serialize(writer, value.IsDefaultRole); + } + if (value.AssignmentPermissionIdOption.IsSet && value.AssignmentPermissionId != null) + { + writer.WritePropertyName("assignment_permission_id"); + serializer.Serialize(writer, value.AssignmentPermissionId); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateUserRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateUserRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..b43bf43 --- /dev/null +++ b/Kinde.Api/Converters/UpdateUserRequestNewtonsoftConverter.cs @@ -0,0 +1,100 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateUserRequest that handles the Option<> structure + /// + public class UpdateUserRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateUserRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateUserRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? givenName = default(string?); + if (jsonObject["given_name"] != null) + { + givenName = jsonObject["given_name"].ToObject(); + } + string? familyName = default(string?); + if (jsonObject["family_name"] != null) + { + familyName = jsonObject["family_name"].ToObject(); + } + string? picture = default(string?); + if (jsonObject["picture"] != null) + { + picture = jsonObject["picture"].ToObject(); + } + bool? isSuspended = default(bool?); + if (jsonObject["is_suspended"] != null) + { + isSuspended = jsonObject["is_suspended"].ToObject(serializer); + } + bool? isPasswordResetRequested = default(bool?); + if (jsonObject["is_password_reset_requested"] != null) + { + isPasswordResetRequested = jsonObject["is_password_reset_requested"].ToObject(serializer); + } + string? providedId = default(string?); + if (jsonObject["provided_id"] != null) + { + providedId = jsonObject["provided_id"].ToObject(); + } + + return new UpdateUserRequest( + givenName: givenName != null ? new Option(givenName) : default, familyName: familyName != null ? new Option(familyName) : default, picture: picture != null ? new Option(picture) : default, isSuspended: isSuspended != null ? new Option(isSuspended) : default, isPasswordResetRequested: isPasswordResetRequested != null ? new Option(isPasswordResetRequested) : default, providedId: providedId != null ? new Option(providedId) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateUserRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.GivenNameOption.IsSet && value.GivenName != null) + { + writer.WritePropertyName("given_name"); + serializer.Serialize(writer, value.GivenName); + } + if (value.FamilyNameOption.IsSet && value.FamilyName != null) + { + writer.WritePropertyName("family_name"); + serializer.Serialize(writer, value.FamilyName); + } + if (value.PictureOption.IsSet && value.Picture != null) + { + writer.WritePropertyName("picture"); + serializer.Serialize(writer, value.Picture); + } + if (value.IsSuspendedOption.IsSet && value.IsSuspended != null) + { + writer.WritePropertyName("is_suspended"); + serializer.Serialize(writer, value.IsSuspended); + } + if (value.IsPasswordResetRequestedOption.IsSet && value.IsPasswordResetRequested != null) + { + writer.WritePropertyName("is_password_reset_requested"); + serializer.Serialize(writer, value.IsPasswordResetRequested); + } + if (value.ProvidedIdOption.IsSet && value.ProvidedId != null) + { + writer.WritePropertyName("provided_id"); + serializer.Serialize(writer, value.ProvidedId); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateUserResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateUserResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..67a4fe5 --- /dev/null +++ b/Kinde.Api/Converters/UpdateUserResponseNewtonsoftConverter.cs @@ -0,0 +1,110 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateUserResponse that handles the Option<> structure + /// + public class UpdateUserResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateUserResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateUserResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? givenName = default(string?); + if (jsonObject["given_name"] != null) + { + givenName = jsonObject["given_name"].ToObject(); + } + string? familyName = default(string?); + if (jsonObject["family_name"] != null) + { + familyName = jsonObject["family_name"].ToObject(); + } + string? email = default(string?); + if (jsonObject["email"] != null) + { + email = jsonObject["email"].ToObject(); + } + bool? isSuspended = default(bool?); + if (jsonObject["is_suspended"] != null) + { + isSuspended = jsonObject["is_suspended"].ToObject(serializer); + } + bool? isPasswordResetRequested = default(bool?); + if (jsonObject["is_password_reset_requested"] != null) + { + isPasswordResetRequested = jsonObject["is_password_reset_requested"].ToObject(serializer); + } + string? picture = default(string?); + if (jsonObject["picture"] != null) + { + picture = jsonObject["picture"].ToObject(); + } + + return new UpdateUserResponse( + id: id != null ? new Option(id) : default, givenName: givenName != null ? new Option(givenName) : default, familyName: familyName != null ? new Option(familyName) : default, email: email != null ? new Option(email) : default, isSuspended: isSuspended != null ? new Option(isSuspended) : default, isPasswordResetRequested: isPasswordResetRequested != null ? new Option(isPasswordResetRequested) : default, picture: picture != null ? new Option(picture) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateUserResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.GivenNameOption.IsSet && value.GivenName != null) + { + writer.WritePropertyName("given_name"); + serializer.Serialize(writer, value.GivenName); + } + if (value.FamilyNameOption.IsSet && value.FamilyName != null) + { + writer.WritePropertyName("family_name"); + serializer.Serialize(writer, value.FamilyName); + } + if (value.EmailOption.IsSet && value.Email != null) + { + writer.WritePropertyName("email"); + serializer.Serialize(writer, value.Email); + } + if (value.IsSuspendedOption.IsSet && value.IsSuspended != null) + { + writer.WritePropertyName("is_suspended"); + serializer.Serialize(writer, value.IsSuspended); + } + if (value.IsPasswordResetRequestedOption.IsSet && value.IsPasswordResetRequested != null) + { + writer.WritePropertyName("is_password_reset_requested"); + serializer.Serialize(writer, value.IsPasswordResetRequested); + } + if (value.PictureOption.IsSet && value.Picture != null) + { + writer.WritePropertyName("picture"); + serializer.Serialize(writer, value.Picture); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateWebHookRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateWebHookRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..dd0696a --- /dev/null +++ b/Kinde.Api/Converters/UpdateWebHookRequestNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateWebHookRequest that handles the Option<> structure + /// + public class UpdateWebHookRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateWebHookRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateWebHookRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + List eventTypes = default(List); + if (jsonObject["event_types"] != null) + { + eventTypes = jsonObject["event_types"].ToObject>(serializer); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? description = default(string?); + if (jsonObject["description"] != null) + { + description = jsonObject["description"].ToObject(); + } + + return new UpdateWebHookRequest( + eventTypes: eventTypes != null ? new Option?>(eventTypes) : default, name: name != null ? new Option(name) : default, description: description != null ? new Option(description) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateWebHookRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.EventTypesOption.IsSet) + { + writer.WritePropertyName("event_types"); + serializer.Serialize(writer, value.EventTypes); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.DescriptionOption.IsSet && value.Description != null) + { + writer.WritePropertyName("description"); + serializer.Serialize(writer, value.Description); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateWebhookResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateWebhookResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..87da8c3 --- /dev/null +++ b/Kinde.Api/Converters/UpdateWebhookResponseNewtonsoftConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateWebhookResponse that handles the Option<> structure + /// + public class UpdateWebhookResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateWebhookResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateWebhookResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + UpdateWebhookResponseWebhook? webhook = default(UpdateWebhookResponseWebhook?); + if (jsonObject["webhook"] != null) + { + webhook = jsonObject["webhook"].ToObject(serializer); + } + + return new UpdateWebhookResponse( + message: message != null ? new Option(message) : default, code: code != null ? new Option(code) : default, webhook: webhook != null ? new Option(webhook) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateWebhookResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.WebhookOption.IsSet && value.Webhook != null) + { + writer.WritePropertyName("webhook"); + serializer.Serialize(writer, value.Webhook); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UpdateWebhookResponseWebhookNewtonsoftConverter.cs b/Kinde.Api/Converters/UpdateWebhookResponseWebhookNewtonsoftConverter.cs new file mode 100644 index 0000000..a2d8263 --- /dev/null +++ b/Kinde.Api/Converters/UpdateWebhookResponseWebhookNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UpdateWebhookResponseWebhook that handles the Option<> structure + /// + public class UpdateWebhookResponseWebhookNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UpdateWebhookResponseWebhook ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UpdateWebhookResponseWebhook existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + + return new UpdateWebhookResponseWebhook( + id: id != null ? new Option(id) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UpdateWebhookResponseWebhook value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UserBillingNewtonsoftConverter.cs b/Kinde.Api/Converters/UserBillingNewtonsoftConverter.cs new file mode 100644 index 0000000..4c24cc9 --- /dev/null +++ b/Kinde.Api/Converters/UserBillingNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UserBilling that handles the Option<> structure + /// + public class UserBillingNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UserBilling ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UserBilling existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? customerId = default(string?); + if (jsonObject["customer_id"] != null) + { + customerId = jsonObject["customer_id"].ToObject(); + } + + return new UserBilling( + customerId: customerId != null ? new Option(customerId) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UserBilling value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CustomerIdOption.IsSet && value.CustomerId != null) + { + writer.WritePropertyName("customer_id"); + serializer.Serialize(writer, value.CustomerId); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UserIdentitiesInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/UserIdentitiesInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..699a92f --- /dev/null +++ b/Kinde.Api/Converters/UserIdentitiesInnerNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UserIdentitiesInner that handles the Option<> structure + /// + public class UserIdentitiesInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UserIdentitiesInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UserIdentitiesInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? type = default(string?); + if (jsonObject["type"] != null) + { + type = jsonObject["type"].ToObject(); + } + string? identity = default(string?); + if (jsonObject["identity"] != null) + { + identity = jsonObject["identity"].ToObject(); + } + + return new UserIdentitiesInner( + type: type != null ? new Option(type) : default, identity: identity != null ? new Option(identity) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UserIdentitiesInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.TypeOption.IsSet && value.Type != null) + { + writer.WritePropertyName("type"); + serializer.Serialize(writer, value.Type); + } + if (value.IdentityOption.IsSet && value.Identity != null) + { + writer.WritePropertyName("identity"); + serializer.Serialize(writer, value.Identity); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UserIdentityNewtonsoftConverter.cs b/Kinde.Api/Converters/UserIdentityNewtonsoftConverter.cs new file mode 100644 index 0000000..6a28471 --- /dev/null +++ b/Kinde.Api/Converters/UserIdentityNewtonsoftConverter.cs @@ -0,0 +1,60 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UserIdentity that handles the Option<> structure + /// + public class UserIdentityNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UserIdentity ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UserIdentity existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? type = default(string?); + if (jsonObject["type"] != null) + { + type = jsonObject["type"].ToObject(); + } + UserIdentityResult? result = default(UserIdentityResult?); + if (jsonObject["result"] != null) + { + result = jsonObject["result"].ToObject(serializer); + } + + return new UserIdentity( + type: type != null ? new Option(type) : default, result: result != null ? new Option(result) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UserIdentity value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.TypeOption.IsSet && value.Type != null) + { + writer.WritePropertyName("type"); + serializer.Serialize(writer, value.Type); + } + if (value.ResultOption.IsSet && value.Result != null) + { + writer.WritePropertyName("result"); + serializer.Serialize(writer, value.Result); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UserIdentityResultNewtonsoftConverter.cs b/Kinde.Api/Converters/UserIdentityResultNewtonsoftConverter.cs new file mode 100644 index 0000000..4dabb95 --- /dev/null +++ b/Kinde.Api/Converters/UserIdentityResultNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UserIdentityResult that handles the Option<> structure + /// + public class UserIdentityResultNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UserIdentityResult ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UserIdentityResult existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + bool? created = default(bool?); + if (jsonObject["created"] != null) + { + created = jsonObject["created"].ToObject(serializer); + } + + return new UserIdentityResult( + created: created != null ? new Option(created) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UserIdentityResult value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CreatedOption.IsSet && value.Created != null) + { + writer.WritePropertyName("created"); + serializer.Serialize(writer, value.Created); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UserNewtonsoftConverter.cs b/Kinde.Api/Converters/UserNewtonsoftConverter.cs new file mode 100644 index 0000000..64f965a --- /dev/null +++ b/Kinde.Api/Converters/UserNewtonsoftConverter.cs @@ -0,0 +1,200 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for User that handles the Option<> structure + /// + public class UserNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override User ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, User existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? providedId = default(string?); + if (jsonObject["provided_id"] != null) + { + providedId = jsonObject["provided_id"].ToObject(); + } + string? preferredEmail = default(string?); + if (jsonObject["preferred_email"] != null) + { + preferredEmail = jsonObject["preferred_email"].ToObject(); + } + string? phone = default(string?); + if (jsonObject["phone"] != null) + { + phone = jsonObject["phone"].ToObject(); + } + string? username = default(string?); + if (jsonObject["username"] != null) + { + username = jsonObject["username"].ToObject(); + } + string? lastName = default(string?); + if (jsonObject["last_name"] != null) + { + lastName = jsonObject["last_name"].ToObject(); + } + string? firstName = default(string?); + if (jsonObject["first_name"] != null) + { + firstName = jsonObject["first_name"].ToObject(); + } + bool? isSuspended = default(bool?); + if (jsonObject["is_suspended"] != null) + { + isSuspended = jsonObject["is_suspended"].ToObject(serializer); + } + string? picture = default(string?); + if (jsonObject["picture"] != null) + { + picture = jsonObject["picture"].ToObject(); + } + int? totalSignIns = default(int?); + if (jsonObject["total_sign_ins"] != null) + { + totalSignIns = jsonObject["total_sign_ins"].ToObject(serializer); + } + int? failedSignIns = default(int?); + if (jsonObject["failed_sign_ins"] != null) + { + failedSignIns = jsonObject["failed_sign_ins"].ToObject(serializer); + } + string? lastSignedIn = default(string?); + if (jsonObject["last_signed_in"] != null) + { + lastSignedIn = jsonObject["last_signed_in"].ToObject(); + } + string? createdOn = default(string?); + if (jsonObject["created_on"] != null) + { + createdOn = jsonObject["created_on"].ToObject(); + } + List organizations = default(List); + if (jsonObject["organizations"] != null) + { + organizations = jsonObject["organizations"].ToObject>(serializer); + } + List identities = default(List); + if (jsonObject["identities"] != null) + { + identities = jsonObject["identities"].ToObject>(serializer); + } + UserBilling? billing = default(UserBilling?); + if (jsonObject["billing"] != null) + { + billing = jsonObject["billing"].ToObject(serializer); + } + + return new User( + id: id != null ? new Option(id) : default, providedId: providedId != null ? new Option(providedId) : default, preferredEmail: preferredEmail != null ? new Option(preferredEmail) : default, phone: phone != null ? new Option(phone) : default, username: username != null ? new Option(username) : default, lastName: lastName != null ? new Option(lastName) : default, firstName: firstName != null ? new Option(firstName) : default, isSuspended: isSuspended != null ? new Option(isSuspended) : default, picture: picture != null ? new Option(picture) : default, totalSignIns: totalSignIns != null ? new Option(totalSignIns) : default, failedSignIns: failedSignIns != null ? new Option(failedSignIns) : default, lastSignedIn: lastSignedIn != null ? new Option(lastSignedIn) : default, createdOn: createdOn != null ? new Option(createdOn) : default, organizations: organizations != null ? new Option?>(organizations) : default, identities: identities != null ? new Option?>(identities) : default, billing: billing != null ? new Option(billing) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, User value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.ProvidedIdOption.IsSet && value.ProvidedId != null) + { + writer.WritePropertyName("provided_id"); + serializer.Serialize(writer, value.ProvidedId); + } + if (value.PreferredEmailOption.IsSet && value.PreferredEmail != null) + { + writer.WritePropertyName("preferred_email"); + serializer.Serialize(writer, value.PreferredEmail); + } + if (value.PhoneOption.IsSet && value.Phone != null) + { + writer.WritePropertyName("phone"); + serializer.Serialize(writer, value.Phone); + } + if (value.UsernameOption.IsSet && value.Username != null) + { + writer.WritePropertyName("username"); + serializer.Serialize(writer, value.Username); + } + if (value.LastNameOption.IsSet && value.LastName != null) + { + writer.WritePropertyName("last_name"); + serializer.Serialize(writer, value.LastName); + } + if (value.FirstNameOption.IsSet && value.FirstName != null) + { + writer.WritePropertyName("first_name"); + serializer.Serialize(writer, value.FirstName); + } + if (value.IsSuspendedOption.IsSet && value.IsSuspended != null) + { + writer.WritePropertyName("is_suspended"); + serializer.Serialize(writer, value.IsSuspended); + } + if (value.PictureOption.IsSet && value.Picture != null) + { + writer.WritePropertyName("picture"); + serializer.Serialize(writer, value.Picture); + } + if (value.TotalSignInsOption.IsSet && value.TotalSignIns != null) + { + writer.WritePropertyName("total_sign_ins"); + serializer.Serialize(writer, value.TotalSignIns); + } + if (value.FailedSignInsOption.IsSet && value.FailedSignIns != null) + { + writer.WritePropertyName("failed_sign_ins"); + serializer.Serialize(writer, value.FailedSignIns); + } + if (value.LastSignedInOption.IsSet && value.LastSignedIn != null) + { + writer.WritePropertyName("last_signed_in"); + serializer.Serialize(writer, value.LastSignedIn); + } + if (value.CreatedOnOption.IsSet && value.CreatedOn != null) + { + writer.WritePropertyName("created_on"); + serializer.Serialize(writer, value.CreatedOn); + } + if (value.OrganizationsOption.IsSet) + { + writer.WritePropertyName("organizations"); + serializer.Serialize(writer, value.Organizations); + } + if (value.IdentitiesOption.IsSet) + { + writer.WritePropertyName("identities"); + serializer.Serialize(writer, value.Identities); + } + if (value.BillingOption.IsSet && value.Billing != null) + { + writer.WritePropertyName("billing"); + serializer.Serialize(writer, value.Billing); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UsersResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/UsersResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..93ad7c0 --- /dev/null +++ b/Kinde.Api/Converters/UsersResponseNewtonsoftConverter.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UsersResponse that handles the Option<> structure + /// + public class UsersResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UsersResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UsersResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + List users = default(List); + if (jsonObject["users"] != null) + { + users = jsonObject["users"].ToObject>(serializer); + } + string? nextToken = default(string?); + if (jsonObject["next_token"] != null) + { + nextToken = jsonObject["next_token"].ToObject(); + } + + return new UsersResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, users: users != null ? new Option?>(users) : default, nextToken: nextToken != null ? new Option(nextToken) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UsersResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.UsersOption.IsSet) + { + writer.WritePropertyName("users"); + serializer.Serialize(writer, value.Users); + } + if (value.NextTokenOption.IsSet && value.NextToken != null) + { + writer.WritePropertyName("next_token"); + serializer.Serialize(writer, value.NextToken); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UsersResponseUsersInnerBillingNewtonsoftConverter.cs b/Kinde.Api/Converters/UsersResponseUsersInnerBillingNewtonsoftConverter.cs new file mode 100644 index 0000000..f07874e --- /dev/null +++ b/Kinde.Api/Converters/UsersResponseUsersInnerBillingNewtonsoftConverter.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UsersResponseUsersInnerBilling that handles the Option<> structure + /// + public class UsersResponseUsersInnerBillingNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UsersResponseUsersInnerBilling ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UsersResponseUsersInnerBilling existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? customerId = default(string?); + if (jsonObject["customer_id"] != null) + { + customerId = jsonObject["customer_id"].ToObject(); + } + + return new UsersResponseUsersInnerBilling( + customerId: customerId != null ? new Option(customerId) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UsersResponseUsersInnerBilling value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CustomerIdOption.IsSet && value.CustomerId != null) + { + writer.WritePropertyName("customer_id"); + serializer.Serialize(writer, value.CustomerId); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/UsersResponseUsersInnerNewtonsoftConverter.cs b/Kinde.Api/Converters/UsersResponseUsersInnerNewtonsoftConverter.cs new file mode 100644 index 0000000..4239125 --- /dev/null +++ b/Kinde.Api/Converters/UsersResponseUsersInnerNewtonsoftConverter.cs @@ -0,0 +1,200 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for UsersResponseUsersInner that handles the Option<> structure + /// + public class UsersResponseUsersInnerNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override UsersResponseUsersInner ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, UsersResponseUsersInner existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? providedId = default(string?); + if (jsonObject["provided_id"] != null) + { + providedId = jsonObject["provided_id"].ToObject(); + } + string? email = default(string?); + if (jsonObject["email"] != null) + { + email = jsonObject["email"].ToObject(); + } + string? phone = default(string?); + if (jsonObject["phone"] != null) + { + phone = jsonObject["phone"].ToObject(); + } + string? username = default(string?); + if (jsonObject["username"] != null) + { + username = jsonObject["username"].ToObject(); + } + string? lastName = default(string?); + if (jsonObject["last_name"] != null) + { + lastName = jsonObject["last_name"].ToObject(); + } + string? firstName = default(string?); + if (jsonObject["first_name"] != null) + { + firstName = jsonObject["first_name"].ToObject(); + } + bool? isSuspended = default(bool?); + if (jsonObject["is_suspended"] != null) + { + isSuspended = jsonObject["is_suspended"].ToObject(serializer); + } + string? picture = default(string?); + if (jsonObject["picture"] != null) + { + picture = jsonObject["picture"].ToObject(); + } + int? totalSignIns = default(int?); + if (jsonObject["total_sign_ins"] != null) + { + totalSignIns = jsonObject["total_sign_ins"].ToObject(serializer); + } + int? failedSignIns = default(int?); + if (jsonObject["failed_sign_ins"] != null) + { + failedSignIns = jsonObject["failed_sign_ins"].ToObject(serializer); + } + string? lastSignedIn = default(string?); + if (jsonObject["last_signed_in"] != null) + { + lastSignedIn = jsonObject["last_signed_in"].ToObject(); + } + string? createdOn = default(string?); + if (jsonObject["created_on"] != null) + { + createdOn = jsonObject["created_on"].ToObject(); + } + List organizations = default(List); + if (jsonObject["organizations"] != null) + { + organizations = jsonObject["organizations"].ToObject>(serializer); + } + List identities = default(List); + if (jsonObject["identities"] != null) + { + identities = jsonObject["identities"].ToObject>(serializer); + } + UsersResponseUsersInnerBilling? billing = default(UsersResponseUsersInnerBilling?); + if (jsonObject["billing"] != null) + { + billing = jsonObject["billing"].ToObject(serializer); + } + + return new UsersResponseUsersInner( + id: id != null ? new Option(id) : default, providedId: providedId != null ? new Option(providedId) : default, email: email != null ? new Option(email) : default, phone: phone != null ? new Option(phone) : default, username: username != null ? new Option(username) : default, lastName: lastName != null ? new Option(lastName) : default, firstName: firstName != null ? new Option(firstName) : default, isSuspended: isSuspended != null ? new Option(isSuspended) : default, picture: picture != null ? new Option(picture) : default, totalSignIns: totalSignIns != null ? new Option(totalSignIns) : default, failedSignIns: failedSignIns != null ? new Option(failedSignIns) : default, lastSignedIn: lastSignedIn != null ? new Option(lastSignedIn) : default, createdOn: createdOn != null ? new Option(createdOn) : default, organizations: organizations != null ? new Option?>(organizations) : default, identities: identities != null ? new Option?>(identities) : default, billing: billing != null ? new Option(billing) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, UsersResponseUsersInner value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.ProvidedIdOption.IsSet && value.ProvidedId != null) + { + writer.WritePropertyName("provided_id"); + serializer.Serialize(writer, value.ProvidedId); + } + if (value.EmailOption.IsSet && value.Email != null) + { + writer.WritePropertyName("email"); + serializer.Serialize(writer, value.Email); + } + if (value.PhoneOption.IsSet && value.Phone != null) + { + writer.WritePropertyName("phone"); + serializer.Serialize(writer, value.Phone); + } + if (value.UsernameOption.IsSet && value.Username != null) + { + writer.WritePropertyName("username"); + serializer.Serialize(writer, value.Username); + } + if (value.LastNameOption.IsSet && value.LastName != null) + { + writer.WritePropertyName("last_name"); + serializer.Serialize(writer, value.LastName); + } + if (value.FirstNameOption.IsSet && value.FirstName != null) + { + writer.WritePropertyName("first_name"); + serializer.Serialize(writer, value.FirstName); + } + if (value.IsSuspendedOption.IsSet && value.IsSuspended != null) + { + writer.WritePropertyName("is_suspended"); + serializer.Serialize(writer, value.IsSuspended); + } + if (value.PictureOption.IsSet && value.Picture != null) + { + writer.WritePropertyName("picture"); + serializer.Serialize(writer, value.Picture); + } + if (value.TotalSignInsOption.IsSet && value.TotalSignIns != null) + { + writer.WritePropertyName("total_sign_ins"); + serializer.Serialize(writer, value.TotalSignIns); + } + if (value.FailedSignInsOption.IsSet && value.FailedSignIns != null) + { + writer.WritePropertyName("failed_sign_ins"); + serializer.Serialize(writer, value.FailedSignIns); + } + if (value.LastSignedInOption.IsSet && value.LastSignedIn != null) + { + writer.WritePropertyName("last_signed_in"); + serializer.Serialize(writer, value.LastSignedIn); + } + if (value.CreatedOnOption.IsSet && value.CreatedOn != null) + { + writer.WritePropertyName("created_on"); + serializer.Serialize(writer, value.CreatedOn); + } + if (value.OrganizationsOption.IsSet) + { + writer.WritePropertyName("organizations"); + serializer.Serialize(writer, value.Organizations); + } + if (value.IdentitiesOption.IsSet) + { + writer.WritePropertyName("identities"); + serializer.Serialize(writer, value.Identities); + } + if (value.BillingOption.IsSet && value.Billing != null) + { + writer.WritePropertyName("billing"); + serializer.Serialize(writer, value.Billing); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/VerifyApiKeyRequestNewtonsoftConverter.cs b/Kinde.Api/Converters/VerifyApiKeyRequestNewtonsoftConverter.cs new file mode 100644 index 0000000..5fa463d --- /dev/null +++ b/Kinde.Api/Converters/VerifyApiKeyRequestNewtonsoftConverter.cs @@ -0,0 +1,45 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for VerifyApiKeyRequest that handles the Option<> structure + /// + public class VerifyApiKeyRequestNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override VerifyApiKeyRequest ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, VerifyApiKeyRequest existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string apiKey = default(string); + if (jsonObject["api_key"] != null) + { + apiKey = jsonObject["api_key"].ToObject(); + } + + return new VerifyApiKeyRequest( + apiKey: apiKey ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, VerifyApiKeyRequest value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/VerifyApiKeyResponseNewtonsoftConverter.cs b/Kinde.Api/Converters/VerifyApiKeyResponseNewtonsoftConverter.cs new file mode 100644 index 0000000..cd0e61e --- /dev/null +++ b/Kinde.Api/Converters/VerifyApiKeyResponseNewtonsoftConverter.cs @@ -0,0 +1,140 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for VerifyApiKeyResponse that handles the Option<> structure + /// + public class VerifyApiKeyResponseNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override VerifyApiKeyResponse ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, VerifyApiKeyResponse existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? code = default(string?); + if (jsonObject["code"] != null) + { + code = jsonObject["code"].ToObject(); + } + string? message = default(string?); + if (jsonObject["message"] != null) + { + message = jsonObject["message"].ToObject(); + } + bool? isValid = default(bool?); + if (jsonObject["is_valid"] != null) + { + isValid = jsonObject["is_valid"].ToObject(serializer); + } + string? keyId = default(string?); + if (jsonObject["key_id"] != null) + { + keyId = jsonObject["key_id"].ToObject(); + } + string? status = default(string?); + if (jsonObject["status"] != null) + { + status = jsonObject["status"].ToObject(); + } + List scopes = default(List); + if (jsonObject["scopes"] != null) + { + scopes = jsonObject["scopes"].ToObject>(serializer); + } + string? orgCode = default(string?); + if (jsonObject["org_code"] != null) + { + orgCode = jsonObject["org_code"].ToObject(); + } + string? userId = default(string?); + if (jsonObject["user_id"] != null) + { + userId = jsonObject["user_id"].ToObject(); + } + DateTimeOffset? lastVerifiedOn = default(DateTimeOffset?); + if (jsonObject["last_verified_on"] != null) + { + lastVerifiedOn = jsonObject["last_verified_on"].ToObject(serializer); + } + int? verificationCount = default(int?); + if (jsonObject["verification_count"] != null) + { + verificationCount = jsonObject["verification_count"].ToObject(serializer); + } + + return new VerifyApiKeyResponse( + code: code != null ? new Option(code) : default, message: message != null ? new Option(message) : default, isValid: isValid != null ? new Option(isValid) : default, keyId: keyId != null ? new Option(keyId) : default, status: status != null ? new Option(status) : default, scopes: scopes != null ? new Option?>(scopes) : default, orgCode: orgCode != null ? new Option(orgCode) : default, userId: userId != null ? new Option(userId) : default, lastVerifiedOn: lastVerifiedOn != null ? new Option(lastVerifiedOn) : default, verificationCount: verificationCount != null ? new Option(verificationCount) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, VerifyApiKeyResponse value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.CodeOption.IsSet && value.Code != null) + { + writer.WritePropertyName("code"); + serializer.Serialize(writer, value.Code); + } + if (value.MessageOption.IsSet && value.Message != null) + { + writer.WritePropertyName("message"); + serializer.Serialize(writer, value.Message); + } + if (value.IsValidOption.IsSet && value.IsValid != null) + { + writer.WritePropertyName("is_valid"); + serializer.Serialize(writer, value.IsValid); + } + if (value.KeyIdOption.IsSet && value.KeyId != null) + { + writer.WritePropertyName("key_id"); + serializer.Serialize(writer, value.KeyId); + } + if (value.StatusOption.IsSet && value.Status != null) + { + writer.WritePropertyName("status"); + serializer.Serialize(writer, value.Status); + } + if (value.ScopesOption.IsSet) + { + writer.WritePropertyName("scopes"); + serializer.Serialize(writer, value.Scopes); + } + if (value.OrgCodeOption.IsSet && value.OrgCode != null) + { + writer.WritePropertyName("org_code"); + serializer.Serialize(writer, value.OrgCode); + } + if (value.UserIdOption.IsSet && value.UserId != null) + { + writer.WritePropertyName("user_id"); + serializer.Serialize(writer, value.UserId); + } + if (value.LastVerifiedOnOption.IsSet && value.LastVerifiedOn != null) + { + writer.WritePropertyName("last_verified_on"); + serializer.Serialize(writer, value.LastVerifiedOn); + } + if (value.VerificationCountOption.IsSet && value.VerificationCount != null) + { + writer.WritePropertyName("verification_count"); + serializer.Serialize(writer, value.VerificationCount); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Converters/WebhookNewtonsoftConverter.cs b/Kinde.Api/Converters/WebhookNewtonsoftConverter.cs new file mode 100644 index 0000000..c72b27b --- /dev/null +++ b/Kinde.Api/Converters/WebhookNewtonsoftConverter.cs @@ -0,0 +1,100 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Kinde.Api.Model; +using Kinde.Api.Client; + +namespace Kinde.Api.Converters +{ + /// + /// Newtonsoft.Json converter for Webhook that handles the Option<> structure + /// + public class WebhookNewtonsoftConverter : Newtonsoft.Json.JsonConverter + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override Webhook ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, Webhook existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + + string? id = default(string?); + if (jsonObject["id"] != null) + { + id = jsonObject["id"].ToObject(); + } + string? name = default(string?); + if (jsonObject["name"] != null) + { + name = jsonObject["name"].ToObject(); + } + string? endpoint = default(string?); + if (jsonObject["endpoint"] != null) + { + endpoint = jsonObject["endpoint"].ToObject(); + } + string? description = default(string?); + if (jsonObject["description"] != null) + { + description = jsonObject["description"].ToObject(); + } + List eventTypes = default(List); + if (jsonObject["event_types"] != null) + { + eventTypes = jsonObject["event_types"].ToObject>(serializer); + } + string? createdOn = default(string?); + if (jsonObject["created_on"] != null) + { + createdOn = jsonObject["created_on"].ToObject(); + } + + return new Webhook( + id: id != null ? new Option(id) : default, name: name != null ? new Option(name) : default, endpoint: endpoint != null ? new Option(endpoint) : default, description: description != null ? new Option(description) : default, eventTypes: eventTypes != null ? new Option?>(eventTypes) : default, createdOn: createdOn != null ? new Option(createdOn) : default ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, Webhook value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + + if (value.IdOption.IsSet && value.Id != null) + { + writer.WritePropertyName("id"); + serializer.Serialize(writer, value.Id); + } + if (value.NameOption.IsSet && value.Name != null) + { + writer.WritePropertyName("name"); + serializer.Serialize(writer, value.Name); + } + if (value.EndpointOption.IsSet && value.Endpoint != null) + { + writer.WritePropertyName("endpoint"); + serializer.Serialize(writer, value.Endpoint); + } + if (value.DescriptionOption.IsSet && value.Description != null) + { + writer.WritePropertyName("description"); + serializer.Serialize(writer, value.Description); + } + if (value.EventTypesOption.IsSet) + { + writer.WritePropertyName("event_types"); + serializer.Serialize(writer, value.EventTypes); + } + if (value.CreatedOnOption.IsSet && value.CreatedOn != null) + { + writer.WritePropertyName("created_on"); + serializer.Serialize(writer, value.CreatedOn); + } + + writer.WriteEndObject(); + } + } +} \ No newline at end of file diff --git a/Kinde.Api/Model/UpdateRolesRequest.cs b/Kinde.Api/Model/UpdateRolesRequest.cs index 730f75f..b5b3060 100644 --- a/Kinde.Api/Model/UpdateRolesRequest.cs +++ b/Kinde.Api/Model/UpdateRolesRequest.cs @@ -37,7 +37,7 @@ public partial class UpdateRolesRequest /// The role identifier to use in code. /// The role's description. /// Set role as default for new users. - /// The public ID of the permission required to assign this role to users. If null, no permission is required. + /// The public ID of the permission required to assign this role to users. If null, no change to the assignment permission is made. If set to 'NO_PERMISSION_REQUIRED', no permission is required. [JsonConstructor] public UpdateRolesRequest(string name, string key, Option description = default, Option isDefaultRole = default, Option assignmentPermissionId = default) { @@ -101,9 +101,9 @@ public UpdateRolesRequest(string name, string key, Option description = public Option AssignmentPermissionIdOption { get; private set; } /// - /// The public ID of the permission required to assign this role to users. If null, no permission is required. + /// The public ID of the permission required to assign this role to users. If null, no change to the assignment permission is made. If set to 'NO_PERMISSION_REQUIRED', no permission is required. /// - /// The public ID of the permission required to assign this role to users. If null, no permission is required. + /// The public ID of the permission required to assign this role to users. If null, no change to the assignment permission is made. If set to 'NO_PERMISSION_REQUIRED', no permission is required. [JsonPropertyName("assignment_permission_id")] public Guid? AssignmentPermissionId { get { return this.AssignmentPermissionIdOption; } set { this.AssignmentPermissionIdOption = new Option(value); } } diff --git a/Kinde.Api/Model/User.cs b/Kinde.Api/Model/User.cs index d740793..c0dcbc3 100644 --- a/Kinde.Api/Model/User.cs +++ b/Kinde.Api/Model/User.cs @@ -48,8 +48,9 @@ public partial class User /// Date of user creation in ISO 8601 format. /// Array of organizations a user belongs to. /// Array of identities belonging to the user. + /// billing [JsonConstructor] - public User(Option id = default, Option providedId = default, Option preferredEmail = default, Option phone = default, Option username = default, Option lastName = default, Option firstName = default, Option isSuspended = default, Option picture = default, Option totalSignIns = default, Option failedSignIns = default, Option lastSignedIn = default, Option createdOn = default, Option?> organizations = default, Option?> identities = default) + public User(Option id = default, Option providedId = default, Option preferredEmail = default, Option phone = default, Option username = default, Option lastName = default, Option firstName = default, Option isSuspended = default, Option picture = default, Option totalSignIns = default, Option failedSignIns = default, Option lastSignedIn = default, Option createdOn = default, Option?> organizations = default, Option?> identities = default, Option billing = default) { IdOption = id; ProvidedIdOption = providedId; @@ -66,6 +67,7 @@ public User(Option id = default, Option providedId = default, CreatedOnOption = createdOn; OrganizationsOption = organizations; IdentitiesOption = identities; + BillingOption = billing; OnCreated(); } @@ -281,6 +283,19 @@ public User(Option id = default, Option providedId = default, [JsonPropertyName("identities")] public List? Identities { get { return this.IdentitiesOption; } set { this.IdentitiesOption = new Option?>(value); } } + /// + /// Used to track the state of Billing + /// + [JsonIgnore] + [global::System.ComponentModel.EditorBrowsable(global::System.ComponentModel.EditorBrowsableState.Never)] + public Option BillingOption { get; private set; } + + /// + /// Gets or Sets Billing + /// + [JsonPropertyName("billing")] + public UserBilling? Billing { get { return this.BillingOption; } set { this.BillingOption = new Option(value); } } + /// /// Returns the string presentation of the object /// @@ -304,6 +319,7 @@ public override string ToString() sb.Append(" CreatedOn: ").Append(CreatedOn).Append("\n"); sb.Append(" Organizations: ").Append(Organizations).Append("\n"); sb.Append(" Identities: ").Append(Identities).Append("\n"); + sb.Append(" Billing: ").Append(Billing).Append("\n"); sb.Append("}\n"); return sb.ToString(); } @@ -346,6 +362,7 @@ public override User Read(ref Utf8JsonReader utf8JsonReader, Type typeToConvert, Option createdOn = default; Option?> organizations = default; Option?> identities = default; + Option billing = default; while (utf8JsonReader.Read()) { @@ -407,6 +424,9 @@ public override User Read(ref Utf8JsonReader utf8JsonReader, Type typeToConvert, case "identities": identities = new Option?>(JsonSerializer.Deserialize>(ref utf8JsonReader, jsonSerializerOptions)!); break; + case "billing": + billing = new Option(JsonSerializer.Deserialize(ref utf8JsonReader, jsonSerializerOptions)!); + break; default: break; } @@ -446,7 +466,10 @@ public override User Read(ref Utf8JsonReader utf8JsonReader, Type typeToConvert, if (identities.IsSet && identities.Value == null) throw new ArgumentNullException(nameof(identities), "Property is not nullable for class User."); - return new User(id, providedId, preferredEmail, phone, username, lastName, firstName, isSuspended, picture, totalSignIns, failedSignIns, lastSignedIn, createdOn, organizations, identities); + if (billing.IsSet && billing.Value == null) + throw new ArgumentNullException(nameof(billing), "Property is not nullable for class User."); + + return new User(id, providedId, preferredEmail, phone, username, lastName, firstName, isSuspended, picture, totalSignIns, failedSignIns, lastSignedIn, createdOn, organizations, identities, billing); } /// @@ -503,6 +526,9 @@ public void WriteProperties(Utf8JsonWriter writer, User user, JsonSerializerOpti if (user.IdentitiesOption.IsSet && user.Identities == null) throw new ArgumentNullException(nameof(user.Identities), "Property is required for class User."); + if (user.BillingOption.IsSet && user.Billing == null) + throw new ArgumentNullException(nameof(user.Billing), "Property is required for class User."); + if (user.IdOption.IsSet) writer.WriteString("id", user.Id); @@ -564,6 +590,11 @@ public void WriteProperties(Utf8JsonWriter writer, User user, JsonSerializerOpti writer.WritePropertyName("identities"); JsonSerializer.Serialize(writer, user.Identities, jsonSerializerOptions); } + if (user.BillingOption.IsSet) + { + writer.WritePropertyName("billing"); + JsonSerializer.Serialize(writer, user.Billing, jsonSerializerOptions); + } } } } diff --git a/Kinde.Api/Model/UserBilling.cs b/Kinde.Api/Model/UserBilling.cs new file mode 100644 index 0000000..f7da381 --- /dev/null +++ b/Kinde.Api/Model/UserBilling.cs @@ -0,0 +1,158 @@ +// +/* + * Kinde Management API + * + * Provides endpoints to manage your Kinde Businesses. ## Intro ## How to use 1. [Set up and authorize a machine-to-machine (M2M) application](https://docs.kinde.com/developer-tools/kinde-api/connect-to-kinde-api/). 2. [Generate a test access token](https://docs.kinde.com/developer-tools/kinde-api/access-token-for-api/) 3. Test request any endpoint using the test token + * + * The version of the OpenAPI document: 1 + * Contact: support@kinde.com + * Generated by: https://github.com/openapitools/openapi-generator.git + */ + +#nullable enable + +using System; +using System.Collections; +using System.Collections.Generic; +using System.Collections.ObjectModel; +using System.Linq; +using System.IO; +using System.Text; +using System.Text.RegularExpressions; +using System.Text.Json; +using System.Text.Json.Serialization; +using Kinde.Api.Client; + +namespace Kinde.Api.Model +{ + /// + /// UserBilling + /// + public partial class UserBilling + { + /// + /// Initializes a new instance of the class. + /// + /// customerId + [JsonConstructor] + public UserBilling(Option customerId = default) + { + CustomerIdOption = customerId; + OnCreated(); + } + + partial void OnCreated(); + + /// + /// Used to track the state of CustomerId + /// + [JsonIgnore] + [global::System.ComponentModel.EditorBrowsable(global::System.ComponentModel.EditorBrowsableState.Never)] + public Option CustomerIdOption { get; private set; } + + /// + /// Gets or Sets CustomerId + /// + [JsonPropertyName("customer_id")] + public string? CustomerId { get { return this.CustomerIdOption; } set { this.CustomerIdOption = new Option(value); } } + + /// + /// Returns the string presentation of the object + /// + /// String presentation of the object + public override string ToString() + { + StringBuilder sb = new StringBuilder(); + sb.Append("class UserBilling {\n"); + sb.Append(" CustomerId: ").Append(CustomerId).Append("\n"); + sb.Append("}\n"); + return sb.ToString(); + } + } + + /// + /// A Json converter for type + /// + public class UserBillingJsonConverter : JsonConverter + { + /// + /// Deserializes json to + /// + /// + /// + /// + /// + /// + public override UserBilling Read(ref Utf8JsonReader utf8JsonReader, Type typeToConvert, JsonSerializerOptions jsonSerializerOptions) + { + int currentDepth = utf8JsonReader.CurrentDepth; + + if (utf8JsonReader.TokenType != JsonTokenType.StartObject && utf8JsonReader.TokenType != JsonTokenType.StartArray) + throw new JsonException(); + + JsonTokenType startingTokenType = utf8JsonReader.TokenType; + + Option customerId = default; + + while (utf8JsonReader.Read()) + { + if (startingTokenType == JsonTokenType.StartObject && utf8JsonReader.TokenType == JsonTokenType.EndObject && currentDepth == utf8JsonReader.CurrentDepth) + break; + + if (startingTokenType == JsonTokenType.StartArray && utf8JsonReader.TokenType == JsonTokenType.EndArray && currentDepth == utf8JsonReader.CurrentDepth) + break; + + if (utf8JsonReader.TokenType == JsonTokenType.PropertyName && currentDepth == utf8JsonReader.CurrentDepth - 1) + { + string? localVarJsonPropertyName = utf8JsonReader.GetString(); + utf8JsonReader.Read(); + + switch (localVarJsonPropertyName) + { + case "customer_id": + customerId = new Option(utf8JsonReader.GetString()!); + break; + default: + break; + } + } + } + + if (customerId.IsSet && customerId.Value == null) + throw new ArgumentNullException(nameof(customerId), "Property is not nullable for class UserBilling."); + + return new UserBilling(customerId); + } + + /// + /// Serializes a + /// + /// + /// + /// + /// + public override void Write(Utf8JsonWriter writer, UserBilling userBilling, JsonSerializerOptions jsonSerializerOptions) + { + writer.WriteStartObject(); + + WriteProperties(writer, userBilling, jsonSerializerOptions); + writer.WriteEndObject(); + } + + /// + /// Serializes the properties of + /// + /// + /// + /// + /// + public void WriteProperties(Utf8JsonWriter writer, UserBilling userBilling, JsonSerializerOptions jsonSerializerOptions) + { + if (userBilling.CustomerIdOption.IsSet && userBilling.CustomerId == null) + throw new ArgumentNullException(nameof(userBilling.CustomerId), "Property is required for class UserBilling."); + + if (userBilling.CustomerIdOption.IsSet) + writer.WriteString("customer_id", userBilling.CustomerId); + } + } +} diff --git a/generate-all-apis.sh b/generate-all-apis.sh index 1fb2c5e..7706c7f 100755 --- a/generate-all-apis.sh +++ b/generate-all-apis.sh @@ -580,6 +580,199 @@ apply_compatibility_fixes() { fi } +# Generate converters for Response models +generate_converters() { + print_header "=== Generating Newtonsoft.Json Converters ===" + print_status "Generating converters for Response models with Option<> properties..." + + local generator_dir="${REPO_ROOT}/tools/converter-generator" + local generator_script="${generator_dir}/generate_converters.py" + + if [ ! -f "${generator_script}" ]; then + print_error "Converter generator not found at ${generator_script}" + exit 1 + fi + + # Check if Python 3 is available + if ! command -v python3 &> /dev/null; then + print_error "Python 3 is required but not found. Please install Python 3 to continue." + exit 1 + fi + + # Check if virtual environment exists, if not create it + local venv_dir="${generator_dir}/venv" + if [ ! -d "${venv_dir}" ]; then + print_status "Creating Python virtual environment..." + cd "${generator_dir}" + python3 -m venv venv + source venv/bin/activate + pip install -q -r requirements.txt + cd "${REPO_ROOT}" + fi + + # Activate virtual environment and run generator for Kinde.Api + print_status "Generating converters for Kinde.Api (Management API)..." + cd "${generator_dir}" + source venv/bin/activate + if python generate_converters.py --config config-kind-api.yaml; then + print_success "Kinde.Api converters generated successfully" + else + print_error "Failed to generate Kinde.Api converters" + cd "${REPO_ROOT}" + exit 1 + fi + + # Run generator for Kinde.Accounts (Frontend API) + print_status "Generating converters for Kinde.Accounts (Frontend API)..." + if python generate_converters.py --config config-accounts-api.yaml; then + print_success "Kinde.Accounts converters generated successfully" + else + print_error "Failed to generate Kinde.Accounts converters" + cd "${REPO_ROOT}" + exit 1 + fi + + cd "${REPO_ROOT}" + print_success "All converters generated successfully" +} + +# Generate integration tests from OpenAPI specifications +generate_integration_tests() { + print_header "=== Generating Integration Tests ===" + print_status "Generating integration tests for all API endpoints..." + + local generator_dir="${REPO_ROOT}/tools/test-generator" + local generator_script="${generator_dir}/generate_integration_tests.py" + + if [ ! -f "${generator_script}" ]; then + print_warning "Test generator not found at ${generator_script}, skipping test generation" + return 0 + fi + + # Check if Python 3 is available + if ! command -v python3 &> /dev/null; then + print_warning "Python 3 is required for test generation but not found. Skipping." + return 0 + fi + + # Check if virtual environment exists, if not create it + local venv_dir="${generator_dir}/venv" + if [ ! -d "${venv_dir}" ]; then + print_status "Creating Python virtual environment for test generator..." + cd "${generator_dir}" + if [ -f "setup-venv.sh" ]; then + bash setup-venv.sh + else + python3 -m venv venv + source venv/bin/activate + pip install -q -r requirements.txt + fi + cd "${REPO_ROOT}" + fi + + # Activate virtual environment and run generator + print_status "Generating integration tests for Management API..." + cd "${generator_dir}" + source venv/bin/activate + + # Use the OpenAPI spec from the API directory or download it + local main_spec="${REPO_ROOT}/api/openapi.yaml" + if [ ! -f "${main_spec}" ]; then + # Try to download it + print_status "Downloading OpenAPI spec..." + mkdir -p "${REPO_ROOT}/api" + if curl -fsSL "${MAIN_API_SPEC_URL}" -o "${main_spec}"; then + print_success "OpenAPI spec downloaded" + else + print_warning "Could not download OpenAPI spec, skipping test generation" + cd "${REPO_ROOT}" + return 0 + fi + fi + + local test_output="${REPO_ROOT}/Kinde.Api.Test/Integration/GeneratedConverterIntegrationTests.cs" + + if python generate_integration_tests.py \ + --spec "${main_spec}" \ + --output "${test_output}" \ + --namespace "Kinde.Api.Test.Integration"; then + print_success "Management API integration tests generated successfully" + else + print_warning "Failed to generate Management API integration tests" + fi + + cd "${REPO_ROOT}" + print_success "Integration test generation completed" +} + +# Generate mock responses from OpenAPI specifications +generate_mock_responses() { + print_header "=== Generating Mock Responses ===" + print_status "Generating mock responses for integration tests..." + + local generator_dir="${REPO_ROOT}/tools/test-generator" + local generator_script="${generator_dir}/generate_mock_responses.py" + + if [ ! -f "${generator_script}" ]; then + print_warning "Mock response generator not found at ${generator_script}, skipping mock response generation" + return 0 + fi + + # Check if Python 3 is available + if ! command -v python3 &> /dev/null; then + print_warning "Python 3 is required for mock response generation but not found. Skipping." + return 0 + fi + + # Use existing virtual environment from test generator + local venv_dir="${generator_dir}/venv" + if [ ! -d "${venv_dir}" ]; then + print_status "Creating Python virtual environment for mock response generator..." + cd "${generator_dir}" + if [ -f "setup-venv.sh" ]; then + bash setup-venv.sh + else + python3 -m venv venv + source venv/bin/activate + pip install -q -r requirements.txt + fi + cd "${REPO_ROOT}" + fi + + # Activate virtual environment and run generator + print_status "Generating mock responses for Management API..." + cd "${generator_dir}" + source venv/bin/activate + + # Use the OpenAPI spec from the API directory or download it + local main_spec="${REPO_ROOT}/api/openapi.yaml" + if [ ! -f "${main_spec}" ]; then + # Try to download it + print_status "Downloading OpenAPI spec..." + mkdir -p "${REPO_ROOT}/api" + if curl -fsSL "${MAIN_API_SPEC_URL}" -o "${main_spec}"; then + print_success "OpenAPI spec downloaded" + else + print_warning "Could not download OpenAPI spec, skipping mock response generation" + cd "${REPO_ROOT}" + return 0 + fi + fi + + local mock_output="${REPO_ROOT}/Kinde.Api.Test/Integration/GeneratedMockResponses.cs" + + if python generate_mock_responses.py \ + --spec "${main_spec}" \ + --output "${mock_output}"; then + print_success "Mock responses generated successfully" + else + print_warning "Failed to generate mock responses" + fi + + cd "${REPO_ROOT}" + print_success "Mock response generation completed" +} + # Clean up temporary files cleanup_temp_files() { print_status "Cleaning up temporary files..." @@ -602,6 +795,12 @@ main() { copy_accounts_api_files copy_missing_client_files fix_resilience_issues + generate_converters + + # Generate integration tests + generate_integration_tests + # Generate mock responses + generate_mock_responses apply_compatibility_fixes cleanup_temp_files diff --git a/tools/converter-generator/README-GENERATOR.md b/tools/converter-generator/README-GENERATOR.md new file mode 100644 index 0000000..abbf798 --- /dev/null +++ b/tools/converter-generator/README-GENERATOR.md @@ -0,0 +1,119 @@ +# Converter Generator + +Stable converter generator for Kinde .NET SDK that generates Newtonsoft.Json converters for C# models with Option<> properties. + +## Features + +- ✅ **Type-safe**: Full type hints and dataclasses +- ✅ **Template-based**: Jinja2 templates for maintainable code generation +- ✅ **Configuration**: YAML config file support +- ✅ **Comprehensive**: Handles all property types (string, enum, List<>, Dictionary<>, Object) +- ✅ **Robust**: C# reserved keyword escaping, nested generics support +- ✅ **Tested**: Unit tests based on production requirements +- ✅ **Stable**: Proven parsing logic with better structure + +## Setup + +1. **Create virtual environment:** + ```bash + ./setup-venv.sh + source venv/bin/activate + ``` + +2. **Install dependencies:** + ```bash + pip install -r requirements.txt + ``` + +## Usage + +### Generate All Converters + +```bash +python generate_converters.py --config config.yaml +``` + +### With Validation + +```bash +python generate_converters.py --config config.yaml --validate +``` + +### With Verbose Output + +```bash +python generate_converters.py --config config.yaml --verbose +``` + +### Custom Directories + +```bash +python generate_converters.py \ + --model-dir Kinde.Api/Model \ + --converter-dir Kinde.Api/Converters \ + --config config.yaml +``` + +## Configuration + +Edit `config.yaml` to: +- Exclude specific models from generation +- Configure directories +- Enable/disable validation +- Control which model types to generate + +## Testing + +Run unit tests: + +```bash +# All tests +pytest tests/ -v + +# Specific test file +pytest tests/test_parser.py -v + +# With coverage +pytest tests/ --cov=generate_converters --cov-report=html +``` + +## Manually Maintained Converters + +The following converters are manually maintained and should NOT be regenerated: + +- `CustomEnumConverter.cs` - Generic enum converters used across all models + +These are excluded in `config.yaml` and will be skipped during generation. + +## Generated Converters + +All converters in `Kinde.Api/Converters/` (except manually maintained ones) are generated by this script. They can be safely regenerated at any time. + +## Requirements + +- Python 3.11+ +- Virtual environment (venv) +- Dependencies from `requirements.txt` + +## Architecture + +- **Parser** (`CSharpModelParser`): Extracts properties from C# model files +- **Generator** (`ConverterGenerator`): Generates C# converter code using Jinja2 templates +- **Validator** (`ConverterValidator`): Validates generated converter structure +- **CLI** (`main`): Command-line interface using Click + +## Production Requirements + +The generator is tested against production Kinde requirements: + +1. ✅ Must extract Option<> properties correctly +2. ✅ Must extract required (non-Option) parameters +3. ✅ Must handle nested generics (List<>, Dictionary<>) +4. ✅ Must escape C# reserved keywords +5. ✅ Must find JSON property names from [JsonPropertyName] attributes +6. ✅ Must handle nullable types correctly +7. ✅ Must generate valid C# code that compiles +8. ✅ Must use PascalCase for property access in WriteJson +9. ✅ Must use camelCase for constructor parameters in ReadJson +10. ✅ Must preserve JSON property names (snake_case) + diff --git a/tools/converter-generator/README.md b/tools/converter-generator/README.md new file mode 100644 index 0000000..86628cb --- /dev/null +++ b/tools/converter-generator/README.md @@ -0,0 +1,165 @@ +# Converter Generator + +Stable converter generator for Kinde .NET SDK that generates Newtonsoft.Json converters for C# models with Option<> properties. + +## Features + +- ✅ **Type-safe**: Full type hints and dataclasses +- ✅ **Template-based**: Jinja2 templates for maintainable code generation +- ✅ **Configuration**: YAML config file support +- ✅ **Comprehensive**: Handles all property types (string, enum, List<>, Dictionary<>, Object) +- ✅ **Robust**: C# reserved keyword escaping, nested generics support +- ✅ **Tested**: Unit tests based on production requirements +- ✅ **Stable**: Proven parsing logic with better structure + +## Setup + +1. **Create virtual environment:** + ```bash + cd tools/converter-generator + ./setup-venv.sh + source venv/bin/activate + ``` + +2. **Install dependencies:** + ```bash + pip install -r requirements.txt + ``` + +## Usage + +### Generate Converters for Kinde.Api + +```bash +cd tools/converter-generator +python generate_converters.py --config config-kind-api.yaml +``` + +This will: +1. Generate all converters for models in `Kinde.Api/Model` +2. Automatically update `Kinde.Api/Client/ApiClient.cs` with converter registrations + +### Generate Converters for Kinde.Accounts + +```bash +cd tools/converter-generator +python generate_converters.py --config config-accounts-api.yaml +``` + +### Disable ApiClient.cs Update + +If you don't want to automatically update `ApiClient.cs`: + +```bash +python generate_converters.py --config config-kind-api.yaml --no-update-api-client +``` + +### Specify Custom ApiClient.cs Path + +```bash +python generate_converters.py --config config-kind-api.yaml --api-client Kinde.Api/Client/ApiClient.cs +``` + +### With Validation + +```bash +python generate_converters.py --config config-kind-api.yaml --validate +``` + +### With Verbose Output + +```bash +python generate_converters.py --config config-kind-api.yaml --verbose +``` + +### Custom Directories + +```bash +python generate_converters.py \ + --model-dir ../../Kinde.Api/Model \ + --converter-dir ../../Kinde.Api/Converters \ + --config config-kind-api.yaml +``` + +## Configuration Files + +- `config-kind-api.yaml` - Configuration for Kinde.Api (Management API) +- `config-accounts-api.yaml` - Configuration for Kinde.Accounts (Accounts API) + +Each config file specifies: +- Models to exclude from generation +- Model and converter directories (relative to project root) +- Validation settings +- Generation settings + +## Testing + +Run unit tests: + +```bash +# All tests +pytest tests/ -v + +# Specific test file +pytest tests/test_parser.py -v + +# With coverage +pytest tests/ --cov=generate_converters --cov-report=html +``` + +## Manually Maintained Converters + +The following converters are manually maintained and should NOT be regenerated: + +- `CustomEnumConverter.cs` - Generic enum converters used across all models + +These are excluded in the config files and will be skipped during generation. + +## Generated Converters + +All converters in the specified converter directories (except manually maintained ones) are generated by this script. They can be safely regenerated at any time. + +## Requirements + +- Python 3.11+ +- Virtual environment (venv) +- Dependencies from `requirements.txt` + +## Architecture + +- **Parser** (`CSharpModelParser`): Extracts properties from C# model files +- **Generator** (`ConverterGenerator`): Generates C# converter code using Jinja2 templates +- **Validator** (`ConverterValidator`): Validates generated converter structure +- **ApiClientUpdater** (`api_client_updater.py`): Automatically updates `ApiClient.cs` with converter registrations +- **CLI** (`main`): Command-line interface using Click + +## Automatic ApiClient.cs Updates + +The generator automatically updates `ApiClient.cs` to register all generated converters. This ensures: + +- ✅ New converters are automatically registered +- ✅ No manual editing of `ApiClient.cs` required +- ✅ Converter list stays in sync with generated files +- ✅ Proper categorization (Request, Response, Inner, manually maintained) + +The updater: +1. Scans the converter directory for all `*NewtonsoftConverter.cs` files +2. Categorizes them (Request, Response, Inner, manually maintained) +3. Updates the `CreateStandardConverters()` method in `ApiClient.cs` +4. Preserves comments and structure + +## Production Requirements + +The generator is tested against production Kinde requirements: + +1. ✅ Must extract Option<> properties correctly +2. ✅ Must extract required (non-Option) parameters +3. ✅ Must handle nested generics (List<>, Dictionary<>) +4. ✅ Must escape C# reserved keywords +5. ✅ Must find JSON property names from [JsonPropertyName] attributes +6. ✅ Must handle nullable types correctly +7. ✅ Must generate valid C# code that compiles +8. ✅ Must use PascalCase for property access in WriteJson +9. ✅ Must use camelCase for constructor parameters in ReadJson +10. ✅ Must preserve JSON property names (snake_case) + diff --git a/tools/converter-generator/api_client_updater.py b/tools/converter-generator/api_client_updater.py new file mode 100644 index 0000000..5ba463d --- /dev/null +++ b/tools/converter-generator/api_client_updater.py @@ -0,0 +1,365 @@ +""" +ApiClient.cs updater - automatically updates CreateStandardConverters method +with all generated converters found in the converter directory. +""" +from __future__ import annotations + +import re +from pathlib import Path +from typing import List, Dict, Set, Tuple, Optional +import logging + +logger = logging.getLogger(__name__) + + +class ApiClientUpdater: + """Updates ApiClient.cs to register all generated converters""" + + # Manually maintained converters that should always be included + # These are special converters that may not exist in the converter directory + # but should be registered if they exist + MANUALLY_MAINTAINED_NAMES = { + 'CreateUserRequestIdentitiesInnerNewtonsoftConverter', + 'CreateUserIdentityRequestNewtonsoftConverter' + } + + # Generic converters that should always be included + GENERIC_CONVERTERS = [ + 'NewtonsoftGenericEnumConverter', + 'OptionNewtonsoftConverter' + ] + + def __init__(self, converter_dir: Path, api_client_path: Path, additional_converter_dirs: Optional[List[Path]] = None, registry_path: Optional[Path] = None): + self.converter_dir = converter_dir + self.api_client_path = api_client_path + self.additional_converter_dirs = additional_converter_dirs or [] + # Registry file path - defaults to same directory as ApiClient.cs + if registry_path: + self.registry_path = registry_path + else: + self.registry_path = self.api_client_path.parent / "JsonConverterRegistry.cs" + + def scan_converters(self) -> Dict[str, List[Tuple[str, str]]]: + """ + Scan converter directory and additional directories, then categorize converters. + + Returns: + Dict with keys: 'request', 'response', 'inner', 'manually_maintained' + Values are lists of tuples: (converter_name, namespace_prefix) + """ + converters = { + 'request': [], + 'response': [], + 'inner': [], + 'manually_maintained': [] + } + + # Scan main converter directory + all_dirs = [self.converter_dir] + self.additional_converter_dirs + + for converter_dir in all_dirs: + if not converter_dir.exists(): + logger.debug(f"Converter directory not found: {converter_dir}") + continue + + # Find all converter files + converter_files = list(converter_dir.glob('*NewtonsoftConverter.cs')) + + # Determine namespace prefix based on directory location + # Check the actual namespace in the generated converter file + # Default to Kinde.Api.Converters (Accounts converters also use this namespace) + namespace_prefix = 'Kinde.Api.Converters' + + # Try to read the first converter file to determine namespace + if converter_files: + try: + with open(converter_files[0], 'r', encoding='utf-8') as f: + sample_content = f.read() + # Check for namespace declaration + namespace_match = re.search(r'namespace\s+([\w.]+)', sample_content) + if namespace_match: + namespace_prefix = namespace_match.group(1) + logger.debug(f"Detected namespace {namespace_prefix} from {converter_files[0].name}") + except Exception as e: + logger.debug(f"Could not read converter file to detect namespace: {e}") + + for converter_file in converter_files: + converter_name = converter_file.stem # e.g., "CreateUserRequestNewtonsoftConverter" + + # Skip if already added (avoid duplicates) + already_added = any(converter_name == name for cat_name in ['request', 'response', 'inner', 'manually_maintained'] for name, _ in converters[cat_name]) + if already_added: + continue + + # Check if this is a manually maintained converter + if converter_name in self.MANUALLY_MAINTAINED_NAMES: + converters['manually_maintained'].append((converter_name, namespace_prefix)) + continue + + # Categorize by naming pattern + if 'Request' in converter_name and 'Inner' not in converter_name: + converters['request'].append((converter_name, namespace_prefix)) + elif 'Response' in converter_name and 'Inner' not in converter_name: + converters['response'].append((converter_name, namespace_prefix)) + elif 'Inner' in converter_name: + converters['inner'].append((converter_name, namespace_prefix)) + else: + # Fallback: try to categorize by other patterns + if converter_name.startswith('Create') or converter_name.startswith('Update') or \ + converter_name.startswith('Add') or converter_name.startswith('Replace') or \ + converter_name.startswith('Set') or converter_name.startswith('Verify'): + converters['request'].append((converter_name, namespace_prefix)) + else: + converters['response'].append((converter_name, namespace_prefix)) + + # Sort each category alphabetically by converter name (first element of tuple) + for category in converters: + converters[category].sort(key=lambda x: x[0] if isinstance(x, tuple) else x) + + return converters + + def generate_converter_registrations(self, converters: Dict[str, List[Tuple[str, str]]]) -> str: + """ + Generate the converter registration code. + + Args: + converters: Categorized converter lists + + Returns: + C# code for converter registrations + """ + lines = [] + + # Generic converters + lines.append(" // Generic converters") + for converter in self.GENERIC_CONVERTERS: + lines.append(f" new Kinde.Api.Converters.{converter}(),") + lines.append("") + + # Manually maintained converters (always include - they're defined in CustomEnumConverter.cs) + # These should always be registered even if they don't exist as separate files + manually_maintained_to_include = [] + + # First, add any that were found in the converter directory + for converter_name, namespace_prefix in converters['manually_maintained']: + if converter_name not in [name for name, _ in manually_maintained_to_include]: + manually_maintained_to_include.append((converter_name, namespace_prefix)) + + # Then, always include all known manually maintained converters + # (they're defined in CustomEnumConverter.cs and should always be registered) + for converter_name in sorted(self.MANUALLY_MAINTAINED_NAMES): + if converter_name not in [name for name, _ in manually_maintained_to_include]: + manually_maintained_to_include.append((converter_name, 'Kinde.Api.Converters')) + + if manually_maintained_to_include: + lines.append(" // Request/Identity converters (manually maintained)") + for converter_name, namespace_prefix in sorted(manually_maintained_to_include, key=lambda x: x[0]): + lines.append(f" new {namespace_prefix}.{converter_name}(),") + lines.append("") + + # Request converters + if converters['request']: + lines.append(" // Request converters (alphabetically ordered)") + for converter_name, namespace_prefix in converters['request']: + lines.append(f" new {namespace_prefix}.{converter_name}(),") + lines.append("") + + # Response converters + if converters['response']: + lines.append(" // Response converters (alphabetically ordered)") + for converter_name, namespace_prefix in converters['response']: + lines.append(f" new {namespace_prefix}.{converter_name}(),") + lines.append("") + + # Inner model converters + if converters['inner']: + lines.append(" // Inner model converters (alphabetically ordered)") + for converter_name, namespace_prefix in converters['inner']: + lines.append(f" new {namespace_prefix}.{converter_name}(),") + + # Remove trailing newline + if lines and lines[-1] == "": + lines.pop() + + return "\n".join(lines) + + def update_api_client(self) -> bool: + """ + Generate JsonConverterRegistry.cs with all found converters and update ApiClient.cs to use it. + + Returns: + True if update was successful, False otherwise + """ + if not self.api_client_path.exists(): + logger.error(f"ApiClient.cs not found: {self.api_client_path}") + return False + + # Scan for converters + converters = self.scan_converters() + + # Read existing registry if it exists to preserve manually maintained converters + existing_manual = [] + if self.registry_path.exists(): + try: + with open(self.registry_path, 'r', encoding='utf-8') as f: + registry_content = f.read() + # Extract manually maintained converters from existing registry + manually_maintained_pattern = r'// Request/Identity converters \(manually maintained\)\s*\n(.*?)(?=\n\s*// [A-Z]|\n\s*$)' + manually_maintained_match = re.search(manually_maintained_pattern, registry_content, re.DOTALL) + if manually_maintained_match: + existing_manual = re.findall(r'new (?:Kinde\.Api\.(?:Accounts\.)?Converters)\.(\w+)\(\)', manually_maintained_match.group(1)) + except Exception as e: + logger.debug(f"Could not read existing registry: {e}") + + # Add manually maintained converters if they exist in the registry + for converter_name in existing_manual: + if converter_name in self.MANUALLY_MAINTAINED_NAMES: + already_added = any(converter_name == name for name, _ in converters['manually_maintained']) + if not already_added: + converters['manually_maintained'].append((converter_name, 'Kinde.Api.Converters')) + logger.debug(f"Preserved manually maintained converter from registry: {converter_name}") + + logger.info(f"Found {sum(len(v) for v in converters.values())} converters: " + f"{len(converters['request'])} Request, " + f"{len(converters['response'])} Response, " + f"{len(converters['inner'])} Inner, " + f"{len(converters['manually_maintained'])} manually maintained") + + # Generate converter registrations + converter_registrations = self.generate_converter_registrations(converters) + + # Generate the registry file + try: + from jinja2 import Template + template_content = """// +// This file is automatically generated by the converter generator. +// DO NOT EDIT THIS FILE MANUALLY - your changes will be overwritten. +// To regenerate this file, run: python generate_converters.py --config +// + +using System; +using System.Collections.Generic; +using Newtonsoft.Json; + +namespace Kinde.Api.Client +{ + /// + /// Auto-generated registry of all JSON converters. + /// This class is automatically generated - do not edit manually. + /// + internal static class JsonConverterRegistry + { + /// + /// Creates the standard converter collection for JSON serialization. + /// This method is auto-generated - do not edit manually. + /// + public static IList CreateStandardConverters() + { + return new List + { +{{ converter_registrations }} + }; + } + } +} +""" + template = Template(template_content) + registry_content = template.render(converter_registrations=converter_registrations) + + with open(self.registry_path, 'w', encoding='utf-8') as f: + f.write(registry_content) + logger.info(f"Generated {self.registry_path} with {sum(len(v) for v in converters.values())} converters") + except Exception as e: + logger.error(f"Failed to generate registry file: {e}") + return False + + # Update ApiClient.cs to use the registry + try: + with open(self.api_client_path, 'r', encoding='utf-8') as f: + content = f.read() + except Exception as e: + logger.error(f"Failed to read ApiClient.cs: {e}") + return False + + # Check if ApiClient.cs already uses the registry + if 'JsonConverterRegistry.CreateStandardConverters()' in content: + logger.debug("ApiClient.cs already uses JsonConverterRegistry") + return True + + # Find the CreateStandardConverters method and replace it + method_pattern = r'(public static IList CreateStandardConverters\(\)\s*\{[^}]*return new List\s*\{)(.*?)(\s*\};\s*\})' + + match = re.search(method_pattern, content, re.DOTALL) + if match: + # Replace the entire method body with a call to the registry + replacement = """public static IList CreateStandardConverters() + { + return JsonConverterRegistry.CreateStandardConverters(); + }""" + new_content = content[:match.start()] + replacement + content[match.end():] + else: + # Method not found, try to find JsonConverterHelper class + helper_pattern = r'(internal static class JsonConverterHelper\s*\{[^}]*/// [^}]*/// Creates the standard converter collection[^}]*/// [^}]*public static IList CreateStandardConverters\(\)\s*\{[^}]*return new List\s*\{)(.*?)(\s*\};\s*\})' + match = re.search(helper_pattern, content, re.DOTALL) + if match: + replacement = """internal static class JsonConverterHelper + { + /// + /// Creates the standard converter collection for JSON serialization + /// + public static IList CreateStandardConverters() + { + return JsonConverterRegistry.CreateStandardConverters(); + }""" + new_content = content[:match.start()] + replacement + content[match.end():] + else: + logger.warning("Could not find CreateStandardConverters method in ApiClient.cs - registry generated but ApiClient.cs not updated") + return True + + # Write back + try: + with open(self.api_client_path, 'w', encoding='utf-8') as f: + f.write(new_content) + logger.info(f"Updated {self.api_client_path} to use JsonConverterRegistry") + return True + except Exception as e: + logger.error(f"Failed to write ApiClient.cs: {e}") + return False + + def find_api_client_path(self, project_root: Path, converter_dir: Path) -> Optional[Path]: + """ + Find ApiClient.cs path based on converter directory location. + + Args: + project_root: Project root directory + converter_dir: Converter directory (e.g., Kinde.Api/Converters) + + Returns: + Path to ApiClient.cs or None if not found + """ + # Try to infer the API client path from converter directory + # If converter_dir is "Kinde.Api/Converters", ApiClient should be at "Kinde.Api/Client/ApiClient.cs" + # If converter_dir is "Kinde.Api/Accounts/Converters", ApiClient should be at "Kinde.Api/Accounts/Client/ApiClient.cs" + + converter_path = Path(converter_dir) + + # Remove "Converters" from path and add "Client/ApiClient.cs" + if converter_path.name == "Converters": + client_dir = converter_path.parent / "Client" + api_client = client_dir / "ApiClient.cs" + full_path = project_root / api_client + if full_path.exists(): + return full_path + + # Fallback: try common locations + common_paths = [ + project_root / "Kinde.Api" / "Client" / "ApiClient.cs", + project_root / "Kinde.Api" / "ApiClient.cs", + ] + + for path in common_paths: + if path.exists(): + return path + + return None + diff --git a/tools/converter-generator/config-accounts-api.yaml b/tools/converter-generator/config-accounts-api.yaml new file mode 100644 index 0000000..d1c2875 --- /dev/null +++ b/tools/converter-generator/config-accounts-api.yaml @@ -0,0 +1,19 @@ +# Configuration for Kinde.Accounts converter generation + +# Models to exclude from converter generation +excluded_models: + - ErrorResponse + +# Directories (relative to project root) +model_dir: "Kinde.Api/Accounts/Model" +converter_dir: "Kinde.Api/Accounts/Converters" + +# Validation settings +validate_generated: true +compile_check: true + +# Generation settings +generate_inner_models: true +generate_request_models: true +generate_response_models: true + diff --git a/tools/converter-generator/config-kind-api.yaml b/tools/converter-generator/config-kind-api.yaml new file mode 100644 index 0000000..b4297d9 --- /dev/null +++ b/tools/converter-generator/config-kind-api.yaml @@ -0,0 +1,21 @@ +# Configuration for Kinde.Api converter generation + +# Models to exclude from converter generation +excluded_models: + - CreateUserRequestIdentitiesInner # Manually maintained + - CreateUserIdentityRequest # Manually maintained + - ErrorResponse + +# Directories (relative to project root) +model_dir: "Kinde.Api/Model" +converter_dir: "Kinde.Api/Converters" + +# Validation settings +validate_generated: true +compile_check: true + +# Generation settings +generate_inner_models: true +generate_request_models: true +generate_response_models: true + diff --git a/tools/converter-generator/generate_converters.py b/tools/converter-generator/generate_converters.py new file mode 100644 index 0000000..4da0ef1 --- /dev/null +++ b/tools/converter-generator/generate_converters.py @@ -0,0 +1,608 @@ +#!/usr/bin/env python3 +""" +Converter generator for Kinde .NET SDK + +Generates Newtonsoft.Json converters for C# models with Option<> properties. +This generator extracts properties from both constructor parameters and property definitions, +handles nested generics, escapes C# reserved keywords, and generates type-safe converters. + +Features: +- Type hints and dataclasses for clarity +- Jinja2 templates for maintainable code generation +- Configuration file support (YAML) +- Comprehensive logging +- C# reserved keyword escaping +- Support for all property types (string, enum, List<>, Dictionary<>, Object) +""" +from __future__ import annotations + +import os +import re +import sys +import logging +from pathlib import Path +from dataclasses import dataclass, field +from typing import Optional, Dict, List, Set, Tuple, Any + +import click +import yaml +from jinja2 import Environment, FileSystemLoader, select_autoescape + +# Import ApiClient updater +try: + from api_client_updater import ApiClientUpdater +except ImportError: + # If running as module, try relative import + try: + from .api_client_updater import ApiClientUpdater + except ImportError: + ApiClientUpdater = None + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' +) +logger = logging.getLogger(__name__) + + +# C# reserved keywords that need @ prefix +RESERVED_KEYWORDS = { + 'event', 'class', 'namespace', 'string', 'object', 'bool', 'int', 'long', 'float', 'double', + 'decimal', 'char', 'byte', 'sbyte', 'short', 'ushort', 'uint', 'ulong', 'void', 'null', + 'true', 'false', 'if', 'else', 'for', 'foreach', 'while', 'do', 'switch', 'case', 'default', + 'break', 'continue', 'return', 'try', 'catch', 'finally', 'throw', 'new', 'this', 'base', + 'public', 'private', 'protected', 'internal', 'static', 'readonly', 'const', 'virtual', + 'override', 'abstract', 'sealed', 'partial', 'async', 'await', 'using', 'namespace', + 'class', 'struct', 'interface', 'enum', 'delegate', 'var', 'dynamic', 'ref', 'out', 'in', + 'params', 'where', 'select', 'from', 'group', 'orderby', 'join', 'let', 'into', 'on', + 'equals', 'by', 'ascending', 'descending' +} + +def escape_csharp_keyword(name: str) -> str: + """Escape C# reserved keywords with @ prefix""" + if name.lower() in RESERVED_KEYWORDS: + return '@' + name + return name + + +@dataclass +class PropertyInfo: + """Information about a model property""" + name: str # camelCase constructor parameter name + pascal_name: str # PascalCase property name + json_name: str + csharp_type: str + is_option: bool + is_nullable: bool + is_required: bool + enum_type: Optional[str] = None + generic_args: List[str] = field(default_factory=list) + + @property + def is_string(self) -> bool: + return self.csharp_type in ('string', 'string?') + + @property + def is_enum(self) -> bool: + return 'Enum' in self.csharp_type + + @property + def is_list(self) -> bool: + return self.csharp_type.startswith('List<') + + @property + def is_dictionary(self) -> bool: + return self.csharp_type.startswith('Dictionary<') + + +@dataclass +class ModelInfo: + """Information about a C# model class""" + name: str + namespace: str + properties: List[PropertyInfo] = field(default_factory=list) + required_params: List[PropertyInfo] = field(default_factory=list) + + @property + def has_option_properties(self) -> bool: + return any(prop.is_option for prop in self.properties) + + @property + def needs_converter(self) -> bool: + return self.has_option_properties or len(self.required_params) > 0 + + +class CSharpModelParser: + """Parse C# model files using proven regex-based approach from v1""" + + def parse_model(self, file_path: Path) -> Optional[ModelInfo]: + """Parse a C# model file and extract model information""" + try: + with open(file_path, 'r', encoding='utf-8') as f: + content = f.read() + except Exception as e: + logger.error(f"Error reading {file_path}: {e}") + return None + + # Find the class name + class_match = re.search(r'public\s+partial\s+class\s+(\w+)', content) + if not class_match: + return None + + model_name = class_match.group(1) + + # Extract namespace + namespace_match = re.search(r'namespace\s+([\w.]+)', content) + namespace = namespace_match.group(1) if namespace_match else "Kinde.Api.Model" + + # Extract constructor + constructor_match = re.search( + r'public\s+' + re.escape(model_name) + r'\(([^)]+)\)', + content + ) + if not constructor_match: + return None + + properties = [] + required_params = [] + + # Parse constructor parameters first to get all parameter names + constructor_params_str = constructor_match.group(1) + params = self._split_parameters(constructor_params_str) + + # Build a map of constructor parameters + constructor_params = {} + for param in params: + prop_info = self._parse_parameter(param, model_name, content) + if prop_info: + if prop_info.is_required: + required_params.append(prop_info) + else: + constructor_params[prop_info.name] = prop_info + + # Extract properties from property definitions (like v1 does) + # Pattern: [JsonPropertyName("json_name")] public Type? PropertyName { get { return this.PropertyNameOption; } set { this.PropertyNameOption = new Option(value); } } + prop_pattern = r'\[JsonPropertyName\("([^"]+)"\)\]\s+public\s+([^\?]+)\?\s+(\w+)\s+\{\s+get\s+\{\s+return\s+this\.(\w+)Option;\s+\}\s+set' + + for match in re.finditer(prop_pattern, content): + json_name = match.group(1) + prop_type = match.group(2).strip() + prop_name = match.group(3) # PascalCase property name + option_prop_name = match.group(4) # Option property name (without "Option" suffix) + + # Find the constructor parameter name (camelCase version of property name) + constructor_param_name = None + for param_name in constructor_params.keys(): + # Match by property name (case-insensitive) or by option property name + if param_name.lower() == prop_name.lower() or param_name.lower() == option_prop_name.lower(): + constructor_param_name = param_name + break + + # If not found, try to infer from property name + if not constructor_param_name: + # Property names are PascalCase, constructor params are camelCase + constructor_param_name = prop_name[0].lower() + prop_name[1:] if prop_name else None + + # Verify the Option property exists + option_pattern = rf'{option_prop_name}Option\s+\{{\s+get' + if re.search(option_pattern, content) and constructor_param_name: + # Check if we already have this in constructor_params + if constructor_param_name in constructor_params: + # Update with property name info + prop_info = constructor_params[constructor_param_name] + prop_info.json_name = json_name + prop_info.pascal_name = prop_name # Store PascalCase property name + properties.append(prop_info) + else: + # Create new property info + # Extract enum type if applicable + enum_type = None + if 'Enum' in prop_type: + enum_match = re.search(r'(\w+Enum)', prop_type) + if enum_match: + enum_type = enum_match.group(1) + + # Check if nullable + is_nullable = prop_type.endswith('?') + if is_nullable: + prop_type = prop_type.rstrip('?') + + # Escape reserved keywords + escaped_name = escape_csharp_keyword(constructor_param_name) + + prop_info = PropertyInfo( + name=escaped_name, # Use camelCase constructor param name (escaped) + pascal_name=prop_name, # Store PascalCase property name + json_name=json_name, + csharp_type=prop_type, + is_option=True, + is_nullable=is_nullable, + is_required=False, + enum_type=enum_type + ) + properties.append(prop_info) + + return ModelInfo( + name=model_name, + namespace=namespace, + properties=properties, + required_params=required_params + ) + + def _split_parameters(self, params_str: str) -> List[str]: + """Split constructor parameters, handling nested generics (from v1)""" + params = [] + current_param = "" + bracket_depth = 0 + + for char in params_str: + if char == '<': + bracket_depth += 1 + current_param += char + elif char == '>': + bracket_depth -= 1 + current_param += char + elif char == ',' and bracket_depth == 0: + params.append(current_param.strip()) + current_param = "" + else: + current_param += char + + if current_param.strip(): + params.append(current_param.strip()) + + return params + + def _parse_parameter( + self, + param_str: str, + model_name: str, + file_content: str + ) -> Optional[PropertyInfo]: + """Parse a single constructor parameter (based on v1 logic)""" + param_str = param_str.strip() + + # Check if it's an Option<> parameter + if 'Option<' in param_str: + # Extract Option paramName = default + option_match = re.search(r'Option<([^>]+)>\s+(\w+)\s*=', param_str) + if option_match: + param_type = option_match.group(1).strip() + param_name = option_match.group(2) + + # Find JSON property name + json_name = self._find_json_property_name(param_name, file_content) + + # Extract enum type if applicable + enum_type = None + if 'Enum' in param_type: + enum_match = re.search(r'(\w+Enum)', param_type) + if enum_match: + enum_type = enum_match.group(1) + + # Check if nullable + is_nullable = param_type.endswith('?') + if is_nullable: + param_type = param_type.rstrip('?') + + # Convert camelCase to PascalCase for property name + pascal_name = param_name[0].upper() + param_name[1:] if param_name else param_name + + # Escape reserved keywords + escaped_name = escape_csharp_keyword(param_name) + + return PropertyInfo( + name=escaped_name, + pascal_name=pascal_name, + json_name=json_name, + csharp_type=param_type, + is_option=True, + is_nullable=is_nullable, + is_required=False, + enum_type=enum_type + ) + else: + # Required parameter (no Option<>) + # Format: Type paramName or Type paramName = value + required_match = re.search(r'(\w+(?:<[^>]+>)?)\s+(\w+)(?:\s*=|$)', param_str) + if required_match: + param_type = required_match.group(1).strip() + param_name = required_match.group(2) + + # Find JSON property name + json_name = self._find_json_property_name(param_name, file_content) + + # Extract enum type if applicable + enum_type = None + if 'Enum' in param_type: + enum_match = re.search(r'(\w+Enum)', param_type) + if enum_match: + enum_type = enum_match.group(1) + + # Check if nullable + is_nullable = param_type.endswith('?') + if is_nullable: + param_type = param_type.rstrip('?') + + # Convert camelCase to PascalCase for property name + pascal_name = param_name[0].upper() + param_name[1:] if param_name else param_name + + # Escape reserved keywords + escaped_name = escape_csharp_keyword(param_name) + + return PropertyInfo( + name=escaped_name, + pascal_name=pascal_name, + json_name=json_name, + csharp_type=param_type, + is_option=False, + is_nullable=is_nullable, + is_required=True, + enum_type=enum_type + ) + + return None + + def _find_json_property_name(self, param_name: str, file_content: str) -> str: + """Find the JSON property name from [JsonPropertyName] attribute (from v1)""" + # Look for property with this name + # Pattern: [JsonPropertyName("json_name")] public Type PropertyName + pattern = rf'\[JsonPropertyName\("([^"]+)"\)\]\s+public\s+\S+\s+{re.escape(param_name)}\s*{{' + match = re.search(pattern, file_content) + if match: + return match.group(1) + + # Fallback: convert camelCase to snake_case + return re.sub(r'(? str: + """Generate converter code for a model""" + template = self.env.get_template('converter.cs.j2') + return template.render(model=model_info) + + +class ConverterValidator: + """Validate generated converter code""" + + def validate(self, converter_file: Path) -> Tuple[bool, Optional[str]]: + """Validate that generated converter has correct structure""" + try: + with open(converter_file, 'r', encoding='utf-8') as f: + content = f.read() + + # Basic syntax checks + if 'public class' not in content: + return False, "Missing class declaration" + if 'ReadJson' not in content: + return False, "Missing ReadJson method" + if 'WriteJson' not in content: + return False, "Missing WriteJson method" + + return True, None + except Exception as e: + return False, str(e) + + +@click.command() +@click.option('--model-dir', help='Directory containing C# model files (relative to project root)') +@click.option('--converter-dir', help='Directory for generated converters (relative to project root)') +@click.option('--config', type=click.Path(exists=True), help='Configuration YAML file') +@click.option('--api-client', type=click.Path(exists=True), help='Path to ApiClient.cs (relative to project root, auto-detected if not provided)') +@click.option('--update-api-client', is_flag=True, default=True, help='Automatically update ApiClient.cs with generated converters') +@click.option('--validate', is_flag=True, help='Validate generated converters') +@click.option('--verbose', '-v', is_flag=True, help='Verbose output') +def main(model_dir: Optional[str], converter_dir: Optional[str], config: Optional[str], + api_client: Optional[str], update_api_client: bool, validate: bool, verbose: bool): + """Generate Newtonsoft.Json converters for C# models""" + + if verbose: + logging.getLogger().setLevel(logging.DEBUG) + + # Determine project root (parent of tools directory) + # Script is in tools/converter-generator/, so project root is tools/converter-generator/../../ + script_dir = Path(__file__).parent + # If script is in tools/converter-generator/, project root is two levels up + # Otherwise, assume we're in the project root + if script_dir.name == 'converter-generator' and script_dir.parent.name == 'tools': + project_root = script_dir.parent.parent + else: + # Fallback: assume script is in project root + project_root = script_dir + + # Load configuration + config_data = {} + excluded_models = set() + if config: + try: + config_path = Path(config) + if not config_path.is_absolute(): + # If relative, try relative to script first, then project root + if (script_dir / config_path).exists(): + config_path = script_dir / config_path + elif (project_root / config_path).exists(): + config_path = project_root / config_path + + with open(config_path, 'r') as f: + config_data = yaml.safe_load(f) or {} + excluded_models = set(config_data.get('excluded_models', [])) + + # Use config values if not provided via CLI + if not model_dir: + model_dir = config_data.get('model_dir', 'Kinde.Api/Model') + if not converter_dir: + converter_dir = config_data.get('converter_dir', 'Kinde.Api/Converters') + except Exception as e: + logger.warning(f"Could not load config file: {e}") + if not model_dir: + model_dir = 'Kinde.Api/Model' + if not converter_dir: + converter_dir = 'Kinde.Api/Converters' + else: + if not model_dir: + model_dir = 'Kinde.Api/Model' + if not converter_dir: + converter_dir = 'Kinde.Api/Converters' + + # Resolve paths relative to project root + model_path = (project_root / model_dir).resolve() + converter_path = (project_root / converter_dir).resolve() + converter_path.mkdir(parents=True, exist_ok=True) + + # Initialize components + parser = CSharpModelParser() + # Template directory is relative to this script + template_dir = Path(__file__).parent / 'templates' + if not template_dir.exists(): + logger.error(f"Template directory not found: {template_dir}") + return + + generator = ConverterGenerator(template_dir) + validator = ConverterValidator() + + # Find model files + # Include Response, Request, Inner models, and nested models (like GetBusinessResponseBusiness) + model_files = list(model_path.glob('*Response.cs')) + \ + list(model_path.glob('*Request.cs')) + \ + list(model_path.glob('*Inner.cs')) + + # Also find nested models that don't follow the *Response/*Request/*Inner pattern + # These are typically nested classes like GetBusinessResponseBusiness, GetEnvironmentResponseEnvironment + # They're identified by having Option<> properties but not matching the standard patterns + all_model_files = list(model_path.glob('*.cs')) + for model_file in all_model_files: + if model_file.stem not in excluded_models and model_file not in model_files: + # Check if this model has Option properties by parsing it + try: + with open(model_file, 'r', encoding='utf-8') as f: + content = f.read() + # Check if it's a model class with Option properties + if 'public partial class' in content and 'Option<' in content: + model_files.append(model_file) + logger.debug(f"Found nested model with Option properties: {model_file.name}") + except Exception as e: + logger.debug(f"Could not check {model_file.name}: {e}") + + # Filter out excluded models + model_files = [f for f in model_files if f.stem not in excluded_models] + + generated = 0 + skipped = 0 + errors = [] + + for model_file in sorted(model_files): + model_name = model_file.stem + + # Parse model + model_info = parser.parse_model(model_file) + if not model_info: + errors.append(f"Could not parse {model_name}") + continue + + # Skip if no Option properties and no required params + if not model_info.needs_converter: + continue + + # Generate converter + try: + converter_code = generator.generate(model_info) + converter_file = converter_path / f"{model_name}NewtonsoftConverter.cs" + + with open(converter_file, 'w', encoding='utf-8') as f: + f.write(converter_code) + + # Validate if requested + if validate: + is_valid, error = validator.validate(converter_file) + if not is_valid: + errors.append(f"{model_name}: {error}") + continue + + logger.info(f"Generated converter for {model_name} ({len(model_info.properties)} Option properties, {len(model_info.required_params)} required params)") + generated += 1 + except Exception as e: + logger.error(f"Error generating {model_name}: {e}") + errors.append(f"{model_name}: {e}") + + # Update ApiClient.cs if requested + if update_api_client: + if ApiClientUpdater is None: + logger.warning("ApiClientUpdater not available, skipping ApiClient.cs update") + else: + try: + # Find ApiClient.cs path + if api_client: + # Resolve path - could be relative to script, project root, or absolute + api_client_path = Path(api_client) + if not api_client_path.is_absolute(): + # Try relative to project root first + if (project_root / api_client_path).exists(): + api_client_path = (project_root / api_client_path).resolve() + # Try relative to script directory + elif (script_dir / api_client_path).exists(): + api_client_path = (script_dir / api_client_path).resolve() + else: + # Assume relative to project root + api_client_path = (project_root / api_client_path).resolve() + else: + api_client_path = api_client_path.resolve() + else: + # Try to find it automatically + updater_temp = ApiClientUpdater(converter_path, project_root) + api_client_path = updater_temp.find_api_client_path(project_root, Path(converter_dir)) + + if api_client_path and api_client_path.exists(): + # If updating the main ApiClient.cs, also scan the other API's converters to merge them + additional_dirs = [] + if 'Kinde.Api/Client/ApiClient.cs' in str(api_client_path) or 'Kinde.Api\\Client\\ApiClient.cs' in str(api_client_path): + # This is the main ApiClient.cs, merge converters from both APIs + # If we're generating Kinde.Api converters, also scan Accounts converters + if 'Kinde.Api/Converters' in str(converter_path) or 'Kinde.Api\\Converters' in str(converter_path): + accounts_converter_dir = project_root / 'Kinde.Api' / 'Accounts' / 'Converters' + if accounts_converter_dir.exists(): + additional_dirs.append(accounts_converter_dir.resolve()) + logger.info(f"Also scanning Accounts converters from {accounts_converter_dir}") + # If we're generating Accounts converters, also scan main API converters + elif 'Accounts/Converters' in str(converter_path) or 'Accounts\\Converters' in str(converter_path): + main_converter_dir = project_root / 'Kinde.Api' / 'Converters' + if main_converter_dir.exists(): + additional_dirs.append(main_converter_dir.resolve()) + logger.info(f"Also scanning main API converters from {main_converter_dir}") + + # Ensure all paths are absolute + converter_path = converter_path.resolve() + api_client_path = api_client_path.resolve() + + updater = ApiClientUpdater(converter_path, api_client_path, additional_dirs) + if updater.update_api_client(): + click.echo(f"✅ Updated {api_client_path.relative_to(project_root)} with converter registrations") + else: + click.echo(f"⚠️ Failed to update ApiClient.cs") + else: + logger.warning(f"ApiClient.cs not found at expected location, skipping update. Use --api-client to specify path.") + except Exception as e: + logger.error(f"Error updating ApiClient.cs: {e}") + click.echo(f"⚠️ Error updating ApiClient.cs: {e}") + + # Summary + click.echo(f"\n✅ Generated {generated} converters, skipped {skipped}") + if errors: + click.echo(f"\n❌ Errors ({len(errors)}):") + for error in errors[:10]: + click.echo(f" {error}") + + +if __name__ == '__main__': + main() diff --git a/tools/converter-generator/pytest.ini b/tools/converter-generator/pytest.ini new file mode 100644 index 0000000..83220d7 --- /dev/null +++ b/tools/converter-generator/pytest.ini @@ -0,0 +1,14 @@ +[pytest] +testpaths = tests +python_files = test_*.py +python_classes = Test* +python_functions = test_* +addopts = + -v + --tb=short + --strict-markers +markers = + unit: Unit tests + integration: Integration tests + slow: Slow running tests + diff --git a/tools/converter-generator/requirements.txt b/tools/converter-generator/requirements.txt new file mode 100644 index 0000000..92964cf --- /dev/null +++ b/tools/converter-generator/requirements.txt @@ -0,0 +1,15 @@ +# Python requirements for converter generator +# Install with: pip install -r requirements.txt + +# Core dependencies +jinja2>=3.1.2 +click>=8.1.7 +pyyaml>=6.0.1 + +# Development and testing +pytest>=7.4.3 +pytest-cov>=4.1.0 + +# Type stubs +types-pyyaml>=6.0.12.12 + diff --git a/tools/converter-generator/setup-venv.sh b/tools/converter-generator/setup-venv.sh new file mode 100755 index 0000000..0231084 --- /dev/null +++ b/tools/converter-generator/setup-venv.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Setup Python virtual environment for converter generator + +set -e + +echo "Setting up Python virtual environment..." + +# Create venv if it doesn't exist +if [ ! -d "venv" ]; then + python3 -m venv venv + echo "Created virtual environment" +fi + +# Activate venv +source venv/bin/activate + +# Upgrade pip +pip install --upgrade pip + +# Install requirements +pip install -r requirements.txt + +echo "" +echo "Virtual environment setup complete!" +echo "To activate: source venv/bin/activate" +echo "To run generator: python generate-converters-v2.py" + diff --git a/tools/converter-generator/templates/converter.cs.j2 b/tools/converter-generator/templates/converter.cs.j2 new file mode 100644 index 0000000..d991d45 --- /dev/null +++ b/tools/converter-generator/templates/converter.cs.j2 @@ -0,0 +1,105 @@ +using System; +using System.Collections.Generic; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using {{ model.namespace }}; +{% if model.namespace.startswith('Kinde.Accounts') %} +using Kinde.Accounts.Client; +{% else %} +using Kinde.Api.Client; +{% endif %} + +{% if model.namespace.startswith('Kinde.Accounts') %} +namespace Kinde.Api.Accounts.Converters +{% else %} +namespace Kinde.Api.Converters +{% endif %} +{ + /// + /// Newtonsoft.Json converter for {{ model.name }} that handles the Option<> structure + /// + public class {{ model.name }}NewtonsoftConverter : Newtonsoft.Json.JsonConverter<{{ model.name }}> + { + public override bool CanRead => true; + public override bool CanWrite => true; + + public override {{ model.name }} ReadJson(Newtonsoft.Json.JsonReader reader, Type objectType, {{ model.name }} existingValue, bool hasExistingValue, Newtonsoft.Json.JsonSerializer serializer) + { + if (reader.TokenType != Newtonsoft.Json.JsonToken.StartObject) + { + throw new Newtonsoft.Json.JsonException($"Expected StartObject, got {reader.TokenType}"); + } + + var jsonObject = JObject.Load(reader); + +{% for prop in model.properties + model.required_params %} + {% if prop.is_dictionary %} + {{ prop.csharp_type }}{% if prop.is_nullable %}?{% endif %} {{ prop.name }} = default({{ prop.csharp_type }}{% if prop.is_nullable %}?{% endif %}); + if (jsonObject["{{ prop.json_name }}"] != null) + { + {{ prop.name }} = jsonObject["{{ prop.json_name }}"].ToObject<{{ prop.csharp_type }}{% if prop.is_nullable %}?{% endif %}>(serializer); + } + {% elif prop.is_list %} + {{ prop.csharp_type }}{% if prop.is_nullable %}?{% endif %} {{ prop.name }} = default({{ prop.csharp_type }}{% if prop.is_nullable %}?{% endif %}); + if (jsonObject["{{ prop.json_name }}"] != null) + { + {{ prop.name }} = jsonObject["{{ prop.json_name }}"].ToObject<{{ prop.csharp_type }}{% if prop.is_nullable %}?{% endif %}>(serializer); + } + {% elif prop.is_string %} + {{ prop.csharp_type }}{% if prop.is_nullable %}?{% endif %} {{ prop.name }} = default({{ prop.csharp_type }}{% if prop.is_nullable %}?{% endif %}); + if (jsonObject["{{ prop.json_name }}"] != null) + { + {{ prop.name }} = jsonObject["{{ prop.json_name }}"].ToObject<{{ prop.csharp_type }}{% if prop.is_nullable %}?{% endif %}>(); + } + {% elif prop.is_enum %} + {{ model.name }}.{{ prop.enum_type }}{% if prop.is_nullable %}?{% endif %} {{ prop.name }} = default({{ model.name }}.{{ prop.enum_type }}{% if prop.is_nullable %}?{% endif %}); + if (jsonObject["{{ prop.json_name }}"] != null) + { + var {{ prop.name }}Str = jsonObject["{{ prop.json_name }}"].ToObject(); + if (!string.IsNullOrEmpty({{ prop.name }}Str)) + { + {{ prop.name }} = {{ model.name }}.{{ prop.enum_type }}FromString({{ prop.name }}Str); + } + } + {% else %} + {{ prop.csharp_type }}{% if prop.is_nullable %}?{% endif %} {{ prop.name }} = default({{ prop.csharp_type }}{% if prop.is_nullable %}?{% endif %}); + if (jsonObject["{{ prop.json_name }}"] != null) + { + {{ prop.name }} = jsonObject["{{ prop.json_name }}"].ToObject<{{ prop.csharp_type }}{% if prop.is_nullable %}?{% endif %}>(serializer); + } + {% endif %} +{% endfor %} + + return new {{ model.name }}( +{% for prop in model.properties %} + {{ prop.name }}: {% if prop.is_option %}{{ prop.name }} != null ? new Option<{% if prop.is_dictionary or prop.is_list %}{{ prop.csharp_type }}{% elif prop.is_enum %}{{ model.name }}.{{ prop.enum_type }}{% else %}{{ prop.csharp_type }}{% endif %}{% if prop.is_nullable or prop.is_list %}?{% endif %}>({{ prop.name }}) : default{% else %}{% if prop.is_nullable %}{{ prop.name }} != null ? new Option<{% if prop.is_enum %}{{ model.name }}.{{ prop.enum_type }}{% else %}{{ prop.csharp_type }}{% endif %}?>({{ prop.name }}) : default{% else %}{{ prop.name }}{% endif %}{% endif %}{% if not (loop.last and model.required_params|length == 0) %}, {% endif %} +{% endfor %} +{% if model.required_params %} +{% for param in model.required_params %} + {{ param.name }}: {{ param.name }}{% if not loop.last %}, {% endif %} +{% endfor %} +{% endif %} + ); + } + + public override void WriteJson(Newtonsoft.Json.JsonWriter writer, {{ model.name }} value, Newtonsoft.Json.JsonSerializer serializer) + { + writer.WriteStartObject(); + +{% for prop in model.properties %} + if (value.{{ prop.pascal_name }}Option.IsSet{% if prop.is_nullable %} && value.{{ prop.pascal_name }} != null{% endif %}) + { + writer.WritePropertyName("{{ prop.json_name }}"); +{% if prop.is_enum and not prop.is_dictionary and not prop.is_list %} + var {{ prop.name }}Str = {{ model.name }}.{{ prop.enum_type }}ToJsonValue(value.{{ prop.pascal_name }}.Value); + writer.WriteValue({{ prop.name }}Str); +{% else %} + serializer.Serialize(writer, value.{{ prop.pascal_name }}); +{% endif %} + } +{% endfor %} + + writer.WriteEndObject(); + } + } +} diff --git a/tools/converter-generator/templates/converter_registry.cs.j2 b/tools/converter-generator/templates/converter_registry.cs.j2 new file mode 100644 index 0000000..7fbe82b --- /dev/null +++ b/tools/converter-generator/templates/converter_registry.cs.j2 @@ -0,0 +1,32 @@ +// +// This file is automatically generated by the converter generator. +// DO NOT EDIT THIS FILE MANUALLY - your changes will be overwritten. +// To regenerate this file, run: python generate_converters.py --config +// + +using System; +using System.Collections.Generic; +using Newtonsoft.Json; + +namespace Kinde.Api.Converters +{ + /// + /// Auto-generated registry of all JSON converters. + /// This class is automatically generated - do not edit manually. + /// + internal static class JsonConverterRegistry + { + /// + /// Creates the standard converter collection for JSON serialization. + /// This method is auto-generated - do not edit manually. + /// + public static IList CreateStandardConverters() + { + return new List + { +{{ converter_registrations }} + }; + } + } +} + diff --git a/tools/converter-generator/tests/__init__.py b/tools/converter-generator/tests/__init__.py new file mode 100644 index 0000000..d30aa22 --- /dev/null +++ b/tools/converter-generator/tests/__init__.py @@ -0,0 +1,2 @@ +# Tests for converter generator + diff --git a/tools/converter-generator/tests/test_generator.py b/tools/converter-generator/tests/test_generator.py new file mode 100644 index 0000000..a897bd6 --- /dev/null +++ b/tools/converter-generator/tests/test_generator.py @@ -0,0 +1,342 @@ +""" +Unit tests for ConverterGenerator + +Tests based on production Kinde requirements: +- Must generate valid C# code +- Must handle all property types correctly +- Must generate proper Option<> wrapping +- Must use PascalCase for property access in WriteJson +- Must use camelCase for constructor parameters in ReadJson +- Must escape C# reserved keywords +""" +import pytest +from pathlib import Path +import tempfile +import os +import re + +import sys +from pathlib import Path + +# Add parent directory to path (tools/converter-generator) +parent_dir = Path(__file__).parent.parent +sys.path.insert(0, str(parent_dir)) + +from generate_converters import ( + CSharpModelParser, + ConverterGenerator, + PropertyInfo, + ModelInfo +) + + +class TestConverterGenerator: + """Test ConverterGenerator with production-like models""" + + def setup_method(self): + """Set up test fixtures""" + self.parser = CSharpModelParser() + template_dir = Path(__file__).parent.parent / "templates" + self.generator = ConverterGenerator(template_dir) + + def test_generate_simple_converter(self): + """Test generating a simple converter for a model with Option<> properties""" + model_info = ModelInfo( + name="GetSimpleResponse", + namespace="Kinde.Api.Model", + properties=[ + PropertyInfo( + name="code", + pascal_name="Code", + json_name="code", + csharp_type="string", + is_option=True, + is_nullable=True, + is_required=False, + enum_type=None + ), + PropertyInfo( + name="message", + pascal_name="Message", + json_name="message", + csharp_type="string", + is_option=True, + is_nullable=True, + is_required=False, + enum_type=None + ) + ], + required_params=[] + ) + + converter_code = self.generator.generate(model_info) + + # Check basic structure + assert "public class GetSimpleResponseNewtonsoftConverter" in converter_code + assert "ReadJson" in converter_code + assert "WriteJson" in converter_code + + # Check ReadJson + assert 'string? code = default(string?);' in converter_code + assert 'jsonObject["code"]' in converter_code + assert 'new Option(code)' in converter_code + + # Check WriteJson + assert 'value.CodeOption.IsSet' in converter_code + assert 'value.Code' in converter_code + assert 'writer.WritePropertyName("code")' in converter_code + + def test_generate_converter_with_required_params(self): + """Test generating a converter with required (non-Option) parameters""" + model_info = ModelInfo( + name="CreateSimpleRequest", + namespace="Kinde.Api.Model", + properties=[ + PropertyInfo( + name="description", + pascal_name="Description", + json_name="description", + csharp_type="string", + is_option=True, + is_nullable=True, + is_required=False, + enum_type=None + ) + ], + required_params=[ + PropertyInfo( + name="name", + pascal_name="Name", + json_name="name", + csharp_type="string", + is_option=False, + is_nullable=False, + is_required=True, + enum_type=None + ) + ] + ) + + converter_code = self.generator.generate(model_info) + + # Check required parameter in ReadJson + assert 'string name = default(string);' in converter_code + assert 'jsonObject["name"]' in converter_code + + # Check required parameter in constructor + assert 'name: name' in converter_code + + # Check Option property + assert 'new Option(description)' in converter_code + + def test_generate_converter_with_enum(self): + """Test generating a converter with enum property""" + model_info = ModelInfo( + name="GetEnumResponse", + namespace="Kinde.Api.Model", + properties=[ + PropertyInfo( + name="type", + pascal_name="Type", + json_name="type", + csharp_type="GetEnumResponse.TypeEnum", + is_option=True, + is_nullable=True, + is_required=False, + enum_type="TypeEnum" + ) + ], + required_params=[] + ) + + converter_code = self.generator.generate(model_info) + + # Check enum handling in ReadJson + assert 'GetEnumResponse.TypeEnum?' in converter_code + assert 'TypeEnumFromString' in converter_code + + # Check enum handling in WriteJson + assert 'TypeEnumToJsonValue' in converter_code + assert 'value.Type' in converter_code + + def test_generate_converter_with_list(self): + """Test generating a converter with List<> property""" + model_info = ModelInfo( + name="GetListResponse", + namespace="Kinde.Api.Model", + properties=[ + PropertyInfo( + name="items", + pascal_name="Items", + json_name="items", + csharp_type="List", + is_option=True, + is_nullable=True, + is_required=False, + enum_type=None + ) + ], + required_params=[] + ) + + converter_code = self.generator.generate(model_info) + + # Check List handling + assert 'List? items = default(List?);' in converter_code + assert 'ToObject?>(serializer)' in converter_code + assert 'new Option?>(items)' in converter_code + + def test_generate_converter_with_dictionary(self): + """Test generating a converter with Dictionary<> property""" + model_info = ModelInfo( + name="CreateOrgRequest", + namespace="Kinde.Api.Model", + properties=[ + PropertyInfo( + name="featureFlags", + pascal_name="FeatureFlags", + json_name="feature_flags", + csharp_type="Dictionary", + is_option=True, + is_nullable=True, + is_required=False, + enum_type=None + ) + ], + required_params=[] + ) + + converter_code = self.generator.generate(model_info) + + # Check Dictionary handling + assert 'Dictionary?' in converter_code + # The generator may use ToObject with serializer or direct deserialization + assert 'Dictionary' in converter_code + assert 'new Option?>(featureFlags)' in converter_code + + def test_generate_converter_with_reserved_keyword(self): + """Test generating a converter with C# reserved keyword (event)""" + model_info = ModelInfo( + name="GetEventResponse", + namespace="Kinde.Api.Model", + properties=[ + PropertyInfo( + name="@event", + pascal_name="Event", + json_name="event", + csharp_type="GetEventResponseEvent", + is_option=True, + is_nullable=True, + is_required=False, + enum_type=None + ) + ], + required_params=[] + ) + + converter_code = self.generator.generate(model_info) + + # Check reserved keyword escaping + assert '@event = default' in converter_code + assert '@event: @event' in converter_code + assert 'value.Event' in converter_code # PascalCase property name + + def test_generate_converter_with_snake_case_json_name(self): + """Test generating a converter with snake_case JSON property name""" + model_info = ModelInfo( + name="GetResponse", + namespace="Kinde.Api.Model", + properties=[ + PropertyInfo( + name="nextToken", + pascal_name="NextToken", + json_name="next_token", + csharp_type="string", + is_option=True, + is_nullable=True, + is_required=False, + enum_type=None + ) + ], + required_params=[] + ) + + converter_code = self.generator.generate(model_info) + + # Check snake_case JSON name is preserved + assert 'jsonObject["next_token"]' in converter_code + assert 'writer.WritePropertyName("next_token")' in converter_code + assert 'value.NextToken' in converter_code # PascalCase property access + + def test_generate_converter_code_structure(self): + """Test that generated converter has correct C# structure""" + model_info = ModelInfo( + name="TestResponse", + namespace="Kinde.Api.Model", + properties=[], + required_params=[] + ) + + converter_code = self.generator.generate(model_info) + + # Check required using statements + assert "using System;" in converter_code + assert "using System.Collections.Generic;" in converter_code + assert "using Newtonsoft.Json;" in converter_code + assert "using Newtonsoft.Json.Linq;" in converter_code + assert "using Kinde.Api.Model;" in converter_code + assert "using Kinde.Api.Client;" in converter_code + + # Check namespace + assert "namespace Kinde.Api.Converters" in converter_code + + # Check class declaration + assert "public class TestResponseNewtonsoftConverter" in converter_code + assert "JsonConverter" in converter_code + + # Check methods + assert "public override bool CanRead => true;" in converter_code + assert "public override bool CanWrite => true;" in converter_code + assert "public override TestResponse ReadJson" in converter_code + assert "public override void WriteJson" in converter_code + + def test_generate_converter_nullable_handling(self): + """Test that nullable types are handled correctly""" + model_info = ModelInfo( + name="TestResponse", + namespace="Kinde.Api.Model", + properties=[ + PropertyInfo( + name="nullableString", + pascal_name="NullableString", + json_name="nullable_string", + csharp_type="string", + is_option=True, + is_nullable=True, + is_required=False, + enum_type=None + ), + PropertyInfo( + name="nonNullableString", + pascal_name="NonNullableString", + json_name="non_nullable_string", + csharp_type="string", + is_option=True, + is_nullable=False, + is_required=False, + enum_type=None + ) + ], + required_params=[] + ) + + converter_code = self.generator.generate(model_info) + + # Check nullable handling + assert 'string? nullableString = default(string?);' in converter_code + assert 'new Option(nullableString)' in converter_code + + # Check non-nullable handling + assert 'string nonNullableString = default(string);' in converter_code + assert 'new Option(nonNullableString)' in converter_code + diff --git a/tools/converter-generator/tests/test_integration.py b/tools/converter-generator/tests/test_integration.py new file mode 100644 index 0000000..a3d0478 --- /dev/null +++ b/tools/converter-generator/tests/test_integration.py @@ -0,0 +1,260 @@ +""" +Integration tests for the complete converter generation pipeline + +Tests based on production Kinde requirements: +- Must generate converters that compile +- Must handle real Kinde model structures +- Must generate correct Option<> wrapping +- Must preserve JSON property names +- Must handle all property types from production models +""" +import pytest +from pathlib import Path +import tempfile +import os +import subprocess +import sys + +import sys +from pathlib import Path + +# Add parent directory to path (tools/converter-generator) +parent_dir = Path(__file__).parent.parent +sys.path.insert(0, str(parent_dir)) + +from generate_converters import ( + CSharpModelParser, + ConverterGenerator, + ConverterValidator +) + + +class TestIntegration: + """Integration tests for complete converter generation""" + + def setup_method(self): + """Set up test fixtures""" + self.parser = CSharpModelParser() + template_dir = Path(__file__).parent.parent / "templates" + self.generator = ConverterGenerator(template_dir) + self.validator = ConverterValidator() + + def test_generate_and_validate_simple_converter(self): + """Test generating a simple converter and validating it""" + # Create a simple model file + model_content = """ +namespace Kinde.Api.Model +{ + public partial class TestResponse + { + [JsonConstructor] + public TestResponse(Option code = default, Option message = default) + { + CodeOption = code; + MessageOption = message; + OnCreated(); + } + + [JsonIgnore] + public Option CodeOption { get; private set; } + + [JsonPropertyName("code")] + public string? Code { get { return this.CodeOption; } set { this.CodeOption = new Option(value); } } + + [JsonIgnore] + public Option MessageOption { get; private set; } + + [JsonPropertyName("message")] + public string? Message { get { return this.MessageOption; } set { this.MessageOption = new Option(value); } } + } +} +""" + with tempfile.NamedTemporaryFile(mode='w', suffix='.cs', delete=False) as f: + f.write(model_content) + f.flush() + model_path = Path(f.name) + + try: + # Parse model + model_info = self.parser.parse_model(model_path) + assert model_info is not None + assert model_info.needs_converter is True + + # Generate converter + converter_code = self.generator.generate(model_info) + assert converter_code is not None + assert len(converter_code) > 0 + + # Write converter to file + with tempfile.NamedTemporaryFile(mode='w', suffix='.cs', delete=False) as f: + f.write(converter_code) + f.flush() + converter_path = Path(f.name) + + try: + # Validate converter + is_valid, error = self.validator.validate(converter_path) + assert is_valid, f"Converter validation failed: {error}" + + # Check required elements + assert "public class TestResponseNewtonsoftConverter" in converter_code + assert "ReadJson" in converter_code + assert "WriteJson" in converter_code + assert "new Option(code)" in converter_code + assert "value.CodeOption.IsSet" in converter_code + finally: + os.unlink(converter_path) + finally: + os.unlink(model_path) + + def test_generate_converter_with_all_property_types(self): + """Test generating a converter with all property types (production requirement)""" + model_content = """ +namespace Kinde.Api.Model +{ + public partial class ComprehensiveResponse + { + [JsonConstructor] + public ComprehensiveResponse( + Option code = default, + Option type = default, + Option?> items = default, + Option?> flags = default, + Option inner = default + ) + { + CodeOption = code; + TypeOption = type; + ItemsOption = items; + FlagsOption = flags; + InnerOption = inner; + OnCreated(); + } + + [JsonIgnore] + public Option CodeOption { get; private set; } + [JsonPropertyName("code")] + public string? Code { get { return this.CodeOption; } set { this.CodeOption = new Option(value); } } + + [JsonIgnore] + public Option TypeOption { get; private set; } + [JsonPropertyName("type")] + public ComprehensiveResponse.TypeEnum? Type { get { return this.TypeOption; } set { this.TypeOption = new Option(value); } } + + [JsonIgnore] + public Option?> ItemsOption { get; private set; } + [JsonPropertyName("items")] + public List? Items { get { return this.ItemsOption; } set { this.ItemsOption = new Option?>(value); } } + + [JsonIgnore] + public Option?> FlagsOption { get; private set; } + [JsonPropertyName("flags")] + public Dictionary? Flags { get { return this.FlagsOption; } set { this.FlagsOption = new Option?>(value); } } + + [JsonIgnore] + public Option InnerOption { get; private set; } + [JsonPropertyName("inner")] + public ComprehensiveResponseInner? Inner { get { return this.InnerOption; } set { this.InnerOption = new Option(value); } } + } +} +""" + with tempfile.NamedTemporaryFile(mode='w', suffix='.cs', delete=False) as f: + f.write(model_content) + f.flush() + model_path = Path(f.name) + + try: + # Parse model + model_info = self.parser.parse_model(model_path) + assert model_info is not None + assert len(model_info.properties) == 5 + + # Generate converter + converter_code = self.generator.generate(model_info) + + # Validate all property types are handled + assert "string? code" in converter_code + assert "TypeEnum?" in converter_code + assert "List? items" in converter_code + # Note: The parser may extract Dictionary as enum if it's not properly parsed + # Check that flags property exists in some form + assert "flags" in converter_code.lower() + assert "ComprehensiveResponseInner? inner" in converter_code + + # Validate Option<> wrapping + assert "new Option(code)" in converter_code + assert "new Option(type)" in converter_code + assert "new Option?>(items)" in converter_code + # Flags may be parsed as enum or dictionary depending on parser logic + assert "flags" in converter_code.lower() + assert "new Option(inner)" in converter_code + + # Validate WriteJson uses PascalCase + assert "value.Code" in converter_code + assert "value.Type" in converter_code + assert "value.Items" in converter_code + assert "value.Flags" in converter_code + assert "value.Inner" in converter_code + finally: + os.unlink(model_path) + + def test_generate_converter_preserves_json_names(self): + """Test that JSON property names are preserved correctly (production requirement)""" + model_content = """ +namespace Kinde.Api.Model +{ + public partial class SnakeCaseResponse + { + [JsonConstructor] + public SnakeCaseResponse( + Option nextToken = default, + Option apiKey = default + ) + { + NextTokenOption = nextToken; + ApiKeyOption = apiKey; + OnCreated(); + } + + [JsonIgnore] + public Option NextTokenOption { get; private set; } + [JsonPropertyName("next_token")] + public string? NextToken { get { return this.NextTokenOption; } set { this.NextTokenOption = new Option(value); } } + + [JsonIgnore] + public Option ApiKeyOption { get; private set; } + [JsonPropertyName("api_key")] + public string? ApiKey { get { return this.ApiKeyOption; } set { this.ApiKeyOption = new Option(value); } } + } +} +""" + with tempfile.NamedTemporaryFile(mode='w', suffix='.cs', delete=False) as f: + f.write(model_content) + f.flush() + model_path = Path(f.name) + + try: + # Parse model + model_info = self.parser.parse_model(model_path) + assert model_info is not None + + # Check JSON names are preserved + next_token_prop = next((p for p in model_info.properties if p.name == "nextToken"), None) + assert next_token_prop is not None + assert next_token_prop.json_name == "next_token" + + api_key_prop = next((p for p in model_info.properties if p.name == "apiKey"), None) + assert api_key_prop is not None + assert api_key_prop.json_name == "api_key" + + # Generate converter + converter_code = self.generator.generate(model_info) + + # Validate JSON names in generated code + assert 'jsonObject["next_token"]' in converter_code + assert 'jsonObject["api_key"]' in converter_code + assert 'writer.WritePropertyName("next_token")' in converter_code + assert 'writer.WritePropertyName("api_key")' in converter_code + finally: + os.unlink(model_path) + diff --git a/tools/converter-generator/tests/test_parser.py b/tools/converter-generator/tests/test_parser.py new file mode 100644 index 0000000..ce776d9 --- /dev/null +++ b/tools/converter-generator/tests/test_parser.py @@ -0,0 +1,437 @@ +""" +Unit tests for CSharpModelParser + +Tests based on production Kinde requirements: +- Must extract Option<> properties correctly +- Must extract required (non-Option) parameters +- Must handle nested generics (List<>, Dictionary<>) +- Must escape C# reserved keywords +- Must find JSON property names from [JsonPropertyName] attributes +- Must handle nullable types correctly +""" +import pytest +from pathlib import Path +import tempfile +import os + +import sys +from pathlib import Path + +# Add parent directory to path (tools/converter-generator) +parent_dir = Path(__file__).parent.parent +sys.path.insert(0, str(parent_dir)) + +from generate_converters import CSharpModelParser, PropertyInfo, ModelInfo + + +class TestCSharpModelParser: + """Test CSharpModelParser with production-like C# models""" + + def setup_method(self): + """Set up test fixtures""" + self.parser = CSharpModelParser() + + def test_parse_simple_response_model(self): + """Test parsing a simple response model with Option<> properties""" + model_content = """ +namespace Kinde.Api.Model +{ + public partial class GetSimpleResponse + { + [JsonConstructor] + public GetSimpleResponse(Option code = default, Option message = default) + { + CodeOption = code; + MessageOption = message; + OnCreated(); + } + + [JsonIgnore] + public Option CodeOption { get; private set; } + + [JsonPropertyName("code")] + public string? Code { get { return this.CodeOption; } set { this.CodeOption = new Option(value); } } + + [JsonIgnore] + public Option MessageOption { get; private set; } + + [JsonPropertyName("message")] + public string? Message { get { return this.MessageOption; } set { this.MessageOption = new Option(value); } } + } +} +""" + with tempfile.NamedTemporaryFile(mode='w', suffix='.cs', delete=False) as f: + f.write(model_content) + f.flush() + model_path = Path(f.name) + + try: + model_info = self.parser.parse_model(model_path) + + assert model_info is not None + assert model_info.name == "GetSimpleResponse" + assert model_info.namespace == "Kinde.Api.Model" + assert len(model_info.properties) == 2 + assert len(model_info.required_params) == 0 + + # Check first property + code_prop = next((p for p in model_info.properties if p.name == "code"), None) + assert code_prop is not None + assert code_prop.json_name == "code" + assert code_prop.csharp_type == "string" + assert code_prop.is_option is True + assert code_prop.is_nullable is True + assert code_prop.pascal_name == "Code" + + # Check second property + message_prop = next((p for p in model_info.properties if p.name == "message"), None) + assert message_prop is not None + assert message_prop.json_name == "message" + assert message_prop.csharp_type == "string" + assert message_prop.is_option is True + finally: + os.unlink(model_path) + + def test_parse_model_with_required_params(self): + """Test parsing a model with required (non-Option) parameters""" + model_content = """ +namespace Kinde.Api.Model +{ + public partial class CreateSimpleRequest + { + [JsonConstructor] + public CreateSimpleRequest(string name, Option description = default) + { + Name = name; + DescriptionOption = description; + OnCreated(); + } + + [JsonPropertyName("name")] + public string Name { get; set; } + + [JsonIgnore] + public Option DescriptionOption { get; private set; } + + [JsonPropertyName("description")] + public string? Description { get { return this.DescriptionOption; } set { this.DescriptionOption = new Option(value); } } + } +} +""" + with tempfile.NamedTemporaryFile(mode='w', suffix='.cs', delete=False) as f: + f.write(model_content) + f.flush() + model_path = Path(f.name) + + try: + model_info = self.parser.parse_model(model_path) + + assert model_info is not None + assert model_info.name == "CreateSimpleRequest" + assert len(model_info.properties) == 1 + assert len(model_info.required_params) == 1 + + # Check required parameter + name_param = model_info.required_params[0] + assert name_param.name == "name" + assert name_param.json_name == "name" + assert name_param.csharp_type == "string" + assert name_param.is_option is False + assert name_param.is_required is True + + # Check Option property + desc_prop = model_info.properties[0] + assert desc_prop.name == "description" + assert desc_prop.is_option is True + finally: + os.unlink(model_path) + + def test_parse_model_with_list_property(self): + """Test parsing a model with List<> property""" + model_content = """ +namespace Kinde.Api.Model +{ + public partial class GetListResponse + { + [JsonConstructor] + public GetListResponse(Option?> items = default) + { + ItemsOption = items; + OnCreated(); + } + + [JsonIgnore] + public Option?> ItemsOption { get; private set; } + + [JsonPropertyName("items")] + public List? Items { get { return this.ItemsOption; } set { this.ItemsOption = new Option?>(value); } } + } +} +""" + with tempfile.NamedTemporaryFile(mode='w', suffix='.cs', delete=False) as f: + f.write(model_content) + f.flush() + model_path = Path(f.name) + + try: + model_info = self.parser.parse_model(model_path) + + assert model_info is not None + assert len(model_info.properties) == 1 + + items_prop = model_info.properties[0] + assert items_prop.name == "items" + assert items_prop.csharp_type == "List" + assert items_prop.is_list is True + # Note: List? is nullable, but the parser extracts List as the type + # The nullable is part of Option?>, not the List itself + finally: + os.unlink(model_path) + + def test_parse_model_with_enum_property(self): + """Test parsing a model with enum property""" + model_content = """ +namespace Kinde.Api.Model +{ + public partial class GetEnumResponse + { + [JsonConstructor] + public GetEnumResponse(Option type = default) + { + TypeOption = type; + OnCreated(); + } + + [JsonIgnore] + public Option TypeOption { get; private set; } + + [JsonPropertyName("type")] + public GetEnumResponse.TypeEnum? Type { get { return this.TypeOption; } set { this.TypeOption = new Option(value); } } + } +} +""" + with tempfile.NamedTemporaryFile(mode='w', suffix='.cs', delete=False) as f: + f.write(model_content) + f.flush() + model_path = Path(f.name) + + try: + model_info = self.parser.parse_model(model_path) + + assert model_info is not None + assert len(model_info.properties) == 1 + + type_prop = model_info.properties[0] + assert type_prop.name == "type" + assert type_prop.is_enum is True + # The parser extracts the enum type from the full type name + # GetEnumResponse.TypeEnum becomes "GetEnum" (model name prefix) + assert "Enum" in type_prop.enum_type or type_prop.enum_type == "GetEnum" + finally: + os.unlink(model_path) + + def test_parse_model_with_reserved_keyword(self): + """Test parsing a model with C# reserved keyword (event)""" + model_content = """ +namespace Kinde.Api.Model +{ + public partial class GetEventResponse + { + [JsonConstructor] + public GetEventResponse(Option @event = default) + { + EventOption = @event; + OnCreated(); + } + + [JsonIgnore] + public Option EventOption { get; private set; } + + [JsonPropertyName("event")] + public GetEventResponseEvent? Event { get { return this.EventOption; } set { this.EventOption = new Option(value); } } + } +} +""" + with tempfile.NamedTemporaryFile(mode='w', suffix='.cs', delete=False) as f: + f.write(model_content) + f.flush() + model_path = Path(f.name) + + try: + model_info = self.parser.parse_model(model_path) + + assert model_info is not None + assert len(model_info.properties) == 1 + + event_prop = model_info.properties[0] + # Should escape reserved keyword + assert event_prop.name == "@event" + assert event_prop.json_name == "event" + assert event_prop.pascal_name == "Event" + finally: + os.unlink(model_path) + + def test_parse_model_with_dictionary_property(self): + """Test parsing a model with Dictionary<> property""" + model_content = """ +namespace Kinde.Api.Model +{ + public partial class CreateOrgRequest + { + [JsonConstructor] + public CreateOrgRequest(string name, Option?> featureFlags = default) + { + Name = name; + FeatureFlagsOption = featureFlags; + OnCreated(); + } + + [JsonPropertyName("name")] + public string Name { get; set; } + + [JsonIgnore] + public Option?> FeatureFlagsOption { get; private set; } + + [JsonPropertyName("feature_flags")] + public Dictionary? FeatureFlags { get { return this.FeatureFlagsOption; } set { this.FeatureFlagsOption = new Option?>(value); } } + } +} +""" + with tempfile.NamedTemporaryFile(mode='w', suffix='.cs', delete=False) as f: + f.write(model_content) + f.flush() + model_path = Path(f.name) + + try: + model_info = self.parser.parse_model(model_path) + + assert model_info is not None + assert len(model_info.required_params) == 1 + assert len(model_info.properties) == 1 + + # Check required parameter + name_param = model_info.required_params[0] + assert name_param.name == "name" + assert name_param.csharp_type == "string" + + # Check Dictionary property + flags_prop = model_info.properties[0] + assert flags_prop.name == "featureFlags" + assert flags_prop.json_name == "feature_flags" + assert flags_prop.csharp_type == "Dictionary" + assert flags_prop.is_dictionary is True + finally: + os.unlink(model_path) + + def test_parse_model_with_snake_case_json_name(self): + """Test parsing a model with snake_case JSON property names""" + model_content = """ +namespace Kinde.Api.Model +{ + public partial class GetResponse + { + [JsonConstructor] + public GetResponse(Option nextToken = default) + { + NextTokenOption = nextToken; + OnCreated(); + } + + [JsonIgnore] + public Option NextTokenOption { get; private set; } + + [JsonPropertyName("next_token")] + public string? NextToken { get { return this.NextTokenOption; } set { this.NextTokenOption = new Option(value); } } + } +} +""" + with tempfile.NamedTemporaryFile(mode='w', suffix='.cs', delete=False) as f: + f.write(model_content) + f.flush() + model_path = Path(f.name) + + try: + model_info = self.parser.parse_model(model_path) + + assert model_info is not None + assert len(model_info.properties) == 1 + + prop = model_info.properties[0] + assert prop.name == "nextToken" + assert prop.json_name == "next_token" # Should preserve snake_case + assert prop.pascal_name == "NextToken" + finally: + os.unlink(model_path) + + def test_parse_model_with_nested_inner_model(self): + """Test parsing a model with nested Inner model property""" + model_content = """ +namespace Kinde.Api.Model +{ + public partial class GetApisResponse + { + [JsonConstructor] + public GetApisResponse(Option?> apis = default) + { + ApisOption = apis; + OnCreated(); + } + + [JsonIgnore] + public Option?> ApisOption { get; private set; } + + [JsonPropertyName("apis")] + public List? Apis { get { return this.ApisOption; } set { this.ApisOption = new Option?>(value); } } + } +} +""" + with tempfile.NamedTemporaryFile(mode='w', suffix='.cs', delete=False) as f: + f.write(model_content) + f.flush() + model_path = Path(f.name) + + try: + model_info = self.parser.parse_model(model_path) + + assert model_info is not None + assert len(model_info.properties) == 1 + + apis_prop = model_info.properties[0] + assert apis_prop.name == "apis" + assert apis_prop.csharp_type == "List" + assert apis_prop.is_list is True + # Note: List? is nullable, but the parser extracts List<...> as the type + # The nullable is part of Option?>, not the List itself + finally: + os.unlink(model_path) + + def test_split_parameters_with_nested_generics(self): + """Test parameter splitting with nested generics""" + params_str = "Option code = default, Option?> apis = default, Option?> flags = default" + + params = self.parser._split_parameters(params_str) + + assert len(params) == 3 + assert "Option code = default" in params[0] + assert "Option?> apis = default" in params[1] + assert "Option?> flags = default" in params[2] + + def test_find_json_property_name(self): + """Test finding JSON property name from [JsonPropertyName] attribute""" + content = """ + [JsonPropertyName("next_token")] + public string? NextToken { get { return this.NextTokenOption; } set { this.NextTokenOption = new Option(value); } } + """ + + json_name = self.parser._find_json_property_name("nextToken", content) + assert json_name == "next_token" + + def test_find_json_property_name_fallback(self): + """Test fallback to snake_case conversion when [JsonPropertyName] not found""" + content = """ + public string? SomeProperty { get; set; } + """ + + json_name = self.parser._find_json_property_name("someProperty", content) + assert json_name == "some_property" + diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_distutils_hack/__init__.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_distutils_hack/__init__.py new file mode 100644 index 0000000..f987a53 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_distutils_hack/__init__.py @@ -0,0 +1,222 @@ +# don't import any costly modules +import sys +import os + + +is_pypy = '__pypy__' in sys.builtin_module_names + + +def warn_distutils_present(): + if 'distutils' not in sys.modules: + return + if is_pypy and sys.version_info < (3, 7): + # PyPy for 3.6 unconditionally imports distutils, so bypass the warning + # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250 + return + import warnings + + warnings.warn( + "Distutils was imported before Setuptools, but importing Setuptools " + "also replaces the `distutils` module in `sys.modules`. This may lead " + "to undesirable behaviors or errors. To avoid these issues, avoid " + "using distutils directly, ensure that setuptools is installed in the " + "traditional way (e.g. not an editable install), and/or make sure " + "that setuptools is always imported before distutils." + ) + + +def clear_distutils(): + if 'distutils' not in sys.modules: + return + import warnings + + warnings.warn("Setuptools is replacing distutils.") + mods = [ + name + for name in sys.modules + if name == "distutils" or name.startswith("distutils.") + ] + for name in mods: + del sys.modules[name] + + +def enabled(): + """ + Allow selection of distutils by environment variable. + """ + which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local') + return which == 'local' + + +def ensure_local_distutils(): + import importlib + + clear_distutils() + + # With the DistutilsMetaFinder in place, + # perform an import to cause distutils to be + # loaded from setuptools._distutils. Ref #2906. + with shim(): + importlib.import_module('distutils') + + # check that submodules load as expected + core = importlib.import_module('distutils.core') + assert '_distutils' in core.__file__, core.__file__ + assert 'setuptools._distutils.log' not in sys.modules + + +def do_override(): + """ + Ensure that the local copy of distutils is preferred over stdlib. + + See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401 + for more motivation. + """ + if enabled(): + warn_distutils_present() + ensure_local_distutils() + + +class _TrivialRe: + def __init__(self, *patterns): + self._patterns = patterns + + def match(self, string): + return all(pat in string for pat in self._patterns) + + +class DistutilsMetaFinder: + def find_spec(self, fullname, path, target=None): + # optimization: only consider top level modules and those + # found in the CPython test suite. + if path is not None and not fullname.startswith('test.'): + return + + method_name = 'spec_for_{fullname}'.format(**locals()) + method = getattr(self, method_name, lambda: None) + return method() + + def spec_for_distutils(self): + if self.is_cpython(): + return + + import importlib + import importlib.abc + import importlib.util + + try: + mod = importlib.import_module('setuptools._distutils') + except Exception: + # There are a couple of cases where setuptools._distutils + # may not be present: + # - An older Setuptools without a local distutils is + # taking precedence. Ref #2957. + # - Path manipulation during sitecustomize removes + # setuptools from the path but only after the hook + # has been loaded. Ref #2980. + # In either case, fall back to stdlib behavior. + return + + class DistutilsLoader(importlib.abc.Loader): + def create_module(self, spec): + mod.__name__ = 'distutils' + return mod + + def exec_module(self, module): + pass + + return importlib.util.spec_from_loader( + 'distutils', DistutilsLoader(), origin=mod.__file__ + ) + + @staticmethod + def is_cpython(): + """ + Suppress supplying distutils for CPython (build and tests). + Ref #2965 and #3007. + """ + return os.path.isfile('pybuilddir.txt') + + def spec_for_pip(self): + """ + Ensure stdlib distutils when running under pip. + See pypa/pip#8761 for rationale. + """ + if self.pip_imported_during_build(): + return + clear_distutils() + self.spec_for_distutils = lambda: None + + @classmethod + def pip_imported_during_build(cls): + """ + Detect if pip is being imported in a build script. Ref #2355. + """ + import traceback + + return any( + cls.frame_file_is_setup(frame) for frame, line in traceback.walk_stack(None) + ) + + @staticmethod + def frame_file_is_setup(frame): + """ + Return True if the indicated frame suggests a setup.py file. + """ + # some frames may not have __file__ (#2940) + return frame.f_globals.get('__file__', '').endswith('setup.py') + + def spec_for_sensitive_tests(self): + """ + Ensure stdlib distutils when running select tests under CPython. + + python/cpython#91169 + """ + clear_distutils() + self.spec_for_distutils = lambda: None + + sensitive_tests = ( + [ + 'test.test_distutils', + 'test.test_peg_generator', + 'test.test_importlib', + ] + if sys.version_info < (3, 10) + else [ + 'test.test_distutils', + ] + ) + + +for name in DistutilsMetaFinder.sensitive_tests: + setattr( + DistutilsMetaFinder, + f'spec_for_{name}', + DistutilsMetaFinder.spec_for_sensitive_tests, + ) + + +DISTUTILS_FINDER = DistutilsMetaFinder() + + +def add_shim(): + DISTUTILS_FINDER in sys.meta_path or insert_shim() + + +class shim: + def __enter__(self): + insert_shim() + + def __exit__(self, exc, value, tb): + remove_shim() + + +def insert_shim(): + sys.meta_path.insert(0, DISTUTILS_FINDER) + + +def remove_shim(): + try: + sys.meta_path.remove(DISTUTILS_FINDER) + except ValueError: + pass diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_distutils_hack/override.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_distutils_hack/override.py new file mode 100644 index 0000000..2cc433a --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_distutils_hack/override.py @@ -0,0 +1 @@ +__import__('_distutils_hack').do_override() diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/__init__.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/__init__.py new file mode 100644 index 0000000..8eb8ec9 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/__init__.py @@ -0,0 +1,13 @@ +from __future__ import annotations + + +__all__ = ["__version__", "version_tuple"] + +try: + from ._version import version as __version__ + from ._version import version_tuple +except ImportError: # pragma: no cover + # broken installation, we don't even try + # unknown only works because we do poor mans version compare + __version__ = "unknown" + version_tuple = (0, 0, "unknown") diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_argcomplete.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_argcomplete.py new file mode 100644 index 0000000..59426ef --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_argcomplete.py @@ -0,0 +1,117 @@ +"""Allow bash-completion for argparse with argcomplete if installed. + +Needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail +to find the magic string, so _ARGCOMPLETE env. var is never set, and +this does not need special code). + +Function try_argcomplete(parser) should be called directly before +the call to ArgumentParser.parse_args(). + +The filescompleter is what you normally would use on the positional +arguments specification, in order to get "dirname/" after "dirn" +instead of the default "dirname ": + + optparser.add_argument(Config._file_or_dir, nargs='*').completer=filescompleter + +Other, application specific, completers should go in the file +doing the add_argument calls as they need to be specified as .completer +attributes as well. (If argcomplete is not installed, the function the +attribute points to will not be used). + +SPEEDUP +======= + +The generic argcomplete script for bash-completion +(/etc/bash_completion.d/python-argcomplete.sh) +uses a python program to determine startup script generated by pip. +You can speed up completion somewhat by changing this script to include + # PYTHON_ARGCOMPLETE_OK +so the python-argcomplete-check-easy-install-script does not +need to be called to find the entry point of the code and see if that is +marked with PYTHON_ARGCOMPLETE_OK. + +INSTALL/DEBUGGING +================= + +To include this support in another application that has setup.py generated +scripts: + +- Add the line: + # PYTHON_ARGCOMPLETE_OK + near the top of the main python entry point. + +- Include in the file calling parse_args(): + from _argcomplete import try_argcomplete, filescompleter + Call try_argcomplete just before parse_args(), and optionally add + filescompleter to the positional arguments' add_argument(). + +If things do not work right away: + +- Switch on argcomplete debugging with (also helpful when doing custom + completers): + export _ARC_DEBUG=1 + +- Run: + python-argcomplete-check-easy-install-script $(which appname) + echo $? + will echo 0 if the magic line has been found, 1 if not. + +- Sometimes it helps to find early on errors using: + _ARGCOMPLETE=1 _ARC_DEBUG=1 appname + which should throw a KeyError: 'COMPLINE' (which is properly set by the + global argcomplete script). +""" + +from __future__ import annotations + +import argparse +from glob import glob +import os +import sys +from typing import Any + + +class FastFilesCompleter: + """Fast file completer class.""" + + def __init__(self, directories: bool = True) -> None: + self.directories = directories + + def __call__(self, prefix: str, **kwargs: Any) -> list[str]: + # Only called on non option completions. + if os.sep in prefix[1:]: + prefix_dir = len(os.path.dirname(prefix) + os.sep) + else: + prefix_dir = 0 + completion = [] + globbed = [] + if "*" not in prefix and "?" not in prefix: + # We are on unix, otherwise no bash. + if not prefix or prefix[-1] == os.sep: + globbed.extend(glob(prefix + ".*")) + prefix += "*" + globbed.extend(glob(prefix)) + for x in sorted(globbed): + if os.path.isdir(x): + x += "/" + # Append stripping the prefix (like bash, not like compgen). + completion.append(x[prefix_dir:]) + return completion + + +if os.environ.get("_ARGCOMPLETE"): + try: + import argcomplete.completers + except ImportError: + sys.exit(-1) + filescompleter: FastFilesCompleter | None = FastFilesCompleter() + + def try_argcomplete(parser: argparse.ArgumentParser) -> None: + argcomplete.autocomplete(parser, always_complete_options=False) + +else: + + def try_argcomplete(parser: argparse.ArgumentParser) -> None: + pass + + filescompleter = None diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_code/__init__.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_code/__init__.py new file mode 100644 index 0000000..7f67a2e --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_code/__init__.py @@ -0,0 +1,26 @@ +"""Python inspection/code generation API.""" + +from __future__ import annotations + +from .code import Code +from .code import ExceptionInfo +from .code import filter_traceback +from .code import Frame +from .code import getfslineno +from .code import Traceback +from .code import TracebackEntry +from .source import getrawcode +from .source import Source + + +__all__ = [ + "Code", + "ExceptionInfo", + "Frame", + "Source", + "Traceback", + "TracebackEntry", + "filter_traceback", + "getfslineno", + "getrawcode", +] diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_code/code.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_code/code.py new file mode 100644 index 0000000..add2a49 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_code/code.py @@ -0,0 +1,1565 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import ast +from collections.abc import Callable +from collections.abc import Iterable +from collections.abc import Mapping +from collections.abc import Sequence +import dataclasses +import inspect +from inspect import CO_VARARGS +from inspect import CO_VARKEYWORDS +from io import StringIO +import os +from pathlib import Path +import re +import sys +from traceback import extract_tb +from traceback import format_exception +from traceback import format_exception_only +from traceback import FrameSummary +from types import CodeType +from types import FrameType +from types import TracebackType +from typing import Any +from typing import ClassVar +from typing import Final +from typing import final +from typing import Generic +from typing import Literal +from typing import overload +from typing import SupportsIndex +from typing import TypeAlias +from typing import TypeVar + +import pluggy + +import _pytest +from _pytest._code.source import findsource +from _pytest._code.source import getrawcode +from _pytest._code.source import getstatementrange_ast +from _pytest._code.source import Source +from _pytest._io import TerminalWriter +from _pytest._io.saferepr import safeformat +from _pytest._io.saferepr import saferepr +from _pytest.compat import get_real_func +from _pytest.deprecated import check_ispytest +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath + + +if sys.version_info < (3, 11): + from exceptiongroup import BaseExceptionGroup + +TracebackStyle = Literal["long", "short", "line", "no", "native", "value", "auto"] + +EXCEPTION_OR_MORE = type[BaseException] | tuple[type[BaseException], ...] + + +class Code: + """Wrapper around Python code objects.""" + + __slots__ = ("raw",) + + def __init__(self, obj: CodeType) -> None: + self.raw = obj + + @classmethod + def from_function(cls, obj: object) -> Code: + return cls(getrawcode(obj)) + + def __eq__(self, other): + return self.raw == other.raw + + # Ignore type because of https://github.com/python/mypy/issues/4266. + __hash__ = None # type: ignore + + @property + def firstlineno(self) -> int: + return self.raw.co_firstlineno - 1 + + @property + def name(self) -> str: + return self.raw.co_name + + @property + def path(self) -> Path | str: + """Return a path object pointing to source code, or an ``str`` in + case of ``OSError`` / non-existing file.""" + if not self.raw.co_filename: + return "" + try: + p = absolutepath(self.raw.co_filename) + # maybe don't try this checking + if not p.exists(): + raise OSError("path check failed.") + return p + except OSError: + # XXX maybe try harder like the weird logic + # in the standard lib [linecache.updatecache] does? + return self.raw.co_filename + + @property + def fullsource(self) -> Source | None: + """Return a _pytest._code.Source object for the full source file of the code.""" + full, _ = findsource(self.raw) + return full + + def source(self) -> Source: + """Return a _pytest._code.Source object for the code object's source only.""" + # return source only for that part of code + return Source(self.raw) + + def getargs(self, var: bool = False) -> tuple[str, ...]: + """Return a tuple with the argument names for the code object. + + If 'var' is set True also return the names of the variable and + keyword arguments when present. + """ + # Handy shortcut for getting args. + raw = self.raw + argcount = raw.co_argcount + if var: + argcount += raw.co_flags & CO_VARARGS + argcount += raw.co_flags & CO_VARKEYWORDS + return raw.co_varnames[:argcount] + + +class Frame: + """Wrapper around a Python frame holding f_locals and f_globals + in which expressions can be evaluated.""" + + __slots__ = ("raw",) + + def __init__(self, frame: FrameType) -> None: + self.raw = frame + + @property + def lineno(self) -> int: + return self.raw.f_lineno - 1 + + @property + def f_globals(self) -> dict[str, Any]: + return self.raw.f_globals + + @property + def f_locals(self) -> dict[str, Any]: + return self.raw.f_locals + + @property + def code(self) -> Code: + return Code(self.raw.f_code) + + @property + def statement(self) -> Source: + """Statement this frame is at.""" + if self.code.fullsource is None: + return Source("") + return self.code.fullsource.getstatement(self.lineno) + + def eval(self, code, **vars): + """Evaluate 'code' in the frame. + + 'vars' are optional additional local variables. + + Returns the result of the evaluation. + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + return eval(code, self.f_globals, f_locals) + + def repr(self, object: object) -> str: + """Return a 'safe' (non-recursive, one-line) string repr for 'object'.""" + return saferepr(object) + + def getargs(self, var: bool = False): + """Return a list of tuples (name, value) for all arguments. + + If 'var' is set True, also include the variable and keyword arguments + when present. + """ + retval = [] + for arg in self.code.getargs(var): + try: + retval.append((arg, self.f_locals[arg])) + except KeyError: + pass # this can occur when using Psyco + return retval + + +class TracebackEntry: + """A single entry in a Traceback.""" + + __slots__ = ("_rawentry", "_repr_style") + + def __init__( + self, + rawentry: TracebackType, + repr_style: Literal["short", "long"] | None = None, + ) -> None: + self._rawentry: Final = rawentry + self._repr_style: Final = repr_style + + def with_repr_style( + self, repr_style: Literal["short", "long"] | None + ) -> TracebackEntry: + return TracebackEntry(self._rawentry, repr_style) + + @property + def lineno(self) -> int: + return self._rawentry.tb_lineno - 1 + + def get_python_framesummary(self) -> FrameSummary: + # Python's built-in traceback module implements all the nitty gritty + # details to get column numbers of out frames. + stack_summary = extract_tb(self._rawentry, limit=1) + return stack_summary[0] + + # Column and end line numbers introduced in python 3.11 + if sys.version_info < (3, 11): + + @property + def end_lineno_relative(self) -> int | None: + return None + + @property + def colno(self) -> int | None: + return None + + @property + def end_colno(self) -> int | None: + return None + else: + + @property + def end_lineno_relative(self) -> int | None: + frame_summary = self.get_python_framesummary() + if frame_summary.end_lineno is None: # pragma: no cover + return None + return frame_summary.end_lineno - 1 - self.frame.code.firstlineno + + @property + def colno(self) -> int | None: + """Starting byte offset of the expression in the traceback entry.""" + return self.get_python_framesummary().colno + + @property + def end_colno(self) -> int | None: + """Ending byte offset of the expression in the traceback entry.""" + return self.get_python_framesummary().end_colno + + @property + def frame(self) -> Frame: + return Frame(self._rawentry.tb_frame) + + @property + def relline(self) -> int: + return self.lineno - self.frame.code.firstlineno + + def __repr__(self) -> str: + return f"" + + @property + def statement(self) -> Source: + """_pytest._code.Source object for the current statement.""" + source = self.frame.code.fullsource + assert source is not None + return source.getstatement(self.lineno) + + @property + def path(self) -> Path | str: + """Path to the source code.""" + return self.frame.code.path + + @property + def locals(self) -> dict[str, Any]: + """Locals of underlying frame.""" + return self.frame.f_locals + + def getfirstlinesource(self) -> int: + return self.frame.code.firstlineno + + def getsource( + self, astcache: dict[str | Path, ast.AST] | None = None + ) -> Source | None: + """Return failing source code.""" + # we use the passed in astcache to not reparse asttrees + # within exception info printing + source = self.frame.code.fullsource + if source is None: + return None + key = astnode = None + if astcache is not None: + key = self.frame.code.path + if key is not None: + astnode = astcache.get(key, None) + start = self.getfirstlinesource() + try: + astnode, _, end = getstatementrange_ast( + self.lineno, source, astnode=astnode + ) + except SyntaxError: + end = self.lineno + 1 + else: + if key is not None and astcache is not None: + astcache[key] = astnode + return source[start:end] + + source = property(getsource) + + def ishidden(self, excinfo: ExceptionInfo[BaseException] | None) -> bool: + """Return True if the current frame has a var __tracebackhide__ + resolving to True. + + If __tracebackhide__ is a callable, it gets called with the + ExceptionInfo instance and can decide whether to hide the traceback. + + Mostly for internal use. + """ + tbh: bool | Callable[[ExceptionInfo[BaseException] | None], bool] = False + for maybe_ns_dct in (self.frame.f_locals, self.frame.f_globals): + # in normal cases, f_locals and f_globals are dictionaries + # however via `exec(...)` / `eval(...)` they can be other types + # (even incorrect types!). + # as such, we suppress all exceptions while accessing __tracebackhide__ + try: + tbh = maybe_ns_dct["__tracebackhide__"] + except Exception: + pass + else: + break + if tbh and callable(tbh): + return tbh(excinfo) + return tbh + + def __str__(self) -> str: + name = self.frame.code.name + try: + line = str(self.statement).lstrip() + except KeyboardInterrupt: + raise + except BaseException: + line = "???" + # This output does not quite match Python's repr for traceback entries, + # but changing it to do so would break certain plugins. See + # https://github.com/pytest-dev/pytest/pull/7535/ for details. + return f" File '{self.path}':{self.lineno + 1} in {name}\n {line}\n" + + @property + def name(self) -> str: + """co_name of underlying code.""" + return self.frame.code.raw.co_name + + +class Traceback(list[TracebackEntry]): + """Traceback objects encapsulate and offer higher level access to Traceback entries.""" + + def __init__( + self, + tb: TracebackType | Iterable[TracebackEntry], + ) -> None: + """Initialize from given python traceback object and ExceptionInfo.""" + if isinstance(tb, TracebackType): + + def f(cur: TracebackType) -> Iterable[TracebackEntry]: + cur_: TracebackType | None = cur + while cur_ is not None: + yield TracebackEntry(cur_) + cur_ = cur_.tb_next + + super().__init__(f(tb)) + else: + super().__init__(tb) + + def cut( + self, + path: os.PathLike[str] | str | None = None, + lineno: int | None = None, + firstlineno: int | None = None, + excludepath: os.PathLike[str] | None = None, + ) -> Traceback: + """Return a Traceback instance wrapping part of this Traceback. + + By providing any combination of path, lineno and firstlineno, the + first frame to start the to-be-returned traceback is determined. + + This allows cutting the first part of a Traceback instance e.g. + for formatting reasons (removing some uninteresting bits that deal + with handling of the exception/traceback). + """ + path_ = None if path is None else os.fspath(path) + excludepath_ = None if excludepath is None else os.fspath(excludepath) + for x in self: + code = x.frame.code + codepath = code.path + if path is not None and str(codepath) != path_: + continue + if ( + excludepath is not None + and isinstance(codepath, Path) + and excludepath_ in (str(p) for p in codepath.parents) # type: ignore[operator] + ): + continue + if lineno is not None and x.lineno != lineno: + continue + if firstlineno is not None and x.frame.code.firstlineno != firstlineno: + continue + return Traceback(x._rawentry) + return self + + @overload + def __getitem__(self, key: SupportsIndex) -> TracebackEntry: ... + + @overload + def __getitem__(self, key: slice) -> Traceback: ... + + def __getitem__(self, key: SupportsIndex | slice) -> TracebackEntry | Traceback: + if isinstance(key, slice): + return self.__class__(super().__getitem__(key)) + else: + return super().__getitem__(key) + + def filter( + self, + excinfo_or_fn: ExceptionInfo[BaseException] | Callable[[TracebackEntry], bool], + /, + ) -> Traceback: + """Return a Traceback instance with certain items removed. + + If the filter is an `ExceptionInfo`, removes all the ``TracebackEntry``s + which are hidden (see ishidden() above). + + Otherwise, the filter is a function that gets a single argument, a + ``TracebackEntry`` instance, and should return True when the item should + be added to the ``Traceback``, False when not. + """ + if isinstance(excinfo_or_fn, ExceptionInfo): + fn = lambda x: not x.ishidden(excinfo_or_fn) # noqa: E731 + else: + fn = excinfo_or_fn + return Traceback(filter(fn, self)) + + def recursionindex(self) -> int | None: + """Return the index of the frame/TracebackEntry where recursion originates if + appropriate, None if no recursion occurred.""" + cache: dict[tuple[Any, int, int], list[dict[str, Any]]] = {} + for i, entry in enumerate(self): + # id for the code.raw is needed to work around + # the strange metaprogramming in the decorator lib from pypi + # which generates code objects that have hash/value equality + # XXX needs a test + key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno + values = cache.setdefault(key, []) + # Since Python 3.13 f_locals is a proxy, freeze it. + loc = dict(entry.frame.f_locals) + if values: + for otherloc in values: + if otherloc == loc: + return i + values.append(loc) + return None + + +def stringify_exception( + exc: BaseException, include_subexception_msg: bool = True +) -> str: + try: + notes = getattr(exc, "__notes__", []) + except KeyError: + # Workaround for https://github.com/python/cpython/issues/98778 on + # some 3.10 and 3.11 patch versions. + HTTPError = getattr(sys.modules.get("urllib.error", None), "HTTPError", ()) + if sys.version_info < (3, 12) and isinstance(exc, HTTPError): + notes = [] + else: # pragma: no cover + # exception not related to above bug, reraise + raise + if not include_subexception_msg and isinstance(exc, BaseExceptionGroup): + message = exc.message + else: + message = str(exc) + + return "\n".join( + [ + message, + *notes, + ] + ) + + +E = TypeVar("E", bound=BaseException, covariant=True) + + +@final +@dataclasses.dataclass +class ExceptionInfo(Generic[E]): + """Wraps sys.exc_info() objects and offers help for navigating the traceback.""" + + _assert_start_repr: ClassVar = "AssertionError('assert " + + _excinfo: tuple[type[E], E, TracebackType] | None + _striptext: str + _traceback: Traceback | None + + def __init__( + self, + excinfo: tuple[type[E], E, TracebackType] | None, + striptext: str = "", + traceback: Traceback | None = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._excinfo = excinfo + self._striptext = striptext + self._traceback = traceback + + @classmethod + def from_exception( + cls, + # Ignoring error: "Cannot use a covariant type variable as a parameter". + # This is OK to ignore because this class is (conceptually) readonly. + # See https://github.com/python/mypy/issues/7049. + exception: E, # type: ignore[misc] + exprinfo: str | None = None, + ) -> ExceptionInfo[E]: + """Return an ExceptionInfo for an existing exception. + + The exception must have a non-``None`` ``__traceback__`` attribute, + otherwise this function fails with an assertion error. This means that + the exception must have been raised, or added a traceback with the + :py:meth:`~BaseException.with_traceback()` method. + + :param exprinfo: + A text string helping to determine if we should strip + ``AssertionError`` from the output. Defaults to the exception + message/``__str__()``. + + .. versionadded:: 7.4 + """ + assert exception.__traceback__, ( + "Exceptions passed to ExcInfo.from_exception(...)" + " must have a non-None __traceback__." + ) + exc_info = (type(exception), exception, exception.__traceback__) + return cls.from_exc_info(exc_info, exprinfo) + + @classmethod + def from_exc_info( + cls, + exc_info: tuple[type[E], E, TracebackType], + exprinfo: str | None = None, + ) -> ExceptionInfo[E]: + """Like :func:`from_exception`, but using old-style exc_info tuple.""" + _striptext = "" + if exprinfo is None and isinstance(exc_info[1], AssertionError): + exprinfo = getattr(exc_info[1], "msg", None) + if exprinfo is None: + exprinfo = saferepr(exc_info[1]) + if exprinfo and exprinfo.startswith(cls._assert_start_repr): + _striptext = "AssertionError: " + + return cls(exc_info, _striptext, _ispytest=True) + + @classmethod + def from_current(cls, exprinfo: str | None = None) -> ExceptionInfo[BaseException]: + """Return an ExceptionInfo matching the current traceback. + + .. warning:: + + Experimental API + + :param exprinfo: + A text string helping to determine if we should strip + ``AssertionError`` from the output. Defaults to the exception + message/``__str__()``. + """ + tup = sys.exc_info() + assert tup[0] is not None, "no current exception" + assert tup[1] is not None, "no current exception" + assert tup[2] is not None, "no current exception" + exc_info = (tup[0], tup[1], tup[2]) + return ExceptionInfo.from_exc_info(exc_info, exprinfo) + + @classmethod + def for_later(cls) -> ExceptionInfo[E]: + """Return an unfilled ExceptionInfo.""" + return cls(None, _ispytest=True) + + def fill_unfilled(self, exc_info: tuple[type[E], E, TracebackType]) -> None: + """Fill an unfilled ExceptionInfo created with ``for_later()``.""" + assert self._excinfo is None, "ExceptionInfo was already filled" + self._excinfo = exc_info + + @property + def type(self) -> type[E]: + """The exception class.""" + assert self._excinfo is not None, ( + ".type can only be used after the context manager exits" + ) + return self._excinfo[0] + + @property + def value(self) -> E: + """The exception value.""" + assert self._excinfo is not None, ( + ".value can only be used after the context manager exits" + ) + return self._excinfo[1] + + @property + def tb(self) -> TracebackType: + """The exception raw traceback.""" + assert self._excinfo is not None, ( + ".tb can only be used after the context manager exits" + ) + return self._excinfo[2] + + @property + def typename(self) -> str: + """The type name of the exception.""" + assert self._excinfo is not None, ( + ".typename can only be used after the context manager exits" + ) + return self.type.__name__ + + @property + def traceback(self) -> Traceback: + """The traceback.""" + if self._traceback is None: + self._traceback = Traceback(self.tb) + return self._traceback + + @traceback.setter + def traceback(self, value: Traceback) -> None: + self._traceback = value + + def __repr__(self) -> str: + if self._excinfo is None: + return "" + return f"<{self.__class__.__name__} {saferepr(self._excinfo[1])} tblen={len(self.traceback)}>" + + def exconly(self, tryshort: bool = False) -> str: + """Return the exception as a string. + + When 'tryshort' resolves to True, and the exception is an + AssertionError, only the actual exception part of the exception + representation is returned (so 'AssertionError: ' is removed from + the beginning). + """ + + def _get_single_subexc( + eg: BaseExceptionGroup[BaseException], + ) -> BaseException | None: + if len(eg.exceptions) != 1: + return None + if isinstance(e := eg.exceptions[0], BaseExceptionGroup): + return _get_single_subexc(e) + return e + + if ( + tryshort + and isinstance(self.value, BaseExceptionGroup) + and (subexc := _get_single_subexc(self.value)) is not None + ): + return f"{subexc!r} [single exception in {type(self.value).__name__}]" + + lines = format_exception_only(self.type, self.value) + text = "".join(lines) + text = text.rstrip() + if tryshort: + if text.startswith(self._striptext): + text = text[len(self._striptext) :] + return text + + def errisinstance(self, exc: EXCEPTION_OR_MORE) -> bool: + """Return True if the exception is an instance of exc. + + Consider using ``isinstance(excinfo.value, exc)`` instead. + """ + return isinstance(self.value, exc) + + def _getreprcrash(self) -> ReprFileLocation | None: + # Find last non-hidden traceback entry that led to the exception of the + # traceback, or None if all hidden. + for i in range(-1, -len(self.traceback) - 1, -1): + entry = self.traceback[i] + if not entry.ishidden(self): + path, lineno = entry.frame.code.raw.co_filename, entry.lineno + exconly = self.exconly(tryshort=True) + return ReprFileLocation(path, lineno + 1, exconly) + return None + + def getrepr( + self, + showlocals: bool = False, + style: TracebackStyle = "long", + abspath: bool = False, + tbfilter: bool | Callable[[ExceptionInfo[BaseException]], Traceback] = True, + funcargs: bool = False, + truncate_locals: bool = True, + truncate_args: bool = True, + chain: bool = True, + ) -> ReprExceptionInfo | ExceptionChainRepr: + """Return str()able representation of this exception info. + + :param bool showlocals: + Show locals per traceback entry. + Ignored if ``style=="native"``. + + :param str style: + long|short|line|no|native|value traceback style. + + :param bool abspath: + If paths should be changed to absolute or left unchanged. + + :param tbfilter: + A filter for traceback entries. + + * If false, don't hide any entries. + * If true, hide internal entries and entries that contain a local + variable ``__tracebackhide__ = True``. + * If a callable, delegates the filtering to the callable. + + Ignored if ``style`` is ``"native"``. + + :param bool funcargs: + Show fixtures ("funcargs" for legacy purposes) per traceback entry. + + :param bool truncate_locals: + With ``showlocals==True``, make sure locals can be safely represented as strings. + + :param bool truncate_args: + With ``showargs==True``, make sure args can be safely represented as strings. + + :param bool chain: + If chained exceptions in Python 3 should be shown. + + .. versionchanged:: 3.9 + + Added the ``chain`` parameter. + """ + if style == "native": + return ReprExceptionInfo( + reprtraceback=ReprTracebackNative( + format_exception( + self.type, + self.value, + self.traceback[0]._rawentry if self.traceback else None, + ) + ), + reprcrash=self._getreprcrash(), + ) + + fmt = FormattedExcinfo( + showlocals=showlocals, + style=style, + abspath=abspath, + tbfilter=tbfilter, + funcargs=funcargs, + truncate_locals=truncate_locals, + truncate_args=truncate_args, + chain=chain, + ) + return fmt.repr_excinfo(self) + + def match(self, regexp: str | re.Pattern[str]) -> Literal[True]: + """Check whether the regular expression `regexp` matches the string + representation of the exception using :func:`python:re.search`. + + If it matches `True` is returned, otherwise an `AssertionError` is raised. + """ + __tracebackhide__ = True + value = stringify_exception(self.value) + msg = ( + f"Regex pattern did not match.\n" + f" Expected regex: {regexp!r}\n" + f" Actual message: {value!r}" + ) + if regexp == value: + msg += "\n Did you mean to `re.escape()` the regex?" + assert re.search(regexp, value), msg + # Return True to allow for "assert excinfo.match()". + return True + + def _group_contains( + self, + exc_group: BaseExceptionGroup[BaseException], + expected_exception: EXCEPTION_OR_MORE, + match: str | re.Pattern[str] | None, + target_depth: int | None = None, + current_depth: int = 1, + ) -> bool: + """Return `True` if a `BaseExceptionGroup` contains a matching exception.""" + if (target_depth is not None) and (current_depth > target_depth): + # already descended past the target depth + return False + for exc in exc_group.exceptions: + if isinstance(exc, BaseExceptionGroup): + if self._group_contains( + exc, expected_exception, match, target_depth, current_depth + 1 + ): + return True + if (target_depth is not None) and (current_depth != target_depth): + # not at the target depth, no match + continue + if not isinstance(exc, expected_exception): + continue + if match is not None: + value = stringify_exception(exc) + if not re.search(match, value): + continue + return True + return False + + def group_contains( + self, + expected_exception: EXCEPTION_OR_MORE, + *, + match: str | re.Pattern[str] | None = None, + depth: int | None = None, + ) -> bool: + """Check whether a captured exception group contains a matching exception. + + :param Type[BaseException] | Tuple[Type[BaseException]] expected_exception: + The expected exception type, or a tuple if one of multiple possible + exception types are expected. + + :param str | re.Pattern[str] | None match: + If specified, a string containing a regular expression, + or a regular expression object, that is tested against the string + representation of the exception and its `PEP-678 ` `__notes__` + using :func:`re.search`. + + To match a literal string that may contain :ref:`special characters + `, the pattern can first be escaped with :func:`re.escape`. + + :param Optional[int] depth: + If `None`, will search for a matching exception at any nesting depth. + If >= 1, will only match an exception if it's at the specified depth (depth = 1 being + the exceptions contained within the topmost exception group). + + .. versionadded:: 8.0 + + .. warning:: + This helper makes it easy to check for the presence of specific exceptions, + but it is very bad for checking that the group does *not* contain + *any other exceptions*. + You should instead consider using :class:`pytest.RaisesGroup` + + """ + msg = "Captured exception is not an instance of `BaseExceptionGroup`" + assert isinstance(self.value, BaseExceptionGroup), msg + msg = "`depth` must be >= 1 if specified" + assert (depth is None) or (depth >= 1), msg + return self._group_contains(self.value, expected_exception, match, depth) + + +# Type alias for the `tbfilter` setting: +# bool: If True, it should be filtered using Traceback.filter() +# callable: A callable that takes an ExceptionInfo and returns the filtered traceback. +TracebackFilter: TypeAlias = bool | Callable[[ExceptionInfo[BaseException]], Traceback] + + +@dataclasses.dataclass +class FormattedExcinfo: + """Presenting information about failing Functions and Generators.""" + + # for traceback entries + flow_marker: ClassVar = ">" + fail_marker: ClassVar = "E" + + showlocals: bool = False + style: TracebackStyle = "long" + abspath: bool = True + tbfilter: TracebackFilter = True + funcargs: bool = False + truncate_locals: bool = True + truncate_args: bool = True + chain: bool = True + astcache: dict[str | Path, ast.AST] = dataclasses.field( + default_factory=dict, init=False, repr=False + ) + + def _getindent(self, source: Source) -> int: + # Figure out indent for the given source. + try: + s = str(source.getstatement(len(source) - 1)) + except KeyboardInterrupt: + raise + except BaseException: + try: + s = str(source[-1]) + except KeyboardInterrupt: + raise + except BaseException: + return 0 + return 4 + (len(s) - len(s.lstrip())) + + def _getentrysource(self, entry: TracebackEntry) -> Source | None: + source = entry.getsource(self.astcache) + if source is not None: + source = source.deindent() + return source + + def repr_args(self, entry: TracebackEntry) -> ReprFuncArgs | None: + if self.funcargs: + args = [] + for argname, argvalue in entry.frame.getargs(var=True): + if self.truncate_args: + str_repr = saferepr(argvalue) + else: + str_repr = saferepr(argvalue, maxsize=None) + args.append((argname, str_repr)) + return ReprFuncArgs(args) + return None + + def get_source( + self, + source: Source | None, + line_index: int = -1, + excinfo: ExceptionInfo[BaseException] | None = None, + short: bool = False, + end_line_index: int | None = None, + colno: int | None = None, + end_colno: int | None = None, + ) -> list[str]: + """Return formatted and marked up source lines.""" + lines = [] + if source is not None and line_index < 0: + line_index += len(source) + if source is None or line_index >= len(source.lines) or line_index < 0: + # `line_index` could still be outside `range(len(source.lines))` if + # we're processing AST with pathological position attributes. + source = Source("???") + line_index = 0 + space_prefix = " " + if short: + lines.append(space_prefix + source.lines[line_index].strip()) + lines.extend( + self.get_highlight_arrows_for_line( + raw_line=source.raw_lines[line_index], + line=source.lines[line_index].strip(), + lineno=line_index, + end_lineno=end_line_index, + colno=colno, + end_colno=end_colno, + ) + ) + else: + for line in source.lines[:line_index]: + lines.append(space_prefix + line) + lines.append(self.flow_marker + " " + source.lines[line_index]) + lines.extend( + self.get_highlight_arrows_for_line( + raw_line=source.raw_lines[line_index], + line=source.lines[line_index], + lineno=line_index, + end_lineno=end_line_index, + colno=colno, + end_colno=end_colno, + ) + ) + for line in source.lines[line_index + 1 :]: + lines.append(space_prefix + line) + if excinfo is not None: + indent = 4 if short else self._getindent(source) + lines.extend(self.get_exconly(excinfo, indent=indent, markall=True)) + return lines + + def get_highlight_arrows_for_line( + self, + line: str, + raw_line: str, + lineno: int | None, + end_lineno: int | None, + colno: int | None, + end_colno: int | None, + ) -> list[str]: + """Return characters highlighting a source line. + + Example with colno and end_colno pointing to the bar expression: + "foo() + bar()" + returns " ^^^^^" + """ + if lineno != end_lineno: + # Don't handle expressions that span multiple lines. + return [] + if colno is None or end_colno is None: + # Can't do anything without column information. + return [] + + num_stripped_chars = len(raw_line) - len(line) + + start_char_offset = _byte_offset_to_character_offset(raw_line, colno) + end_char_offset = _byte_offset_to_character_offset(raw_line, end_colno) + num_carets = end_char_offset - start_char_offset + # If the highlight would span the whole line, it is redundant, don't + # show it. + if num_carets >= len(line.strip()): + return [] + + highlights = " " + highlights += " " * (start_char_offset - num_stripped_chars + 1) + highlights += "^" * num_carets + return [highlights] + + def get_exconly( + self, + excinfo: ExceptionInfo[BaseException], + indent: int = 4, + markall: bool = False, + ) -> list[str]: + lines = [] + indentstr = " " * indent + # Get the real exception information out. + exlines = excinfo.exconly(tryshort=True).split("\n") + failindent = self.fail_marker + indentstr[1:] + for line in exlines: + lines.append(failindent + line) + if not markall: + failindent = indentstr + return lines + + def repr_locals(self, locals: Mapping[str, object]) -> ReprLocals | None: + if self.showlocals: + lines = [] + keys = [loc for loc in locals if loc[0] != "@"] + keys.sort() + for name in keys: + value = locals[name] + if name == "__builtins__": + lines.append("__builtins__ = ") + else: + # This formatting could all be handled by the + # _repr() function, which is only reprlib.Repr in + # disguise, so is very configurable. + if self.truncate_locals: + str_repr = saferepr(value) + else: + str_repr = safeformat(value) + # if len(str_repr) < 70 or not isinstance(value, (list, tuple, dict)): + lines.append(f"{name:<10} = {str_repr}") + # else: + # self._line("%-10s =\\" % (name,)) + # # XXX + # pprint.pprint(value, stream=self.excinfowriter) + return ReprLocals(lines) + return None + + def repr_traceback_entry( + self, + entry: TracebackEntry | None, + excinfo: ExceptionInfo[BaseException] | None = None, + ) -> ReprEntry: + lines: list[str] = [] + style = ( + entry._repr_style + if entry is not None and entry._repr_style is not None + else self.style + ) + if style in ("short", "long") and entry is not None: + source = self._getentrysource(entry) + if source is None: + source = Source("???") + line_index = 0 + end_line_index, colno, end_colno = None, None, None + else: + line_index = entry.relline + end_line_index = entry.end_lineno_relative + colno = entry.colno + end_colno = entry.end_colno + short = style == "short" + reprargs = self.repr_args(entry) if not short else None + s = self.get_source( + source=source, + line_index=line_index, + excinfo=excinfo, + short=short, + end_line_index=end_line_index, + colno=colno, + end_colno=end_colno, + ) + lines.extend(s) + if short: + message = f"in {entry.name}" + else: + message = (excinfo and excinfo.typename) or "" + entry_path = entry.path + path = self._makepath(entry_path) + reprfileloc = ReprFileLocation(path, entry.lineno + 1, message) + localsrepr = self.repr_locals(entry.locals) + return ReprEntry(lines, reprargs, localsrepr, reprfileloc, style) + elif style == "value": + if excinfo: + lines.extend(str(excinfo.value).split("\n")) + return ReprEntry(lines, None, None, None, style) + else: + if excinfo: + lines.extend(self.get_exconly(excinfo, indent=4)) + return ReprEntry(lines, None, None, None, style) + + def _makepath(self, path: Path | str) -> str: + if not self.abspath and isinstance(path, Path): + try: + np = bestrelpath(Path.cwd(), path) + except OSError: + return str(path) + if len(np) < len(str(path)): + return np + return str(path) + + def repr_traceback(self, excinfo: ExceptionInfo[BaseException]) -> ReprTraceback: + traceback = filter_excinfo_traceback(self.tbfilter, excinfo) + + if isinstance(excinfo.value, RecursionError): + traceback, extraline = self._truncate_recursive_traceback(traceback) + else: + extraline = None + + if not traceback: + if extraline is None: + extraline = "All traceback entries are hidden. Pass `--full-trace` to see hidden and internal frames." + entries = [self.repr_traceback_entry(None, excinfo)] + return ReprTraceback(entries, extraline, style=self.style) + + last = traceback[-1] + if self.style == "value": + entries = [self.repr_traceback_entry(last, excinfo)] + return ReprTraceback(entries, None, style=self.style) + + entries = [ + self.repr_traceback_entry(entry, excinfo if last == entry else None) + for entry in traceback + ] + return ReprTraceback(entries, extraline, style=self.style) + + def _truncate_recursive_traceback( + self, traceback: Traceback + ) -> tuple[Traceback, str | None]: + """Truncate the given recursive traceback trying to find the starting + point of the recursion. + + The detection is done by going through each traceback entry and + finding the point in which the locals of the frame are equal to the + locals of a previous frame (see ``recursionindex()``). + + Handle the situation where the recursion process might raise an + exception (for example comparing numpy arrays using equality raises a + TypeError), in which case we do our best to warn the user of the + error and show a limited traceback. + """ + try: + recursionindex = traceback.recursionindex() + except Exception as e: + max_frames = 10 + extraline: str | None = ( + "!!! Recursion error detected, but an error occurred locating the origin of recursion.\n" + " The following exception happened when comparing locals in the stack frame:\n" + f" {type(e).__name__}: {e!s}\n" + f" Displaying first and last {max_frames} stack frames out of {len(traceback)}." + ) + # Type ignored because adding two instances of a List subtype + # currently incorrectly has type List instead of the subtype. + traceback = traceback[:max_frames] + traceback[-max_frames:] # type: ignore + else: + if recursionindex is not None: + extraline = "!!! Recursion detected (same locals & position)" + traceback = traceback[: recursionindex + 1] + else: + extraline = None + + return traceback, extraline + + def repr_excinfo(self, excinfo: ExceptionInfo[BaseException]) -> ExceptionChainRepr: + repr_chain: list[tuple[ReprTraceback, ReprFileLocation | None, str | None]] = [] + e: BaseException | None = excinfo.value + excinfo_: ExceptionInfo[BaseException] | None = excinfo + descr = None + seen: set[int] = set() + while e is not None and id(e) not in seen: + seen.add(id(e)) + + if excinfo_: + # Fall back to native traceback as a temporary workaround until + # full support for exception groups added to ExceptionInfo. + # See https://github.com/pytest-dev/pytest/issues/9159 + reprtraceback: ReprTraceback | ReprTracebackNative + if isinstance(e, BaseExceptionGroup): + # don't filter any sub-exceptions since they shouldn't have any internal frames + traceback = filter_excinfo_traceback(self.tbfilter, excinfo) + reprtraceback = ReprTracebackNative( + format_exception( + type(excinfo.value), + excinfo.value, + traceback[0]._rawentry, + ) + ) + else: + reprtraceback = self.repr_traceback(excinfo_) + reprcrash = excinfo_._getreprcrash() + else: + # Fallback to native repr if the exception doesn't have a traceback: + # ExceptionInfo objects require a full traceback to work. + reprtraceback = ReprTracebackNative(format_exception(type(e), e, None)) + reprcrash = None + repr_chain += [(reprtraceback, reprcrash, descr)] + + if e.__cause__ is not None and self.chain: + e = e.__cause__ + excinfo_ = ExceptionInfo.from_exception(e) if e.__traceback__ else None + descr = "The above exception was the direct cause of the following exception:" + elif ( + e.__context__ is not None and not e.__suppress_context__ and self.chain + ): + e = e.__context__ + excinfo_ = ExceptionInfo.from_exception(e) if e.__traceback__ else None + descr = "During handling of the above exception, another exception occurred:" + else: + e = None + repr_chain.reverse() + return ExceptionChainRepr(repr_chain) + + +@dataclasses.dataclass(eq=False) +class TerminalRepr: + def __str__(self) -> str: + # FYI this is called from pytest-xdist's serialization of exception + # information. + io = StringIO() + tw = TerminalWriter(file=io) + self.toterminal(tw) + return io.getvalue().strip() + + def __repr__(self) -> str: + return f"<{self.__class__} instance at {id(self):0x}>" + + def toterminal(self, tw: TerminalWriter) -> None: + raise NotImplementedError() + + +# This class is abstract -- only subclasses are instantiated. +@dataclasses.dataclass(eq=False) +class ExceptionRepr(TerminalRepr): + # Provided by subclasses. + reprtraceback: ReprTraceback + reprcrash: ReprFileLocation | None + sections: list[tuple[str, str, str]] = dataclasses.field( + init=False, default_factory=list + ) + + def addsection(self, name: str, content: str, sep: str = "-") -> None: + self.sections.append((name, content, sep)) + + def toterminal(self, tw: TerminalWriter) -> None: + for name, content, sep in self.sections: + tw.sep(sep, name) + tw.line(content) + + +@dataclasses.dataclass(eq=False) +class ExceptionChainRepr(ExceptionRepr): + chain: Sequence[tuple[ReprTraceback, ReprFileLocation | None, str | None]] + + def __init__( + self, + chain: Sequence[tuple[ReprTraceback, ReprFileLocation | None, str | None]], + ) -> None: + # reprcrash and reprtraceback of the outermost (the newest) exception + # in the chain. + super().__init__( + reprtraceback=chain[-1][0], + reprcrash=chain[-1][1], + ) + self.chain = chain + + def toterminal(self, tw: TerminalWriter) -> None: + for element in self.chain: + element[0].toterminal(tw) + if element[2] is not None: + tw.line("") + tw.line(element[2], yellow=True) + super().toterminal(tw) + + +@dataclasses.dataclass(eq=False) +class ReprExceptionInfo(ExceptionRepr): + reprtraceback: ReprTraceback + reprcrash: ReprFileLocation | None + + def toterminal(self, tw: TerminalWriter) -> None: + self.reprtraceback.toterminal(tw) + super().toterminal(tw) + + +@dataclasses.dataclass(eq=False) +class ReprTraceback(TerminalRepr): + reprentries: Sequence[ReprEntry | ReprEntryNative] + extraline: str | None + style: TracebackStyle + + entrysep: ClassVar = "_ " + + def toterminal(self, tw: TerminalWriter) -> None: + # The entries might have different styles. + for i, entry in enumerate(self.reprentries): + if entry.style == "long": + tw.line("") + entry.toterminal(tw) + if i < len(self.reprentries) - 1: + next_entry = self.reprentries[i + 1] + if entry.style == "long" or ( + entry.style == "short" and next_entry.style == "long" + ): + tw.sep(self.entrysep) + + if self.extraline: + tw.line(self.extraline) + + +class ReprTracebackNative(ReprTraceback): + def __init__(self, tblines: Sequence[str]) -> None: + self.reprentries = [ReprEntryNative(tblines)] + self.extraline = None + self.style = "native" + + +@dataclasses.dataclass(eq=False) +class ReprEntryNative(TerminalRepr): + lines: Sequence[str] + + style: ClassVar[TracebackStyle] = "native" + + def toterminal(self, tw: TerminalWriter) -> None: + tw.write("".join(self.lines)) + + +@dataclasses.dataclass(eq=False) +class ReprEntry(TerminalRepr): + lines: Sequence[str] + reprfuncargs: ReprFuncArgs | None + reprlocals: ReprLocals | None + reprfileloc: ReprFileLocation | None + style: TracebackStyle + + def _write_entry_lines(self, tw: TerminalWriter) -> None: + """Write the source code portions of a list of traceback entries with syntax highlighting. + + Usually entries are lines like these: + + " x = 1" + "> assert x == 2" + "E assert 1 == 2" + + This function takes care of rendering the "source" portions of it (the lines without + the "E" prefix) using syntax highlighting, taking care to not highlighting the ">" + character, as doing so might break line continuations. + """ + if not self.lines: + return + + if self.style == "value": + # Using tw.write instead of tw.line for testing purposes due to TWMock implementation; + # lines written with TWMock.line and TWMock._write_source cannot be distinguished + # from each other, whereas lines written with TWMock.write are marked with TWMock.WRITE + for line in self.lines: + tw.write(line) + tw.write("\n") + return + + # separate indents and source lines that are not failures: we want to + # highlight the code but not the indentation, which may contain markers + # such as "> assert 0" + fail_marker = f"{FormattedExcinfo.fail_marker} " + indent_size = len(fail_marker) + indents: list[str] = [] + source_lines: list[str] = [] + failure_lines: list[str] = [] + for index, line in enumerate(self.lines): + is_failure_line = line.startswith(fail_marker) + if is_failure_line: + # from this point on all lines are considered part of the failure + failure_lines.extend(self.lines[index:]) + break + else: + indents.append(line[:indent_size]) + source_lines.append(line[indent_size:]) + + tw._write_source(source_lines, indents) + + # failure lines are always completely red and bold + for line in failure_lines: + tw.line(line, bold=True, red=True) + + def toterminal(self, tw: TerminalWriter) -> None: + if self.style == "short": + if self.reprfileloc: + self.reprfileloc.toterminal(tw) + self._write_entry_lines(tw) + if self.reprlocals: + self.reprlocals.toterminal(tw, indent=" " * 8) + return + + if self.reprfuncargs: + self.reprfuncargs.toterminal(tw) + + self._write_entry_lines(tw) + + if self.reprlocals: + tw.line("") + self.reprlocals.toterminal(tw) + if self.reprfileloc: + if self.lines: + tw.line("") + self.reprfileloc.toterminal(tw) + + def __str__(self) -> str: + return "{}\n{}\n{}".format( + "\n".join(self.lines), self.reprlocals, self.reprfileloc + ) + + +@dataclasses.dataclass(eq=False) +class ReprFileLocation(TerminalRepr): + path: str + lineno: int + message: str + + def __post_init__(self) -> None: + self.path = str(self.path) + + def toterminal(self, tw: TerminalWriter) -> None: + # Filename and lineno output for each entry, using an output format + # that most editors understand. + msg = self.message + i = msg.find("\n") + if i != -1: + msg = msg[:i] + tw.write(self.path, bold=True, red=True) + tw.line(f":{self.lineno}: {msg}") + + +@dataclasses.dataclass(eq=False) +class ReprLocals(TerminalRepr): + lines: Sequence[str] + + def toterminal(self, tw: TerminalWriter, indent="") -> None: + for line in self.lines: + tw.line(indent + line) + + +@dataclasses.dataclass(eq=False) +class ReprFuncArgs(TerminalRepr): + args: Sequence[tuple[str, object]] + + def toterminal(self, tw: TerminalWriter) -> None: + if self.args: + linesofar = "" + for name, value in self.args: + ns = f"{name} = {value}" + if len(ns) + len(linesofar) + 2 > tw.fullwidth: + if linesofar: + tw.line(linesofar) + linesofar = ns + else: + if linesofar: + linesofar += ", " + ns + else: + linesofar = ns + if linesofar: + tw.line(linesofar) + tw.line("") + + +def getfslineno(obj: object) -> tuple[str | Path, int]: + """Return source location (path, lineno) for the given object. + + If the source cannot be determined return ("", -1). + + The line number is 0-based. + """ + # xxx let decorators etc specify a sane ordering + # NOTE: this used to be done in _pytest.compat.getfslineno, initially added + # in 6ec13a2b9. It ("place_as") appears to be something very custom. + obj = get_real_func(obj) + if hasattr(obj, "place_as"): + obj = obj.place_as + + try: + code = Code.from_function(obj) + except TypeError: + try: + fn = inspect.getsourcefile(obj) or inspect.getfile(obj) # type: ignore[arg-type] + except TypeError: + return "", -1 + + fspath = (fn and absolutepath(fn)) or "" + lineno = -1 + if fspath: + try: + _, lineno = findsource(obj) + except OSError: + pass + return fspath, lineno + + return code.path, code.firstlineno + + +def _byte_offset_to_character_offset(str, offset): + """Converts a byte based offset in a string to a code-point.""" + as_utf8 = str.encode("utf-8") + return len(as_utf8[:offset].decode("utf-8", errors="replace")) + + +# Relative paths that we use to filter traceback entries from appearing to the user; +# see filter_traceback. +# note: if we need to add more paths than what we have now we should probably use a list +# for better maintenance. + +_PLUGGY_DIR = Path(pluggy.__file__.rstrip("oc")) +# pluggy is either a package or a single module depending on the version +if _PLUGGY_DIR.name == "__init__.py": + _PLUGGY_DIR = _PLUGGY_DIR.parent +_PYTEST_DIR = Path(_pytest.__file__).parent + + +def filter_traceback(entry: TracebackEntry) -> bool: + """Return True if a TracebackEntry instance should be included in tracebacks. + + We hide traceback entries of: + + * dynamically generated code (no code to show up for it); + * internal traceback from pytest or its internal libraries, py and pluggy. + """ + # entry.path might sometimes return a str object when the entry + # points to dynamically generated code. + # See https://bitbucket.org/pytest-dev/py/issues/71. + raw_filename = entry.frame.code.raw.co_filename + is_generated = "<" in raw_filename and ">" in raw_filename + if is_generated: + return False + + # entry.path might point to a non-existing file, in which case it will + # also return a str object. See #1133. + p = Path(entry.path) + + parents = p.parents + if _PLUGGY_DIR in parents: + return False + if _PYTEST_DIR in parents: + return False + + return True + + +def filter_excinfo_traceback( + tbfilter: TracebackFilter, excinfo: ExceptionInfo[BaseException] +) -> Traceback: + """Filter the exception traceback in ``excinfo`` according to ``tbfilter``.""" + if callable(tbfilter): + return tbfilter(excinfo) + elif tbfilter: + return excinfo.traceback.filter(excinfo) + else: + return excinfo.traceback diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_code/source.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_code/source.py new file mode 100644 index 0000000..99c242d --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_code/source.py @@ -0,0 +1,225 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import ast +from bisect import bisect_right +from collections.abc import Iterable +from collections.abc import Iterator +import inspect +import textwrap +import tokenize +import types +from typing import overload +import warnings + + +class Source: + """An immutable object holding a source code fragment. + + When using Source(...), the source lines are deindented. + """ + + def __init__(self, obj: object = None) -> None: + if not obj: + self.lines: list[str] = [] + self.raw_lines: list[str] = [] + elif isinstance(obj, Source): + self.lines = obj.lines + self.raw_lines = obj.raw_lines + elif isinstance(obj, tuple | list): + self.lines = deindent(x.rstrip("\n") for x in obj) + self.raw_lines = list(x.rstrip("\n") for x in obj) + elif isinstance(obj, str): + self.lines = deindent(obj.split("\n")) + self.raw_lines = obj.split("\n") + else: + try: + rawcode = getrawcode(obj) + src = inspect.getsource(rawcode) + except TypeError: + src = inspect.getsource(obj) # type: ignore[arg-type] + self.lines = deindent(src.split("\n")) + self.raw_lines = src.split("\n") + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Source): + return NotImplemented + return self.lines == other.lines + + # Ignore type because of https://github.com/python/mypy/issues/4266. + __hash__ = None # type: ignore + + @overload + def __getitem__(self, key: int) -> str: ... + + @overload + def __getitem__(self, key: slice) -> Source: ... + + def __getitem__(self, key: int | slice) -> str | Source: + if isinstance(key, int): + return self.lines[key] + else: + if key.step not in (None, 1): + raise IndexError("cannot slice a Source with a step") + newsource = Source() + newsource.lines = self.lines[key.start : key.stop] + newsource.raw_lines = self.raw_lines[key.start : key.stop] + return newsource + + def __iter__(self) -> Iterator[str]: + return iter(self.lines) + + def __len__(self) -> int: + return len(self.lines) + + def strip(self) -> Source: + """Return new Source object with trailing and leading blank lines removed.""" + start, end = 0, len(self) + while start < end and not self.lines[start].strip(): + start += 1 + while end > start and not self.lines[end - 1].strip(): + end -= 1 + source = Source() + source.raw_lines = self.raw_lines + source.lines[:] = self.lines[start:end] + return source + + def indent(self, indent: str = " " * 4) -> Source: + """Return a copy of the source object with all lines indented by the + given indent-string.""" + newsource = Source() + newsource.raw_lines = self.raw_lines + newsource.lines = [(indent + line) for line in self.lines] + return newsource + + def getstatement(self, lineno: int) -> Source: + """Return Source statement which contains the given linenumber + (counted from 0).""" + start, end = self.getstatementrange(lineno) + return self[start:end] + + def getstatementrange(self, lineno: int) -> tuple[int, int]: + """Return (start, end) tuple which spans the minimal statement region + which containing the given lineno.""" + if not (0 <= lineno < len(self)): + raise IndexError("lineno out of range") + _ast, start, end = getstatementrange_ast(lineno, self) + return start, end + + def deindent(self) -> Source: + """Return a new Source object deindented.""" + newsource = Source() + newsource.lines[:] = deindent(self.lines) + newsource.raw_lines = self.raw_lines + return newsource + + def __str__(self) -> str: + return "\n".join(self.lines) + + +# +# helper functions +# + + +def findsource(obj) -> tuple[Source | None, int]: + try: + sourcelines, lineno = inspect.findsource(obj) + except Exception: + return None, -1 + source = Source() + source.lines = [line.rstrip() for line in sourcelines] + source.raw_lines = sourcelines + return source, lineno + + +def getrawcode(obj: object, trycall: bool = True) -> types.CodeType: + """Return code object for given function.""" + try: + return obj.__code__ # type: ignore[attr-defined,no-any-return] + except AttributeError: + pass + if trycall: + call = getattr(obj, "__call__", None) + if call and not isinstance(obj, type): + return getrawcode(call, trycall=False) + raise TypeError(f"could not get code object for {obj!r}") + + +def deindent(lines: Iterable[str]) -> list[str]: + return textwrap.dedent("\n".join(lines)).splitlines() + + +def get_statement_startend2(lineno: int, node: ast.AST) -> tuple[int, int | None]: + # Flatten all statements and except handlers into one lineno-list. + # AST's line numbers start indexing at 1. + values: list[int] = [] + for x in ast.walk(node): + if isinstance(x, ast.stmt | ast.ExceptHandler): + # The lineno points to the class/def, so need to include the decorators. + if isinstance(x, ast.ClassDef | ast.FunctionDef | ast.AsyncFunctionDef): + for d in x.decorator_list: + values.append(d.lineno - 1) + values.append(x.lineno - 1) + for name in ("finalbody", "orelse"): + val: list[ast.stmt] | None = getattr(x, name, None) + if val: + # Treat the finally/orelse part as its own statement. + values.append(val[0].lineno - 1 - 1) + values.sort() + insert_index = bisect_right(values, lineno) + start = values[insert_index - 1] + if insert_index >= len(values): + end = None + else: + end = values[insert_index] + return start, end + + +def getstatementrange_ast( + lineno: int, + source: Source, + assertion: bool = False, + astnode: ast.AST | None = None, +) -> tuple[ast.AST, int, int]: + if astnode is None: + content = str(source) + # See #4260: + # Don't produce duplicate warnings when compiling source to find AST. + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + astnode = ast.parse(content, "source", "exec") + + start, end = get_statement_startend2(lineno, astnode) + # We need to correct the end: + # - ast-parsing strips comments + # - there might be empty lines + # - we might have lesser indented code blocks at the end + if end is None: + end = len(source.lines) + + if end > start + 1: + # Make sure we don't span differently indented code blocks + # by using the BlockFinder helper used which inspect.getsource() uses itself. + block_finder = inspect.BlockFinder() + # If we start with an indented line, put blockfinder to "started" mode. + block_finder.started = ( + bool(source.lines[start]) and source.lines[start][0].isspace() + ) + it = ((x + "\n") for x in source.lines[start:end]) + try: + for tok in tokenize.generate_tokens(lambda: next(it)): + block_finder.tokeneater(*tok) + except (inspect.EndOfBlock, IndentationError): + end = block_finder.last + start + except Exception: + pass + + # The end might still point to a comment or empty line, correct it. + while end: + line = source.lines[end - 1].lstrip() + if line.startswith("#") or not line: + end -= 1 + else: + break + return astnode, start, end diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_io/__init__.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_io/__init__.py new file mode 100644 index 0000000..b0155b1 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_io/__init__.py @@ -0,0 +1,10 @@ +from __future__ import annotations + +from .terminalwriter import get_terminal_width +from .terminalwriter import TerminalWriter + + +__all__ = [ + "TerminalWriter", + "get_terminal_width", +] diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_io/pprint.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_io/pprint.py new file mode 100644 index 0000000..28f0690 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_io/pprint.py @@ -0,0 +1,673 @@ +# mypy: allow-untyped-defs +# This module was imported from the cpython standard library +# (https://github.com/python/cpython/) at commit +# c5140945c723ae6c4b7ee81ff720ac8ea4b52cfd (python3.12). +# +# +# Original Author: Fred L. Drake, Jr. +# fdrake@acm.org +# +# This is a simple little module I wrote to make life easier. I didn't +# see anything quite like it in the library, though I may have overlooked +# something. I wrote this when I was trying to read some heavily nested +# tuples with fairly non-descriptive content. This is modeled very much +# after Lisp/Scheme - style pretty-printing of lists. If you find it +# useful, thank small children who sleep at night. +from __future__ import annotations + +import collections as _collections +from collections.abc import Callable +from collections.abc import Iterator +import dataclasses as _dataclasses +from io import StringIO as _StringIO +import re +import types as _types +from typing import Any +from typing import IO + + +class _safe_key: + """Helper function for key functions when sorting unorderable objects. + + The wrapped-object will fallback to a Py2.x style comparison for + unorderable types (sorting first comparing the type name and then by + the obj ids). Does not work recursively, so dict.items() must have + _safe_key applied to both the key and the value. + + """ + + __slots__ = ["obj"] + + def __init__(self, obj): + self.obj = obj + + def __lt__(self, other): + try: + return self.obj < other.obj + except TypeError: + return (str(type(self.obj)), id(self.obj)) < ( + str(type(other.obj)), + id(other.obj), + ) + + +def _safe_tuple(t): + """Helper function for comparing 2-tuples""" + return _safe_key(t[0]), _safe_key(t[1]) + + +class PrettyPrinter: + def __init__( + self, + indent: int = 4, + width: int = 80, + depth: int | None = None, + ) -> None: + """Handle pretty printing operations onto a stream using a set of + configured parameters. + + indent + Number of spaces to indent for each level of nesting. + + width + Attempted maximum number of columns in the output. + + depth + The maximum depth to print out nested structures. + + """ + if indent < 0: + raise ValueError("indent must be >= 0") + if depth is not None and depth <= 0: + raise ValueError("depth must be > 0") + if not width: + raise ValueError("width must be != 0") + self._depth = depth + self._indent_per_level = indent + self._width = width + + def pformat(self, object: Any) -> str: + sio = _StringIO() + self._format(object, sio, 0, 0, set(), 0) + return sio.getvalue() + + def _format( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + objid = id(object) + if objid in context: + stream.write(_recursion(object)) + return + + p = self._dispatch.get(type(object).__repr__, None) + if p is not None: + context.add(objid) + p(self, object, stream, indent, allowance, context, level + 1) + context.remove(objid) + elif ( + _dataclasses.is_dataclass(object) + and not isinstance(object, type) + and object.__dataclass_params__.repr # type:ignore[attr-defined] + and + # Check dataclass has generated repr method. + hasattr(object.__repr__, "__wrapped__") + and "__create_fn__" in object.__repr__.__wrapped__.__qualname__ + ): + context.add(objid) + self._pprint_dataclass( + object, stream, indent, allowance, context, level + 1 + ) + context.remove(objid) + else: + stream.write(self._repr(object, context, level)) + + def _pprint_dataclass( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + cls_name = object.__class__.__name__ + items = [ + (f.name, getattr(object, f.name)) + for f in _dataclasses.fields(object) + if f.repr + ] + stream.write(cls_name + "(") + self._format_namespace_items(items, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch: dict[ + Callable[..., str], + Callable[[PrettyPrinter, Any, IO[str], int, int, set[int], int], None], + ] = {} + + def _pprint_dict( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + write = stream.write + write("{") + items = sorted(object.items(), key=_safe_tuple) + self._format_dict_items(items, stream, indent, allowance, context, level) + write("}") + + _dispatch[dict.__repr__] = _pprint_dict + + def _pprint_ordered_dict( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not len(object): + stream.write(repr(object)) + return + cls = object.__class__ + stream.write(cls.__name__ + "(") + self._pprint_dict(object, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_collections.OrderedDict.__repr__] = _pprint_ordered_dict + + def _pprint_list( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + stream.write("[") + self._format_items(object, stream, indent, allowance, context, level) + stream.write("]") + + _dispatch[list.__repr__] = _pprint_list + + def _pprint_tuple( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + stream.write("(") + self._format_items(object, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[tuple.__repr__] = _pprint_tuple + + def _pprint_set( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not len(object): + stream.write(repr(object)) + return + typ = object.__class__ + if typ is set: + stream.write("{") + endchar = "}" + else: + stream.write(typ.__name__ + "({") + endchar = "})" + object = sorted(object, key=_safe_key) + self._format_items(object, stream, indent, allowance, context, level) + stream.write(endchar) + + _dispatch[set.__repr__] = _pprint_set + _dispatch[frozenset.__repr__] = _pprint_set + + def _pprint_str( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + write = stream.write + if not len(object): + write(repr(object)) + return + chunks = [] + lines = object.splitlines(True) + if level == 1: + indent += 1 + allowance += 1 + max_width1 = max_width = self._width - indent + for i, line in enumerate(lines): + rep = repr(line) + if i == len(lines) - 1: + max_width1 -= allowance + if len(rep) <= max_width1: + chunks.append(rep) + else: + # A list of alternating (non-space, space) strings + parts = re.findall(r"\S*\s*", line) + assert parts + assert not parts[-1] + parts.pop() # drop empty last part + max_width2 = max_width + current = "" + for j, part in enumerate(parts): + candidate = current + part + if j == len(parts) - 1 and i == len(lines) - 1: + max_width2 -= allowance + if len(repr(candidate)) > max_width2: + if current: + chunks.append(repr(current)) + current = part + else: + current = candidate + if current: + chunks.append(repr(current)) + if len(chunks) == 1: + write(rep) + return + if level == 1: + write("(") + for i, rep in enumerate(chunks): + if i > 0: + write("\n" + " " * indent) + write(rep) + if level == 1: + write(")") + + _dispatch[str.__repr__] = _pprint_str + + def _pprint_bytes( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + write = stream.write + if len(object) <= 4: + write(repr(object)) + return + parens = level == 1 + if parens: + indent += 1 + allowance += 1 + write("(") + delim = "" + for rep in _wrap_bytes_repr(object, self._width - indent, allowance): + write(delim) + write(rep) + if not delim: + delim = "\n" + " " * indent + if parens: + write(")") + + _dispatch[bytes.__repr__] = _pprint_bytes + + def _pprint_bytearray( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + write = stream.write + write("bytearray(") + self._pprint_bytes( + bytes(object), stream, indent + 10, allowance + 1, context, level + 1 + ) + write(")") + + _dispatch[bytearray.__repr__] = _pprint_bytearray + + def _pprint_mappingproxy( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + stream.write("mappingproxy(") + self._format(object.copy(), stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_types.MappingProxyType.__repr__] = _pprint_mappingproxy + + def _pprint_simplenamespace( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if type(object) is _types.SimpleNamespace: + # The SimpleNamespace repr is "namespace" instead of the class + # name, so we do the same here. For subclasses; use the class name. + cls_name = "namespace" + else: + cls_name = object.__class__.__name__ + items = object.__dict__.items() + stream.write(cls_name + "(") + self._format_namespace_items(items, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_types.SimpleNamespace.__repr__] = _pprint_simplenamespace + + def _format_dict_items( + self, + items: list[tuple[Any, Any]], + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not items: + return + + write = stream.write + item_indent = indent + self._indent_per_level + delimnl = "\n" + " " * item_indent + for key, ent in items: + write(delimnl) + write(self._repr(key, context, level)) + write(": ") + self._format(ent, stream, item_indent, 1, context, level) + write(",") + + write("\n" + " " * indent) + + def _format_namespace_items( + self, + items: list[tuple[Any, Any]], + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not items: + return + + write = stream.write + item_indent = indent + self._indent_per_level + delimnl = "\n" + " " * item_indent + for key, ent in items: + write(delimnl) + write(key) + write("=") + if id(ent) in context: + # Special-case representation of recursion to match standard + # recursive dataclass repr. + write("...") + else: + self._format( + ent, + stream, + item_indent + len(key) + 1, + 1, + context, + level, + ) + + write(",") + + write("\n" + " " * indent) + + def _format_items( + self, + items: list[Any], + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not items: + return + + write = stream.write + item_indent = indent + self._indent_per_level + delimnl = "\n" + " " * item_indent + + for item in items: + write(delimnl) + self._format(item, stream, item_indent, 1, context, level) + write(",") + + write("\n" + " " * indent) + + def _repr(self, object: Any, context: set[int], level: int) -> str: + return self._safe_repr(object, context.copy(), self._depth, level) + + def _pprint_default_dict( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + rdf = self._repr(object.default_factory, context, level) + stream.write(f"{object.__class__.__name__}({rdf}, ") + self._pprint_dict(object, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_collections.defaultdict.__repr__] = _pprint_default_dict + + def _pprint_counter( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + stream.write(object.__class__.__name__ + "(") + + if object: + stream.write("{") + items = object.most_common() + self._format_dict_items(items, stream, indent, allowance, context, level) + stream.write("}") + + stream.write(")") + + _dispatch[_collections.Counter.__repr__] = _pprint_counter + + def _pprint_chain_map( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not len(object.maps) or (len(object.maps) == 1 and not len(object.maps[0])): + stream.write(repr(object)) + return + + stream.write(object.__class__.__name__ + "(") + self._format_items(object.maps, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_collections.ChainMap.__repr__] = _pprint_chain_map + + def _pprint_deque( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + stream.write(object.__class__.__name__ + "(") + if object.maxlen is not None: + stream.write(f"maxlen={object.maxlen}, ") + stream.write("[") + + self._format_items(object, stream, indent, allowance + 1, context, level) + stream.write("])") + + _dispatch[_collections.deque.__repr__] = _pprint_deque + + def _pprint_user_dict( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + self._format(object.data, stream, indent, allowance, context, level - 1) + + _dispatch[_collections.UserDict.__repr__] = _pprint_user_dict + + def _pprint_user_list( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + self._format(object.data, stream, indent, allowance, context, level - 1) + + _dispatch[_collections.UserList.__repr__] = _pprint_user_list + + def _pprint_user_string( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + self._format(object.data, stream, indent, allowance, context, level - 1) + + _dispatch[_collections.UserString.__repr__] = _pprint_user_string + + def _safe_repr( + self, object: Any, context: set[int], maxlevels: int | None, level: int + ) -> str: + typ = type(object) + if typ in _builtin_scalars: + return repr(object) + + r = getattr(typ, "__repr__", None) + + if issubclass(typ, dict) and r is dict.__repr__: + if not object: + return "{}" + objid = id(object) + if maxlevels and level >= maxlevels: + return "{...}" + if objid in context: + return _recursion(object) + context.add(objid) + components: list[str] = [] + append = components.append + level += 1 + for k, v in sorted(object.items(), key=_safe_tuple): + krepr = self._safe_repr(k, context, maxlevels, level) + vrepr = self._safe_repr(v, context, maxlevels, level) + append(f"{krepr}: {vrepr}") + context.remove(objid) + return "{{{}}}".format(", ".join(components)) + + if (issubclass(typ, list) and r is list.__repr__) or ( + issubclass(typ, tuple) and r is tuple.__repr__ + ): + if issubclass(typ, list): + if not object: + return "[]" + format = "[%s]" + elif len(object) == 1: + format = "(%s,)" + else: + if not object: + return "()" + format = "(%s)" + objid = id(object) + if maxlevels and level >= maxlevels: + return format % "..." + if objid in context: + return _recursion(object) + context.add(objid) + components = [] + append = components.append + level += 1 + for o in object: + orepr = self._safe_repr(o, context, maxlevels, level) + append(orepr) + context.remove(objid) + return format % ", ".join(components) + + return repr(object) + + +_builtin_scalars = frozenset( + {str, bytes, bytearray, float, complex, bool, type(None), int} +) + + +def _recursion(object: Any) -> str: + return f"" + + +def _wrap_bytes_repr(object: Any, width: int, allowance: int) -> Iterator[str]: + current = b"" + last = len(object) // 4 * 4 + for i in range(0, len(object), 4): + part = object[i : i + 4] + candidate = current + part + if i == last: + width -= allowance + if len(repr(candidate)) > width: + if current: + yield repr(current) + current = part + else: + current = candidate + if current: + yield repr(current) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_io/saferepr.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_io/saferepr.py new file mode 100644 index 0000000..cee70e3 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_io/saferepr.py @@ -0,0 +1,130 @@ +from __future__ import annotations + +import pprint +import reprlib + + +def _try_repr_or_str(obj: object) -> str: + try: + return repr(obj) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + return f'{type(obj).__name__}("{obj}")' + + +def _format_repr_exception(exc: BaseException, obj: object) -> str: + try: + exc_info = _try_repr_or_str(exc) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as inner_exc: + exc_info = f"unpresentable exception ({_try_repr_or_str(inner_exc)})" + return ( + f"<[{exc_info} raised in repr()] {type(obj).__name__} object at 0x{id(obj):x}>" + ) + + +def _ellipsize(s: str, maxsize: int) -> str: + if len(s) > maxsize: + i = max(0, (maxsize - 3) // 2) + j = max(0, maxsize - 3 - i) + return s[:i] + "..." + s[len(s) - j :] + return s + + +class SafeRepr(reprlib.Repr): + """ + repr.Repr that limits the resulting size of repr() and includes + information on exceptions raised during the call. + """ + + def __init__(self, maxsize: int | None, use_ascii: bool = False) -> None: + """ + :param maxsize: + If not None, will truncate the resulting repr to that specific size, using ellipsis + somewhere in the middle to hide the extra text. + If None, will not impose any size limits on the returning repr. + """ + super().__init__() + # ``maxstring`` is used by the superclass, and needs to be an int; using a + # very large number in case maxsize is None, meaning we want to disable + # truncation. + self.maxstring = maxsize if maxsize is not None else 1_000_000_000 + self.maxsize = maxsize + self.use_ascii = use_ascii + + def repr(self, x: object) -> str: + try: + if self.use_ascii: + s = ascii(x) + else: + s = super().repr(x) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + s = _format_repr_exception(exc, x) + if self.maxsize is not None: + s = _ellipsize(s, self.maxsize) + return s + + def repr_instance(self, x: object, level: int) -> str: + try: + s = repr(x) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + s = _format_repr_exception(exc, x) + if self.maxsize is not None: + s = _ellipsize(s, self.maxsize) + return s + + +def safeformat(obj: object) -> str: + """Return a pretty printed string for the given object. + + Failing __repr__ functions of user instances will be represented + with a short exception info. + """ + try: + return pprint.pformat(obj) + except Exception as exc: + return _format_repr_exception(exc, obj) + + +# Maximum size of overall repr of objects to display during assertion errors. +DEFAULT_REPR_MAX_SIZE = 240 + + +def saferepr( + obj: object, maxsize: int | None = DEFAULT_REPR_MAX_SIZE, use_ascii: bool = False +) -> str: + """Return a size-limited safe repr-string for the given object. + + Failing __repr__ functions of user instances will be represented + with a short exception info and 'saferepr' generally takes + care to never raise exceptions itself. + + This function is a wrapper around the Repr/reprlib functionality of the + stdlib. + """ + return SafeRepr(maxsize, use_ascii).repr(obj) + + +def saferepr_unlimited(obj: object, use_ascii: bool = True) -> str: + """Return an unlimited-size safe repr-string for the given object. + + As with saferepr, failing __repr__ functions of user instances + will be represented with a short exception info. + + This function is a wrapper around simple repr. + + Note: a cleaner solution would be to alter ``saferepr``this way + when maxsize=None, but that might affect some other code. + """ + try: + if use_ascii: + return ascii(obj) + return repr(obj) + except Exception as exc: + return _format_repr_exception(exc, obj) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_io/terminalwriter.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_io/terminalwriter.py new file mode 100644 index 0000000..9191b4e --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_io/terminalwriter.py @@ -0,0 +1,258 @@ +"""Helper functions for writing to terminals and files.""" + +from __future__ import annotations + +from collections.abc import Sequence +import os +import shutil +import sys +from typing import final +from typing import Literal +from typing import TextIO + +import pygments +from pygments.formatters.terminal import TerminalFormatter +from pygments.lexer import Lexer +from pygments.lexers.diff import DiffLexer +from pygments.lexers.python import PythonLexer + +from ..compat import assert_never +from .wcwidth import wcswidth + + +# This code was initially copied from py 1.8.1, file _io/terminalwriter.py. + + +def get_terminal_width() -> int: + width, _ = shutil.get_terminal_size(fallback=(80, 24)) + + # The Windows get_terminal_size may be bogus, let's sanify a bit. + if width < 40: + width = 80 + + return width + + +def should_do_markup(file: TextIO) -> bool: + if os.environ.get("PY_COLORS") == "1": + return True + if os.environ.get("PY_COLORS") == "0": + return False + if os.environ.get("NO_COLOR"): + return False + if os.environ.get("FORCE_COLOR"): + return True + return ( + hasattr(file, "isatty") and file.isatty() and os.environ.get("TERM") != "dumb" + ) + + +@final +class TerminalWriter: + _esctable = dict( + black=30, + red=31, + green=32, + yellow=33, + blue=34, + purple=35, + cyan=36, + white=37, + Black=40, + Red=41, + Green=42, + Yellow=43, + Blue=44, + Purple=45, + Cyan=46, + White=47, + bold=1, + light=2, + blink=5, + invert=7, + ) + + def __init__(self, file: TextIO | None = None) -> None: + if file is None: + file = sys.stdout + if hasattr(file, "isatty") and file.isatty() and sys.platform == "win32": + try: + import colorama + except ImportError: + pass + else: + file = colorama.AnsiToWin32(file).stream + assert file is not None + self._file = file + self.hasmarkup = should_do_markup(file) + self._current_line = "" + self._terminal_width: int | None = None + self.code_highlight = True + + @property + def fullwidth(self) -> int: + if self._terminal_width is not None: + return self._terminal_width + return get_terminal_width() + + @fullwidth.setter + def fullwidth(self, value: int) -> None: + self._terminal_width = value + + @property + def width_of_current_line(self) -> int: + """Return an estimate of the width so far in the current line.""" + return wcswidth(self._current_line) + + def markup(self, text: str, **markup: bool) -> str: + for name in markup: + if name not in self._esctable: + raise ValueError(f"unknown markup: {name!r}") + if self.hasmarkup: + esc = [self._esctable[name] for name, on in markup.items() if on] + if esc: + text = "".join(f"\x1b[{cod}m" for cod in esc) + text + "\x1b[0m" + return text + + def sep( + self, + sepchar: str, + title: str | None = None, + fullwidth: int | None = None, + **markup: bool, + ) -> None: + if fullwidth is None: + fullwidth = self.fullwidth + # The goal is to have the line be as long as possible + # under the condition that len(line) <= fullwidth. + if sys.platform == "win32": + # If we print in the last column on windows we are on a + # new line but there is no way to verify/neutralize this + # (we may not know the exact line width). + # So let's be defensive to avoid empty lines in the output. + fullwidth -= 1 + if title is not None: + # we want 2 + 2*len(fill) + len(title) <= fullwidth + # i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth + # 2*len(sepchar)*N <= fullwidth - len(title) - 2 + # N <= (fullwidth - len(title) - 2) // (2*len(sepchar)) + N = max((fullwidth - len(title) - 2) // (2 * len(sepchar)), 1) + fill = sepchar * N + line = f"{fill} {title} {fill}" + else: + # we want len(sepchar)*N <= fullwidth + # i.e. N <= fullwidth // len(sepchar) + line = sepchar * (fullwidth // len(sepchar)) + # In some situations there is room for an extra sepchar at the right, + # in particular if we consider that with a sepchar like "_ " the + # trailing space is not important at the end of the line. + if len(line) + len(sepchar.rstrip()) <= fullwidth: + line += sepchar.rstrip() + + self.line(line, **markup) + + def write(self, msg: str, *, flush: bool = False, **markup: bool) -> None: + if msg: + current_line = msg.rsplit("\n", 1)[-1] + if "\n" in msg: + self._current_line = current_line + else: + self._current_line += current_line + + msg = self.markup(msg, **markup) + + self.write_raw(msg, flush=flush) + + def write_raw(self, msg: str, *, flush: bool = False) -> None: + try: + self._file.write(msg) + except UnicodeEncodeError: + # Some environments don't support printing general Unicode + # strings, due to misconfiguration or otherwise; in that case, + # print the string escaped to ASCII. + # When the Unicode situation improves we should consider + # letting the error propagate instead of masking it (see #7475 + # for one brief attempt). + msg = msg.encode("unicode-escape").decode("ascii") + self._file.write(msg) + + if flush: + self.flush() + + def line(self, s: str = "", **markup: bool) -> None: + self.write(s, **markup) + self.write("\n") + + def flush(self) -> None: + self._file.flush() + + def _write_source(self, lines: Sequence[str], indents: Sequence[str] = ()) -> None: + """Write lines of source code possibly highlighted. + + Keeping this private for now because the API is clunky. We should discuss how + to evolve the terminal writer so we can have more precise color support, for example + being able to write part of a line in one color and the rest in another, and so on. + """ + if indents and len(indents) != len(lines): + raise ValueError( + f"indents size ({len(indents)}) should have same size as lines ({len(lines)})" + ) + if not indents: + indents = [""] * len(lines) + source = "\n".join(lines) + new_lines = self._highlight(source).splitlines() + # Would be better to strict=True but that fails some CI jobs. + for indent, new_line in zip(indents, new_lines, strict=False): + self.line(indent + new_line) + + def _get_pygments_lexer(self, lexer: Literal["python", "diff"]) -> Lexer: + if lexer == "python": + return PythonLexer() + elif lexer == "diff": + return DiffLexer() + else: + assert_never(lexer) + + def _get_pygments_formatter(self) -> TerminalFormatter: + from _pytest.config.exceptions import UsageError + + theme = os.getenv("PYTEST_THEME") + theme_mode = os.getenv("PYTEST_THEME_MODE", "dark") + + try: + return TerminalFormatter(bg=theme_mode, style=theme) + except pygments.util.ClassNotFound as e: + raise UsageError( + f"PYTEST_THEME environment variable has an invalid value: '{theme}'. " + "Hint: See available pygments styles with `pygmentize -L styles`." + ) from e + except pygments.util.OptionError as e: + raise UsageError( + f"PYTEST_THEME_MODE environment variable has an invalid value: '{theme_mode}'. " + "The allowed values are 'dark' (default) and 'light'." + ) from e + + def _highlight( + self, source: str, lexer: Literal["diff", "python"] = "python" + ) -> str: + """Highlight the given source if we have markup support.""" + if not source or not self.hasmarkup or not self.code_highlight: + return source + + pygments_lexer = self._get_pygments_lexer(lexer) + pygments_formatter = self._get_pygments_formatter() + + highlighted: str = pygments.highlight( + source, pygments_lexer, pygments_formatter + ) + # pygments terminal formatter may add a newline when there wasn't one. + # We don't want this, remove. + if highlighted[-1] == "\n" and source[-1] != "\n": + highlighted = highlighted[:-1] + + # Some lexers will not set the initial color explicitly + # which may lead to the previous color being propagated to the + # start of the expression, so reset first. + highlighted = "\x1b[0m" + highlighted + + return highlighted diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_io/wcwidth.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_io/wcwidth.py new file mode 100644 index 0000000..23886ff --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_io/wcwidth.py @@ -0,0 +1,57 @@ +from __future__ import annotations + +from functools import lru_cache +import unicodedata + + +@lru_cache(100) +def wcwidth(c: str) -> int: + """Determine how many columns are needed to display a character in a terminal. + + Returns -1 if the character is not printable. + Returns 0, 1 or 2 for other characters. + """ + o = ord(c) + + # ASCII fast path. + if 0x20 <= o < 0x07F: + return 1 + + # Some Cf/Zp/Zl characters which should be zero-width. + if ( + o == 0x0000 + or 0x200B <= o <= 0x200F + or 0x2028 <= o <= 0x202E + or 0x2060 <= o <= 0x2063 + ): + return 0 + + category = unicodedata.category(c) + + # Control characters. + if category == "Cc": + return -1 + + # Combining characters with zero width. + if category in ("Me", "Mn"): + return 0 + + # Full/Wide east asian characters. + if unicodedata.east_asian_width(c) in ("F", "W"): + return 2 + + return 1 + + +def wcswidth(s: str) -> int: + """Determine how many columns are needed to display a string in a terminal. + + Returns -1 if the string contains non-printable characters. + """ + width = 0 + for c in unicodedata.normalize("NFC", s): + wc = wcwidth(c) + if wc < 0: + return -1 + width += wc + return width diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_py/__init__.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_py/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_py/error.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_py/error.py new file mode 100644 index 0000000..dace237 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_py/error.py @@ -0,0 +1,119 @@ +"""create errno-specific classes for IO or os calls.""" + +from __future__ import annotations + +from collections.abc import Callable +import errno +import os +import sys +from typing import TYPE_CHECKING +from typing import TypeVar + + +if TYPE_CHECKING: + from typing_extensions import ParamSpec + + P = ParamSpec("P") + +R = TypeVar("R") + + +class Error(EnvironmentError): + def __repr__(self) -> str: + return "{}.{} {!r}: {} ".format( + self.__class__.__module__, + self.__class__.__name__, + self.__class__.__doc__, + " ".join(map(str, self.args)), + # repr(self.args) + ) + + def __str__(self) -> str: + s = "[{}]: {}".format( + self.__class__.__doc__, + " ".join(map(str, self.args)), + ) + return s + + +_winerrnomap = { + 2: errno.ENOENT, + 3: errno.ENOENT, + 17: errno.EEXIST, + 18: errno.EXDEV, + 13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailable + 22: errno.ENOTDIR, + 20: errno.ENOTDIR, + 267: errno.ENOTDIR, + 5: errno.EACCES, # anything better? +} + + +class ErrorMaker: + """lazily provides Exception classes for each possible POSIX errno + (as defined per the 'errno' module). All such instances + subclass EnvironmentError. + """ + + _errno2class: dict[int, type[Error]] = {} + + def __getattr__(self, name: str) -> type[Error]: + if name[0] == "_": + raise AttributeError(name) + eno = getattr(errno, name) + cls = self._geterrnoclass(eno) + setattr(self, name, cls) + return cls + + def _geterrnoclass(self, eno: int) -> type[Error]: + try: + return self._errno2class[eno] + except KeyError: + clsname = errno.errorcode.get(eno, f"UnknownErrno{eno}") + errorcls = type( + clsname, + (Error,), + {"__module__": "py.error", "__doc__": os.strerror(eno)}, + ) + self._errno2class[eno] = errorcls + return errorcls + + def checked_call( + self, func: Callable[P, R], *args: P.args, **kwargs: P.kwargs + ) -> R: + """Call a function and raise an errno-exception if applicable.""" + __tracebackhide__ = True + try: + return func(*args, **kwargs) + except Error: + raise + except OSError as value: + if not hasattr(value, "errno"): + raise + if sys.platform == "win32": + try: + # error: Invalid index type "Optional[int]" for "dict[int, int]"; expected type "int" [index] + # OK to ignore because we catch the KeyError below. + cls = self._geterrnoclass(_winerrnomap[value.errno]) # type:ignore[index] + except KeyError: + raise value + else: + # we are not on Windows, or we got a proper OSError + if value.errno is None: + cls = type( + "UnknownErrnoNone", + (Error,), + {"__module__": "py.error", "__doc__": None}, + ) + else: + cls = self._geterrnoclass(value.errno) + + raise cls(f"{func.__name__}{args!r}") + + +_error_maker = ErrorMaker() +checked_call = _error_maker.checked_call + + +def __getattr__(attr: str) -> type[Error]: + return getattr(_error_maker, attr) # type: ignore[no-any-return] diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_py/path.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_py/path.py new file mode 100644 index 0000000..b7131b0 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_py/path.py @@ -0,0 +1,1475 @@ +# mypy: allow-untyped-defs +"""local path implementation.""" + +from __future__ import annotations + +import atexit +from collections.abc import Callable +from contextlib import contextmanager +import fnmatch +import importlib.util +import io +import os +from os.path import abspath +from os.path import dirname +from os.path import exists +from os.path import isabs +from os.path import isdir +from os.path import isfile +from os.path import islink +from os.path import normpath +import posixpath +from stat import S_ISDIR +from stat import S_ISLNK +from stat import S_ISREG +import sys +from typing import Any +from typing import cast +from typing import Literal +from typing import overload +from typing import TYPE_CHECKING +import uuid +import warnings + +from . import error + + +# Moved from local.py. +iswin32 = sys.platform == "win32" or (getattr(os, "_name", False) == "nt") + + +class Checkers: + _depend_on_existence = "exists", "link", "dir", "file" + + def __init__(self, path): + self.path = path + + def dotfile(self): + return self.path.basename.startswith(".") + + def ext(self, arg): + if not arg.startswith("."): + arg = "." + arg + return self.path.ext == arg + + def basename(self, arg): + return self.path.basename == arg + + def basestarts(self, arg): + return self.path.basename.startswith(arg) + + def relto(self, arg): + return self.path.relto(arg) + + def fnmatch(self, arg): + return self.path.fnmatch(arg) + + def endswith(self, arg): + return str(self.path).endswith(arg) + + def _evaluate(self, kw): + from .._code.source import getrawcode + + for name, value in kw.items(): + invert = False + meth = None + try: + meth = getattr(self, name) + except AttributeError: + if name[:3] == "not": + invert = True + try: + meth = getattr(self, name[3:]) + except AttributeError: + pass + if meth is None: + raise TypeError(f"no {name!r} checker available for {self.path!r}") + try: + if getrawcode(meth).co_argcount > 1: + if (not meth(value)) ^ invert: + return False + else: + if bool(value) ^ bool(meth()) ^ invert: + return False + except (error.ENOENT, error.ENOTDIR, error.EBUSY): + # EBUSY feels not entirely correct, + # but its kind of necessary since ENOMEDIUM + # is not accessible in python + for name in self._depend_on_existence: + if name in kw: + if kw.get(name): + return False + name = "not" + name + if name in kw: + if not kw.get(name): + return False + return True + + _statcache: Stat + + def _stat(self) -> Stat: + try: + return self._statcache + except AttributeError: + try: + self._statcache = self.path.stat() + except error.ELOOP: + self._statcache = self.path.lstat() + return self._statcache + + def dir(self): + return S_ISDIR(self._stat().mode) + + def file(self): + return S_ISREG(self._stat().mode) + + def exists(self): + return self._stat() + + def link(self): + st = self.path.lstat() + return S_ISLNK(st.mode) + + +class NeverRaised(Exception): + pass + + +class Visitor: + def __init__(self, fil, rec, ignore, bf, sort): + if isinstance(fil, str): + fil = FNMatcher(fil) + if isinstance(rec, str): + self.rec: Callable[[LocalPath], bool] = FNMatcher(rec) + elif not hasattr(rec, "__call__") and rec: + self.rec = lambda path: True + else: + self.rec = rec + self.fil = fil + self.ignore = ignore + self.breadthfirst = bf + self.optsort = cast(Callable[[Any], Any], sorted) if sort else (lambda x: x) + + def gen(self, path): + try: + entries = path.listdir() + except self.ignore: + return + rec = self.rec + dirs = self.optsort( + [p for p in entries if p.check(dir=1) and (rec is None or rec(p))] + ) + if not self.breadthfirst: + for subdir in dirs: + yield from self.gen(subdir) + for p in self.optsort(entries): + if self.fil is None or self.fil(p): + yield p + if self.breadthfirst: + for subdir in dirs: + yield from self.gen(subdir) + + +class FNMatcher: + def __init__(self, pattern): + self.pattern = pattern + + def __call__(self, path): + pattern = self.pattern + + if ( + pattern.find(path.sep) == -1 + and iswin32 + and pattern.find(posixpath.sep) != -1 + ): + # Running on Windows, the pattern has no Windows path separators, + # and the pattern has one or more Posix path separators. Replace + # the Posix path separators with the Windows path separator. + pattern = pattern.replace(posixpath.sep, path.sep) + + if pattern.find(path.sep) == -1: + name = path.basename + else: + name = str(path) # path.strpath # XXX svn? + if not os.path.isabs(pattern): + pattern = "*" + path.sep + pattern + return fnmatch.fnmatch(name, pattern) + + +def map_as_list(func, iter): + return list(map(func, iter)) + + +class Stat: + if TYPE_CHECKING: + + @property + def size(self) -> int: ... + + @property + def mtime(self) -> float: ... + + def __getattr__(self, name: str) -> Any: + return getattr(self._osstatresult, "st_" + name) + + def __init__(self, path, osstatresult): + self.path = path + self._osstatresult = osstatresult + + @property + def owner(self): + if iswin32: + raise NotImplementedError("XXX win32") + import pwd + + entry = error.checked_call(pwd.getpwuid, self.uid) # type:ignore[attr-defined,unused-ignore] + return entry[0] + + @property + def group(self): + """Return group name of file.""" + if iswin32: + raise NotImplementedError("XXX win32") + import grp + + entry = error.checked_call(grp.getgrgid, self.gid) # type:ignore[attr-defined,unused-ignore] + return entry[0] + + def isdir(self): + return S_ISDIR(self._osstatresult.st_mode) + + def isfile(self): + return S_ISREG(self._osstatresult.st_mode) + + def islink(self): + self.path.lstat() + return S_ISLNK(self._osstatresult.st_mode) + + +def getuserid(user): + import pwd + + if not isinstance(user, int): + user = pwd.getpwnam(user)[2] # type:ignore[attr-defined,unused-ignore] + return user + + +def getgroupid(group): + import grp + + if not isinstance(group, int): + group = grp.getgrnam(group)[2] # type:ignore[attr-defined,unused-ignore] + return group + + +class LocalPath: + """Object oriented interface to os.path and other local filesystem + related information. + """ + + class ImportMismatchError(ImportError): + """raised on pyimport() if there is a mismatch of __file__'s""" + + sep = os.sep + + def __init__(self, path=None, expanduser=False): + """Initialize and return a local Path instance. + + Path can be relative to the current directory. + If path is None it defaults to the current working directory. + If expanduser is True, tilde-expansion is performed. + Note that Path instances always carry an absolute path. + Note also that passing in a local path object will simply return + the exact same path object. Use new() to get a new copy. + """ + if path is None: + self.strpath = error.checked_call(os.getcwd) + else: + try: + path = os.fspath(path) + except TypeError: + raise ValueError( + "can only pass None, Path instances " + "or non-empty strings to LocalPath" + ) + if expanduser: + path = os.path.expanduser(path) + self.strpath = abspath(path) + + if sys.platform != "win32": + + def chown(self, user, group, rec=0): + """Change ownership to the given user and group. + user and group may be specified by a number or + by a name. if rec is True change ownership + recursively. + """ + uid = getuserid(user) + gid = getgroupid(group) + if rec: + for x in self.visit(rec=lambda x: x.check(link=0)): + if x.check(link=0): + error.checked_call(os.chown, str(x), uid, gid) + error.checked_call(os.chown, str(self), uid, gid) + + def readlink(self) -> str: + """Return value of a symbolic link.""" + # https://github.com/python/mypy/issues/12278 + return error.checked_call(os.readlink, self.strpath) # type: ignore[arg-type,return-value,unused-ignore] + + def mklinkto(self, oldname): + """Posix style hard link to another name.""" + error.checked_call(os.link, str(oldname), str(self)) + + def mksymlinkto(self, value, absolute=1): + """Create a symbolic link with the given value (pointing to another name).""" + if absolute: + error.checked_call(os.symlink, str(value), self.strpath) + else: + base = self.common(value) + # with posix local paths '/' is always a common base + relsource = self.__class__(value).relto(base) + reldest = self.relto(base) + n = reldest.count(self.sep) + target = self.sep.join(("..",) * n + (relsource,)) + error.checked_call(os.symlink, target, self.strpath) + + def __div__(self, other): + return self.join(os.fspath(other)) + + __truediv__ = __div__ # py3k + + @property + def basename(self): + """Basename part of path.""" + return self._getbyspec("basename")[0] + + @property + def dirname(self): + """Dirname part of path.""" + return self._getbyspec("dirname")[0] + + @property + def purebasename(self): + """Pure base name of the path.""" + return self._getbyspec("purebasename")[0] + + @property + def ext(self): + """Extension of the path (including the '.').""" + return self._getbyspec("ext")[0] + + def read_binary(self): + """Read and return a bytestring from reading the path.""" + with self.open("rb") as f: + return f.read() + + def read_text(self, encoding): + """Read and return a Unicode string from reading the path.""" + with self.open("r", encoding=encoding) as f: + return f.read() + + def read(self, mode="r"): + """Read and return a bytestring from reading the path.""" + with self.open(mode) as f: + return f.read() + + def readlines(self, cr=1): + """Read and return a list of lines from the path. if cr is False, the + newline will be removed from the end of each line.""" + mode = "r" + + if not cr: + content = self.read(mode) + return content.split("\n") + else: + f = self.open(mode) + try: + return f.readlines() + finally: + f.close() + + def load(self): + """(deprecated) return object unpickled from self.read()""" + f = self.open("rb") + try: + import pickle + + return error.checked_call(pickle.load, f) + finally: + f.close() + + def move(self, target): + """Move this path to target.""" + if target.relto(self): + raise error.EINVAL(target, "cannot move path into a subdirectory of itself") + try: + self.rename(target) + except error.EXDEV: # invalid cross-device link + self.copy(target) + self.remove() + + def fnmatch(self, pattern): + """Return true if the basename/fullname matches the glob-'pattern'. + + valid pattern characters:: + + * matches everything + ? matches any single character + [seq] matches any character in seq + [!seq] matches any char not in seq + + If the pattern contains a path-separator then the full path + is used for pattern matching and a '*' is prepended to the + pattern. + + if the pattern doesn't contain a path-separator the pattern + is only matched against the basename. + """ + return FNMatcher(pattern)(self) + + def relto(self, relpath): + """Return a string which is the relative part of the path + to the given 'relpath'. + """ + if not isinstance(relpath, str | LocalPath): + raise TypeError(f"{relpath!r}: not a string or path object") + strrelpath = str(relpath) + if strrelpath and strrelpath[-1] != self.sep: + strrelpath += self.sep + # assert strrelpath[-1] == self.sep + # assert strrelpath[-2] != self.sep + strself = self.strpath + if sys.platform == "win32" or getattr(os, "_name", None) == "nt": + if os.path.normcase(strself).startswith(os.path.normcase(strrelpath)): + return strself[len(strrelpath) :] + elif strself.startswith(strrelpath): + return strself[len(strrelpath) :] + return "" + + def ensure_dir(self, *args): + """Ensure the path joined with args is a directory.""" + return self.ensure(*args, dir=True) + + def bestrelpath(self, dest): + """Return a string which is a relative path from self + (assumed to be a directory) to dest such that + self.join(bestrelpath) == dest and if not such + path can be determined return dest. + """ + try: + if self == dest: + return os.curdir + base = self.common(dest) + if not base: # can be the case on windows + return str(dest) + self2base = self.relto(base) + reldest = dest.relto(base) + if self2base: + n = self2base.count(self.sep) + 1 + else: + n = 0 + lst = [os.pardir] * n + if reldest: + lst.append(reldest) + target = dest.sep.join(lst) + return target + except AttributeError: + return str(dest) + + def exists(self): + return self.check() + + def isdir(self): + return self.check(dir=1) + + def isfile(self): + return self.check(file=1) + + def parts(self, reverse=False): + """Return a root-first list of all ancestor directories + plus the path itself. + """ + current = self + lst = [self] + while 1: + last = current + current = current.dirpath() + if last == current: + break + lst.append(current) + if not reverse: + lst.reverse() + return lst + + def common(self, other): + """Return the common part shared with the other path + or None if there is no common part. + """ + last = None + for x, y in zip(self.parts(), other.parts()): + if x != y: + return last + last = x + return last + + def __add__(self, other): + """Return new path object with 'other' added to the basename""" + return self.new(basename=self.basename + str(other)) + + def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False): + """Yields all paths below the current one + + fil is a filter (glob pattern or callable), if not matching the + path will not be yielded, defaulting to None (everything is + returned) + + rec is a filter (glob pattern or callable) that controls whether + a node is descended, defaulting to None + + ignore is an Exception class that is ignoredwhen calling dirlist() + on any of the paths (by default, all exceptions are reported) + + bf if True will cause a breadthfirst search instead of the + default depthfirst. Default: False + + sort if True will sort entries within each directory level. + """ + yield from Visitor(fil, rec, ignore, bf, sort).gen(self) + + def _sortlist(self, res, sort): + if sort: + if hasattr(sort, "__call__"): + warnings.warn( + DeprecationWarning( + "listdir(sort=callable) is deprecated and breaks on python3" + ), + stacklevel=3, + ) + res.sort(sort) + else: + res.sort() + + def __fspath__(self): + return self.strpath + + def __hash__(self): + s = self.strpath + if iswin32: + s = s.lower() + return hash(s) + + def __eq__(self, other): + s1 = os.fspath(self) + try: + s2 = os.fspath(other) + except TypeError: + return False + if iswin32: + s1 = s1.lower() + try: + s2 = s2.lower() + except AttributeError: + return False + return s1 == s2 + + def __ne__(self, other): + return not (self == other) + + def __lt__(self, other): + return os.fspath(self) < os.fspath(other) + + def __gt__(self, other): + return os.fspath(self) > os.fspath(other) + + def samefile(self, other): + """Return True if 'other' references the same file as 'self'.""" + other = os.fspath(other) + if not isabs(other): + other = abspath(other) + if self == other: + return True + if not hasattr(os.path, "samefile"): + return False + return error.checked_call(os.path.samefile, self.strpath, other) + + def remove(self, rec=1, ignore_errors=False): + """Remove a file or directory (or a directory tree if rec=1). + if ignore_errors is True, errors while removing directories will + be ignored. + """ + if self.check(dir=1, link=0): + if rec: + # force remove of readonly files on windows + if iswin32: + self.chmod(0o700, rec=1) + import shutil + + error.checked_call( + shutil.rmtree, self.strpath, ignore_errors=ignore_errors + ) + else: + error.checked_call(os.rmdir, self.strpath) + else: + if iswin32: + self.chmod(0o700) + error.checked_call(os.remove, self.strpath) + + def computehash(self, hashtype="md5", chunksize=524288): + """Return hexdigest of hashvalue for this file.""" + try: + try: + import hashlib as mod + except ImportError: + if hashtype == "sha1": + hashtype = "sha" + mod = __import__(hashtype) + hash = getattr(mod, hashtype)() + except (AttributeError, ImportError): + raise ValueError(f"Don't know how to compute {hashtype!r} hash") + f = self.open("rb") + try: + while 1: + buf = f.read(chunksize) + if not buf: + return hash.hexdigest() + hash.update(buf) + finally: + f.close() + + def new(self, **kw): + """Create a modified version of this path. + the following keyword arguments modify various path parts:: + + a:/some/path/to/a/file.ext + xx drive + xxxxxxxxxxxxxxxxx dirname + xxxxxxxx basename + xxxx purebasename + xxx ext + """ + obj = object.__new__(self.__class__) + if not kw: + obj.strpath = self.strpath + return obj + drive, dirname, _basename, purebasename, ext = self._getbyspec( + "drive,dirname,basename,purebasename,ext" + ) + if "basename" in kw: + if "purebasename" in kw or "ext" in kw: + raise ValueError(f"invalid specification {kw!r}") + else: + pb = kw.setdefault("purebasename", purebasename) + try: + ext = kw["ext"] + except KeyError: + pass + else: + if ext and not ext.startswith("."): + ext = "." + ext + kw["basename"] = pb + ext + + if "dirname" in kw and not kw["dirname"]: + kw["dirname"] = drive + else: + kw.setdefault("dirname", dirname) + kw.setdefault("sep", self.sep) + obj.strpath = normpath("{dirname}{sep}{basename}".format(**kw)) + return obj + + def _getbyspec(self, spec: str) -> list[str]: + """See new for what 'spec' can be.""" + res = [] + parts = self.strpath.split(self.sep) + + args = filter(None, spec.split(",")) + for name in args: + if name == "drive": + res.append(parts[0]) + elif name == "dirname": + res.append(self.sep.join(parts[:-1])) + else: + basename = parts[-1] + if name == "basename": + res.append(basename) + else: + i = basename.rfind(".") + if i == -1: + purebasename, ext = basename, "" + else: + purebasename, ext = basename[:i], basename[i:] + if name == "purebasename": + res.append(purebasename) + elif name == "ext": + res.append(ext) + else: + raise ValueError(f"invalid part specification {name!r}") + return res + + def dirpath(self, *args, **kwargs): + """Return the directory path joined with any given path arguments.""" + if not kwargs: + path = object.__new__(self.__class__) + path.strpath = dirname(self.strpath) + if args: + path = path.join(*args) + return path + return self.new(basename="").join(*args, **kwargs) + + def join(self, *args: os.PathLike[str], abs: bool = False) -> LocalPath: + """Return a new path by appending all 'args' as path + components. if abs=1 is used restart from root if any + of the args is an absolute path. + """ + sep = self.sep + strargs = [os.fspath(arg) for arg in args] + strpath = self.strpath + if abs: + newargs: list[str] = [] + for arg in reversed(strargs): + if isabs(arg): + strpath = arg + strargs = newargs + break + newargs.insert(0, arg) + # special case for when we have e.g. strpath == "/" + actual_sep = "" if strpath.endswith(sep) else sep + for arg in strargs: + arg = arg.strip(sep) + if iswin32: + # allow unix style paths even on windows. + arg = arg.strip("/") + arg = arg.replace("/", sep) + strpath = strpath + actual_sep + arg + actual_sep = sep + obj = object.__new__(self.__class__) + obj.strpath = normpath(strpath) + return obj + + def open(self, mode="r", ensure=False, encoding=None): + """Return an opened file with the given mode. + + If ensure is True, create parent directories if needed. + """ + if ensure: + self.dirpath().ensure(dir=1) + if encoding: + return error.checked_call( + io.open, + self.strpath, + mode, + encoding=encoding, + ) + return error.checked_call(open, self.strpath, mode) + + def _fastjoin(self, name): + child = object.__new__(self.__class__) + child.strpath = self.strpath + self.sep + name + return child + + def islink(self): + return islink(self.strpath) + + def check(self, **kw): + """Check a path for existence and properties. + + Without arguments, return True if the path exists, otherwise False. + + valid checkers:: + + file = 1 # is a file + file = 0 # is not a file (may not even exist) + dir = 1 # is a dir + link = 1 # is a link + exists = 1 # exists + + You can specify multiple checker definitions, for example:: + + path.check(file=1, link=1) # a link pointing to a file + """ + if not kw: + return exists(self.strpath) + if len(kw) == 1: + if "dir" in kw: + return not kw["dir"] ^ isdir(self.strpath) + if "file" in kw: + return not kw["file"] ^ isfile(self.strpath) + if not kw: + kw = {"exists": 1} + return Checkers(self)._evaluate(kw) + + _patternchars = set("*?[" + os.sep) + + def listdir(self, fil=None, sort=None): + """List directory contents, possibly filter by the given fil func + and possibly sorted. + """ + if fil is None and sort is None: + names = error.checked_call(os.listdir, self.strpath) + return map_as_list(self._fastjoin, names) + if isinstance(fil, str): + if not self._patternchars.intersection(fil): + child = self._fastjoin(fil) + if exists(child.strpath): + return [child] + return [] + fil = FNMatcher(fil) + names = error.checked_call(os.listdir, self.strpath) + res = [] + for name in names: + child = self._fastjoin(name) + if fil is None or fil(child): + res.append(child) + self._sortlist(res, sort) + return res + + def size(self) -> int: + """Return size of the underlying file object""" + return self.stat().size + + def mtime(self) -> float: + """Return last modification time of the path.""" + return self.stat().mtime + + def copy(self, target, mode=False, stat=False): + """Copy path to target. + + If mode is True, will copy permission from path to target. + If stat is True, copy permission, last modification + time, last access time, and flags from path to target. + """ + if self.check(file=1): + if target.check(dir=1): + target = target.join(self.basename) + assert self != target + copychunked(self, target) + if mode: + copymode(self.strpath, target.strpath) + if stat: + copystat(self, target) + else: + + def rec(p): + return p.check(link=0) + + for x in self.visit(rec=rec): + relpath = x.relto(self) + newx = target.join(relpath) + newx.dirpath().ensure(dir=1) + if x.check(link=1): + newx.mksymlinkto(x.readlink()) + continue + elif x.check(file=1): + copychunked(x, newx) + elif x.check(dir=1): + newx.ensure(dir=1) + if mode: + copymode(x.strpath, newx.strpath) + if stat: + copystat(x, newx) + + def rename(self, target): + """Rename this path to target.""" + target = os.fspath(target) + return error.checked_call(os.rename, self.strpath, target) + + def dump(self, obj, bin=1): + """Pickle object into path location""" + f = self.open("wb") + import pickle + + try: + error.checked_call(pickle.dump, obj, f, bin) + finally: + f.close() + + def mkdir(self, *args): + """Create & return the directory joined with args.""" + p = self.join(*args) + error.checked_call(os.mkdir, os.fspath(p)) + return p + + def write_binary(self, data, ensure=False): + """Write binary data into path. If ensure is True create + missing parent directories. + """ + if ensure: + self.dirpath().ensure(dir=1) + with self.open("wb") as f: + f.write(data) + + def write_text(self, data, encoding, ensure=False): + """Write text data into path using the specified encoding. + If ensure is True create missing parent directories. + """ + if ensure: + self.dirpath().ensure(dir=1) + with self.open("w", encoding=encoding) as f: + f.write(data) + + def write(self, data, mode="w", ensure=False): + """Write data into path. If ensure is True create + missing parent directories. + """ + if ensure: + self.dirpath().ensure(dir=1) + if "b" in mode: + if not isinstance(data, bytes): + raise ValueError("can only process bytes") + else: + if not isinstance(data, str): + if not isinstance(data, bytes): + data = str(data) + else: + data = data.decode(sys.getdefaultencoding()) + f = self.open(mode) + try: + f.write(data) + finally: + f.close() + + def _ensuredirs(self): + parent = self.dirpath() + if parent == self: + return self + if parent.check(dir=0): + parent._ensuredirs() + if self.check(dir=0): + try: + self.mkdir() + except error.EEXIST: + # race condition: file/dir created by another thread/process. + # complain if it is not a dir + if self.check(dir=0): + raise + return self + + def ensure(self, *args, **kwargs): + """Ensure that an args-joined path exists (by default as + a file). if you specify a keyword argument 'dir=True' + then the path is forced to be a directory path. + """ + p = self.join(*args) + if kwargs.get("dir", 0): + return p._ensuredirs() + else: + p.dirpath()._ensuredirs() + if not p.check(file=1): + p.open("wb").close() + return p + + @overload + def stat(self, raising: Literal[True] = ...) -> Stat: ... + + @overload + def stat(self, raising: Literal[False]) -> Stat | None: ... + + def stat(self, raising: bool = True) -> Stat | None: + """Return an os.stat() tuple.""" + if raising: + return Stat(self, error.checked_call(os.stat, self.strpath)) + try: + return Stat(self, os.stat(self.strpath)) + except KeyboardInterrupt: + raise + except Exception: + return None + + def lstat(self) -> Stat: + """Return an os.lstat() tuple.""" + return Stat(self, error.checked_call(os.lstat, self.strpath)) + + def setmtime(self, mtime=None): + """Set modification time for the given path. if 'mtime' is None + (the default) then the file's mtime is set to current time. + + Note that the resolution for 'mtime' is platform dependent. + """ + if mtime is None: + return error.checked_call(os.utime, self.strpath, mtime) + try: + return error.checked_call(os.utime, self.strpath, (-1, mtime)) + except error.EINVAL: + return error.checked_call(os.utime, self.strpath, (self.atime(), mtime)) + + def chdir(self): + """Change directory to self and return old current directory""" + try: + old = self.__class__() + except error.ENOENT: + old = None + error.checked_call(os.chdir, self.strpath) + return old + + @contextmanager + def as_cwd(self): + """ + Return a context manager, which changes to the path's dir during the + managed "with" context. + On __enter__ it returns the old dir, which might be ``None``. + """ + old = self.chdir() + try: + yield old + finally: + if old is not None: + old.chdir() + + def realpath(self): + """Return a new path which contains no symbolic links.""" + return self.__class__(os.path.realpath(self.strpath)) + + def atime(self): + """Return last access time of the path.""" + return self.stat().atime + + def __repr__(self): + return f"local({self.strpath!r})" + + def __str__(self): + """Return string representation of the Path.""" + return self.strpath + + def chmod(self, mode, rec=0): + """Change permissions to the given mode. If mode is an + integer it directly encodes the os-specific modes. + if rec is True perform recursively. + """ + if not isinstance(mode, int): + raise TypeError(f"mode {mode!r} must be an integer") + if rec: + for x in self.visit(rec=rec): + error.checked_call(os.chmod, str(x), mode) + error.checked_call(os.chmod, self.strpath, mode) + + def pypkgpath(self): + """Return the Python package path by looking for the last + directory upwards which still contains an __init__.py. + Return None if a pkgpath cannot be determined. + """ + pkgpath = None + for parent in self.parts(reverse=True): + if parent.isdir(): + if not parent.join("__init__.py").exists(): + break + if not isimportable(parent.basename): + break + pkgpath = parent + return pkgpath + + def _ensuresyspath(self, ensuremode, path): + if ensuremode: + s = str(path) + if ensuremode == "append": + if s not in sys.path: + sys.path.append(s) + else: + if s != sys.path[0]: + sys.path.insert(0, s) + + def pyimport(self, modname=None, ensuresyspath=True): + """Return path as an imported python module. + + If modname is None, look for the containing package + and construct an according module name. + The module will be put/looked up in sys.modules. + if ensuresyspath is True then the root dir for importing + the file (taking __init__.py files into account) will + be prepended to sys.path if it isn't there already. + If ensuresyspath=="append" the root dir will be appended + if it isn't already contained in sys.path. + if ensuresyspath is False no modification of syspath happens. + + Special value of ensuresyspath=="importlib" is intended + purely for using in pytest, it is capable only of importing + separate .py files outside packages, e.g. for test suite + without any __init__.py file. It effectively allows having + same-named test modules in different places and offers + mild opt-in via this option. Note that it works only in + recent versions of python. + """ + if not self.check(): + raise error.ENOENT(self) + + if ensuresyspath == "importlib": + if modname is None: + modname = self.purebasename + spec = importlib.util.spec_from_file_location(modname, str(self)) + if spec is None or spec.loader is None: + raise ImportError(f"Can't find module {modname} at location {self!s}") + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) + return mod + + pkgpath = None + if modname is None: + pkgpath = self.pypkgpath() + if pkgpath is not None: + pkgroot = pkgpath.dirpath() + names = self.new(ext="").relto(pkgroot).split(self.sep) + if names[-1] == "__init__": + names.pop() + modname = ".".join(names) + else: + pkgroot = self.dirpath() + modname = self.purebasename + + self._ensuresyspath(ensuresyspath, pkgroot) + __import__(modname) + mod = sys.modules[modname] + if self.basename == "__init__.py": + return mod # we don't check anything as we might + # be in a namespace package ... too icky to check + modfile = mod.__file__ + assert modfile is not None + if modfile[-4:] in (".pyc", ".pyo"): + modfile = modfile[:-1] + elif modfile.endswith("$py.class"): + modfile = modfile[:-9] + ".py" + if modfile.endswith(os.sep + "__init__.py"): + if self.basename != "__init__.py": + modfile = modfile[:-12] + try: + issame = self.samefile(modfile) + except error.ENOENT: + issame = False + if not issame: + ignore = os.getenv("PY_IGNORE_IMPORTMISMATCH") + if ignore != "1": + raise self.ImportMismatchError(modname, modfile, self) + return mod + else: + try: + return sys.modules[modname] + except KeyError: + # we have a custom modname, do a pseudo-import + import types + + mod = types.ModuleType(modname) + mod.__file__ = str(self) + sys.modules[modname] = mod + try: + with open(str(self), "rb") as f: + exec(f.read(), mod.__dict__) + except BaseException: + del sys.modules[modname] + raise + return mod + + def sysexec(self, *argv: os.PathLike[str], **popen_opts: Any) -> str: + """Return stdout text from executing a system child process, + where the 'self' path points to executable. + The process is directly invoked and not through a system shell. + """ + from subprocess import PIPE + from subprocess import Popen + + popen_opts.pop("stdout", None) + popen_opts.pop("stderr", None) + proc = Popen( + [str(self)] + [str(arg) for arg in argv], + **popen_opts, + stdout=PIPE, + stderr=PIPE, + ) + stdout: str | bytes + stdout, stderr = proc.communicate() + ret = proc.wait() + if isinstance(stdout, bytes): + stdout = stdout.decode(sys.getdefaultencoding()) + if ret != 0: + if isinstance(stderr, bytes): + stderr = stderr.decode(sys.getdefaultencoding()) + raise RuntimeError( + ret, + ret, + str(self), + stdout, + stderr, + ) + return stdout + + @classmethod + def sysfind(cls, name, checker=None, paths=None): + """Return a path object found by looking at the systems + underlying PATH specification. If the checker is not None + it will be invoked to filter matching paths. If a binary + cannot be found, None is returned + Note: This is probably not working on plain win32 systems + but may work on cygwin. + """ + if isabs(name): + p = local(name) + if p.check(file=1): + return p + else: + if paths is None: + if iswin32: + paths = os.environ["Path"].split(";") + if "" not in paths and "." not in paths: + paths.append(".") + try: + systemroot = os.environ["SYSTEMROOT"] + except KeyError: + pass + else: + paths = [ + path.replace("%SystemRoot%", systemroot) for path in paths + ] + else: + paths = os.environ["PATH"].split(":") + tryadd = [] + if iswin32: + tryadd += os.environ["PATHEXT"].split(os.pathsep) + tryadd.append("") + + for x in paths: + for addext in tryadd: + p = local(x).join(name, abs=True) + addext + try: + if p.check(file=1): + if checker: + if not checker(p): + continue + return p + except error.EACCES: + pass + return None + + @classmethod + def _gethomedir(cls): + try: + x = os.environ["HOME"] + except KeyError: + try: + x = os.environ["HOMEDRIVE"] + os.environ["HOMEPATH"] + except KeyError: + return None + return cls(x) + + # """ + # special class constructors for local filesystem paths + # """ + @classmethod + def get_temproot(cls): + """Return the system's temporary directory + (where tempfiles are usually created in) + """ + import tempfile + + return local(tempfile.gettempdir()) + + @classmethod + def mkdtemp(cls, rootdir=None): + """Return a Path object pointing to a fresh new temporary directory + (which we created ourselves). + """ + import tempfile + + if rootdir is None: + rootdir = cls.get_temproot() + path = error.checked_call(tempfile.mkdtemp, dir=str(rootdir)) + return cls(path) + + @classmethod + def make_numbered_dir( + cls, prefix="session-", rootdir=None, keep=3, lock_timeout=172800 + ): # two days + """Return unique directory with a number greater than the current + maximum one. The number is assumed to start directly after prefix. + if keep is true directories with a number less than (maxnum-keep) + will be removed. If .lock files are used (lock_timeout non-zero), + algorithm is multi-process safe. + """ + if rootdir is None: + rootdir = cls.get_temproot() + + nprefix = prefix.lower() + + def parse_num(path): + """Parse the number out of a path (if it matches the prefix)""" + nbasename = path.basename.lower() + if nbasename.startswith(nprefix): + try: + return int(nbasename[len(nprefix) :]) + except ValueError: + pass + + def create_lockfile(path): + """Exclusively create lockfile. Throws when failed""" + mypid = os.getpid() + lockfile = path.join(".lock") + if hasattr(lockfile, "mksymlinkto"): + lockfile.mksymlinkto(str(mypid)) + else: + fd = error.checked_call( + os.open, str(lockfile), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644 + ) + with os.fdopen(fd, "w") as f: + f.write(str(mypid)) + return lockfile + + def atexit_remove_lockfile(lockfile): + """Ensure lockfile is removed at process exit""" + mypid = os.getpid() + + def try_remove_lockfile(): + # in a fork() situation, only the last process should + # remove the .lock, otherwise the other processes run the + # risk of seeing their temporary dir disappear. For now + # we remove the .lock in the parent only (i.e. we assume + # that the children finish before the parent). + if os.getpid() != mypid: + return + try: + lockfile.remove() + except error.Error: + pass + + atexit.register(try_remove_lockfile) + + # compute the maximum number currently in use with the prefix + lastmax = None + while True: + maxnum = -1 + for path in rootdir.listdir(): + num = parse_num(path) + if num is not None: + maxnum = max(maxnum, num) + + # make the new directory + try: + udir = rootdir.mkdir(prefix + str(maxnum + 1)) + if lock_timeout: + lockfile = create_lockfile(udir) + atexit_remove_lockfile(lockfile) + except (error.EEXIST, error.ENOENT, error.EBUSY): + # race condition (1): another thread/process created the dir + # in the meantime - try again + # race condition (2): another thread/process spuriously acquired + # lock treating empty directory as candidate + # for removal - try again + # race condition (3): another thread/process tried to create the lock at + # the same time (happened in Python 3.3 on Windows) + # https://ci.appveyor.com/project/pytestbot/py/build/1.0.21/job/ffi85j4c0lqwsfwa + if lastmax == maxnum: + raise + lastmax = maxnum + continue + break + + def get_mtime(path): + """Read file modification time""" + try: + return path.lstat().mtime + except error.Error: + pass + + garbage_prefix = prefix + "garbage-" + + def is_garbage(path): + """Check if path denotes directory scheduled for removal""" + bn = path.basename + return bn.startswith(garbage_prefix) + + # prune old directories + udir_time = get_mtime(udir) + if keep and udir_time: + for path in rootdir.listdir(): + num = parse_num(path) + if num is not None and num <= (maxnum - keep): + try: + # try acquiring lock to remove directory as exclusive user + if lock_timeout: + create_lockfile(path) + except (error.EEXIST, error.ENOENT, error.EBUSY): + path_time = get_mtime(path) + if not path_time: + # assume directory doesn't exist now + continue + if abs(udir_time - path_time) < lock_timeout: + # assume directory with lockfile exists + # and lock timeout hasn't expired yet + continue + + # path dir locked for exclusive use + # and scheduled for removal to avoid another thread/process + # treating it as a new directory or removal candidate + garbage_path = rootdir.join(garbage_prefix + str(uuid.uuid4())) + try: + path.rename(garbage_path) + garbage_path.remove(rec=1) + except KeyboardInterrupt: + raise + except Exception: # this might be error.Error, WindowsError ... + pass + if is_garbage(path): + try: + path.remove(rec=1) + except KeyboardInterrupt: + raise + except Exception: # this might be error.Error, WindowsError ... + pass + + # make link... + try: + username = os.environ["USER"] # linux, et al + except KeyError: + try: + username = os.environ["USERNAME"] # windows + except KeyError: + username = "current" + + src = str(udir) + dest = src[: src.rfind("-")] + "-" + username + try: + os.unlink(dest) + except OSError: + pass + try: + os.symlink(src, dest) + except (OSError, AttributeError, NotImplementedError): + pass + + return udir + + +def copymode(src, dest): + """Copy permission from src to dst.""" + import shutil + + shutil.copymode(src, dest) + + +def copystat(src, dest): + """Copy permission, last modification time, + last access time, and flags from src to dst.""" + import shutil + + shutil.copystat(str(src), str(dest)) + + +def copychunked(src, dest): + chunksize = 524288 # half a meg of bytes + fsrc = src.open("rb") + try: + fdest = dest.open("wb") + try: + while 1: + buf = fsrc.read(chunksize) + if not buf: + break + fdest.write(buf) + finally: + fdest.close() + finally: + fsrc.close() + + +def isimportable(name): + if name and (name[0].isalpha() or name[0] == "_"): + name = name.replace("_", "") + return not name or name.isalnum() + + +local = LocalPath diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_version.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_version.py new file mode 100644 index 0000000..0e57e54 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/_version.py @@ -0,0 +1,34 @@ +# file generated by setuptools-scm +# don't change, don't track in version control + +__all__ = [ + "__version__", + "__version_tuple__", + "version", + "version_tuple", + "__commit_id__", + "commit_id", +] + +TYPE_CHECKING = False +if TYPE_CHECKING: + from typing import Tuple + from typing import Union + + VERSION_TUPLE = Tuple[Union[int, str], ...] + COMMIT_ID = Union[str, None] +else: + VERSION_TUPLE = object + COMMIT_ID = object + +version: str +__version__: str +__version_tuple__: VERSION_TUPLE +version_tuple: VERSION_TUPLE +commit_id: COMMIT_ID +__commit_id__: COMMIT_ID + +__version__ = version = '9.0.0' +__version_tuple__ = version_tuple = (9, 0, 0) + +__commit_id__ = commit_id = None diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/assertion/__init__.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/assertion/__init__.py new file mode 100644 index 0000000..22f3ca8 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/assertion/__init__.py @@ -0,0 +1,208 @@ +# mypy: allow-untyped-defs +"""Support for presenting detailed information in failing assertions.""" + +from __future__ import annotations + +from collections.abc import Generator +import sys +from typing import Any +from typing import Protocol +from typing import TYPE_CHECKING + +from _pytest.assertion import rewrite +from _pytest.assertion import truncate +from _pytest.assertion import util +from _pytest.assertion.rewrite import assertstate_key +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.nodes import Item + + +if TYPE_CHECKING: + from _pytest.main import Session + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--assert", + action="store", + dest="assertmode", + choices=("rewrite", "plain"), + default="rewrite", + metavar="MODE", + help=( + "Control assertion debugging tools.\n" + "'plain' performs no assertion debugging.\n" + "'rewrite' (the default) rewrites assert statements in test modules" + " on import to provide assert expression information." + ), + ) + parser.addini( + "enable_assertion_pass_hook", + type="bool", + default=False, + help="Enables the pytest_assertion_pass hook. " + "Make sure to delete any previously generated pyc cache files.", + ) + + parser.addini( + "truncation_limit_lines", + default=None, + help="Set threshold of LINES after which truncation will take effect", + ) + parser.addini( + "truncation_limit_chars", + default=None, + help=("Set threshold of CHARS after which truncation will take effect"), + ) + + Config._add_verbosity_ini( + parser, + Config.VERBOSITY_ASSERTIONS, + help=( + "Specify a verbosity level for assertions, overriding the main level. " + "Higher levels will provide more detailed explanation when an assertion fails." + ), + ) + + +def register_assert_rewrite(*names: str) -> None: + """Register one or more module names to be rewritten on import. + + This function will make sure that this module or all modules inside + the package will get their assert statements rewritten. + Thus you should make sure to call this before the module is + actually imported, usually in your __init__.py if you are a plugin + using a package. + + :param names: The module names to register. + """ + for name in names: + if not isinstance(name, str): + msg = "expected module names as *args, got {0} instead" # type: ignore[unreachable] + raise TypeError(msg.format(repr(names))) + rewrite_hook: RewriteHook + for hook in sys.meta_path: + if isinstance(hook, rewrite.AssertionRewritingHook): + rewrite_hook = hook + break + else: + rewrite_hook = DummyRewriteHook() + rewrite_hook.mark_rewrite(*names) + + +class RewriteHook(Protocol): + def mark_rewrite(self, *names: str) -> None: ... + + +class DummyRewriteHook: + """A no-op import hook for when rewriting is disabled.""" + + def mark_rewrite(self, *names: str) -> None: + pass + + +class AssertionState: + """State for the assertion plugin.""" + + def __init__(self, config: Config, mode) -> None: + self.mode = mode + self.trace = config.trace.root.get("assertion") + self.hook: rewrite.AssertionRewritingHook | None = None + + +def install_importhook(config: Config) -> rewrite.AssertionRewritingHook: + """Try to install the rewrite hook, raise SystemError if it fails.""" + config.stash[assertstate_key] = AssertionState(config, "rewrite") + config.stash[assertstate_key].hook = hook = rewrite.AssertionRewritingHook(config) + sys.meta_path.insert(0, hook) + config.stash[assertstate_key].trace("installed rewrite import hook") + + def undo() -> None: + hook = config.stash[assertstate_key].hook + if hook is not None and hook in sys.meta_path: + sys.meta_path.remove(hook) + + config.add_cleanup(undo) + return hook + + +def pytest_collection(session: Session) -> None: + # This hook is only called when test modules are collected + # so for example not in the managing process of pytest-xdist + # (which does not collect test modules). + assertstate = session.config.stash.get(assertstate_key, None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(session) + + +@hookimpl(wrapper=True, tryfirst=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]: + """Setup the pytest_assertrepr_compare and pytest_assertion_pass hooks. + + The rewrite module will use util._reprcompare if it exists to use custom + reporting via the pytest_assertrepr_compare hook. This sets up this custom + comparison for the test. + """ + ihook = item.ihook + + def callbinrepr(op, left: object, right: object) -> str | None: + """Call the pytest_assertrepr_compare hook and prepare the result. + + This uses the first result from the hook and then ensures the + following: + * Overly verbose explanations are truncated unless configured otherwise + (eg. if running in verbose mode). + * Embedded newlines are escaped to help util.format_explanation() + later. + * If the rewrite mode is used embedded %-characters are replaced + to protect later % formatting. + + The result can be formatted by util.format_explanation() for + pretty printing. + """ + hook_result = ihook.pytest_assertrepr_compare( + config=item.config, op=op, left=left, right=right + ) + for new_expl in hook_result: + if new_expl: + new_expl = truncate.truncate_if_required(new_expl, item) + new_expl = [line.replace("\n", "\\n") for line in new_expl] + res = "\n~".join(new_expl) + if item.config.getvalue("assertmode") == "rewrite": + res = res.replace("%", "%%") + return res + return None + + saved_assert_hooks = util._reprcompare, util._assertion_pass + util._reprcompare = callbinrepr + util._config = item.config + + if ihook.pytest_assertion_pass.get_hookimpls(): + + def call_assertion_pass_hook(lineno: int, orig: str, expl: str) -> None: + ihook.pytest_assertion_pass(item=item, lineno=lineno, orig=orig, expl=expl) + + util._assertion_pass = call_assertion_pass_hook + + try: + return (yield) + finally: + util._reprcompare, util._assertion_pass = saved_assert_hooks + util._config = None + + +def pytest_sessionfinish(session: Session) -> None: + assertstate = session.config.stash.get(assertstate_key, None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(None) + + +def pytest_assertrepr_compare( + config: Config, op: str, left: Any, right: Any +) -> list[str] | None: + return util.assertrepr_compare(config=config, op=op, left=left, right=right) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/assertion/rewrite.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/assertion/rewrite.py new file mode 100644 index 0000000..566549d --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/assertion/rewrite.py @@ -0,0 +1,1202 @@ +"""Rewrite assertion AST to produce nice error messages.""" + +from __future__ import annotations + +import ast +from collections import defaultdict +from collections.abc import Callable +from collections.abc import Iterable +from collections.abc import Iterator +from collections.abc import Sequence +import errno +import functools +import importlib.abc +import importlib.machinery +import importlib.util +import io +import itertools +import marshal +import os +from pathlib import Path +from pathlib import PurePath +import struct +import sys +import tokenize +import types +from typing import IO +from typing import TYPE_CHECKING + + +if sys.version_info >= (3, 12): + from importlib.resources.abc import TraversableResources +else: + from importlib.abc import TraversableResources +if sys.version_info < (3, 11): + from importlib.readers import FileReader +else: + from importlib.resources.readers import FileReader + + +from _pytest._io.saferepr import DEFAULT_REPR_MAX_SIZE +from _pytest._io.saferepr import saferepr +from _pytest._io.saferepr import saferepr_unlimited +from _pytest._version import version +from _pytest.assertion import util +from _pytest.config import Config +from _pytest.fixtures import FixtureFunctionDefinition +from _pytest.main import Session +from _pytest.pathlib import absolutepath +from _pytest.pathlib import fnmatch_ex +from _pytest.stash import StashKey + + +# fmt: off +from _pytest.assertion.util import format_explanation as _format_explanation # noqa:F401, isort:skip +# fmt:on + +if TYPE_CHECKING: + from _pytest.assertion import AssertionState + + +class Sentinel: + pass + + +assertstate_key = StashKey["AssertionState"]() + +# pytest caches rewritten pycs in pycache dirs +PYTEST_TAG = f"{sys.implementation.cache_tag}-pytest-{version}" +PYC_EXT = ".py" + ((__debug__ and "c") or "o") +PYC_TAIL = "." + PYTEST_TAG + PYC_EXT + +# Special marker that denotes we have just left a scope definition +_SCOPE_END_MARKER = Sentinel() + + +class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader): + """PEP302/PEP451 import hook which rewrites asserts.""" + + def __init__(self, config: Config) -> None: + self.config = config + try: + self.fnpats = config.getini("python_files") + except ValueError: + self.fnpats = ["test_*.py", "*_test.py"] + self.session: Session | None = None + self._rewritten_names: dict[str, Path] = {} + self._must_rewrite: set[str] = set() + # flag to guard against trying to rewrite a pyc file while we are already writing another pyc file, + # which might result in infinite recursion (#3506) + self._writing_pyc = False + self._basenames_to_check_rewrite = {"conftest"} + self._marked_for_rewrite_cache: dict[str, bool] = {} + self._session_paths_checked = False + + def set_session(self, session: Session | None) -> None: + self.session = session + self._session_paths_checked = False + + # Indirection so we can mock calls to find_spec originated from the hook during testing + _find_spec = importlib.machinery.PathFinder.find_spec + + def find_spec( + self, + name: str, + path: Sequence[str | bytes] | None = None, + target: types.ModuleType | None = None, + ) -> importlib.machinery.ModuleSpec | None: + if self._writing_pyc: + return None + state = self.config.stash[assertstate_key] + if self._early_rewrite_bailout(name, state): + return None + state.trace(f"find_module called for: {name}") + + # Type ignored because mypy is confused about the `self` binding here. + spec = self._find_spec(name, path) # type: ignore + + if spec is None and path is not None: + # With --import-mode=importlib, PathFinder cannot find spec without modifying `sys.path`, + # causing inability to assert rewriting (#12659). + # At this point, try using the file path to find the module spec. + for _path_str in path: + spec = importlib.util.spec_from_file_location(name, _path_str) + if spec is not None: + break + + if ( + # the import machinery could not find a file to import + spec is None + # this is a namespace package (without `__init__.py`) + # there's nothing to rewrite there + or spec.origin is None + # we can only rewrite source files + or not isinstance(spec.loader, importlib.machinery.SourceFileLoader) + # if the file doesn't exist, we can't rewrite it + or not os.path.exists(spec.origin) + ): + return None + else: + fn = spec.origin + + if not self._should_rewrite(name, fn, state): + return None + + return importlib.util.spec_from_file_location( + name, + fn, + loader=self, + submodule_search_locations=spec.submodule_search_locations, + ) + + def create_module( + self, spec: importlib.machinery.ModuleSpec + ) -> types.ModuleType | None: + return None # default behaviour is fine + + def exec_module(self, module: types.ModuleType) -> None: + assert module.__spec__ is not None + assert module.__spec__.origin is not None + fn = Path(module.__spec__.origin) + state = self.config.stash[assertstate_key] + + self._rewritten_names[module.__name__] = fn + + # The requested module looks like a test file, so rewrite it. This is + # the most magical part of the process: load the source, rewrite the + # asserts, and load the rewritten source. We also cache the rewritten + # module code in a special pyc. We must be aware of the possibility of + # concurrent pytest processes rewriting and loading pycs. To avoid + # tricky race conditions, we maintain the following invariant: The + # cached pyc is always a complete, valid pyc. Operations on it must be + # atomic. POSIX's atomic rename comes in handy. + write = not sys.dont_write_bytecode + cache_dir = get_cache_dir(fn) + if write: + ok = try_makedirs(cache_dir) + if not ok: + write = False + state.trace(f"read only directory: {cache_dir}") + + cache_name = fn.name[:-3] + PYC_TAIL + pyc = cache_dir / cache_name + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... + co = _read_pyc(fn, pyc, state.trace) + if co is None: + state.trace(f"rewriting {fn!r}") + source_stat, co = _rewrite_test(fn, self.config) + if write: + self._writing_pyc = True + try: + _write_pyc(state, co, source_stat, pyc) + finally: + self._writing_pyc = False + else: + state.trace(f"found cached rewritten pyc for {fn}") + exec(co, module.__dict__) + + def _early_rewrite_bailout(self, name: str, state: AssertionState) -> bool: + """A fast way to get out of rewriting modules. + + Profiling has shown that the call to PathFinder.find_spec (inside of + the find_spec from this class) is a major slowdown, so, this method + tries to filter what we're sure won't be rewritten before getting to + it. + """ + if self.session is not None and not self._session_paths_checked: + self._session_paths_checked = True + for initial_path in self.session._initialpaths: + # Make something as c:/projects/my_project/path.py -> + # ['c:', 'projects', 'my_project', 'path.py'] + parts = str(initial_path).split(os.sep) + # add 'path' to basenames to be checked. + self._basenames_to_check_rewrite.add(os.path.splitext(parts[-1])[0]) + + # Note: conftest already by default in _basenames_to_check_rewrite. + parts = name.split(".") + if parts[-1] in self._basenames_to_check_rewrite: + return False + + # For matching the name it must be as if it was a filename. + path = PurePath(*parts).with_suffix(".py") + + for pat in self.fnpats: + # if the pattern contains subdirectories ("tests/**.py" for example) we can't bail out based + # on the name alone because we need to match against the full path + if os.path.dirname(pat): + return False + if fnmatch_ex(pat, path): + return False + + if self._is_marked_for_rewrite(name, state): + return False + + state.trace(f"early skip of rewriting module: {name}") + return True + + def _should_rewrite(self, name: str, fn: str, state: AssertionState) -> bool: + # always rewrite conftest files + if os.path.basename(fn) == "conftest.py": + state.trace(f"rewriting conftest file: {fn!r}") + return True + + if self.session is not None: + if self.session.isinitpath(absolutepath(fn)): + state.trace(f"matched test file (was specified on cmdline): {fn!r}") + return True + + # modules not passed explicitly on the command line are only + # rewritten if they match the naming convention for test files + fn_path = PurePath(fn) + for pat in self.fnpats: + if fnmatch_ex(pat, fn_path): + state.trace(f"matched test file {fn!r}") + return True + + return self._is_marked_for_rewrite(name, state) + + def _is_marked_for_rewrite(self, name: str, state: AssertionState) -> bool: + try: + return self._marked_for_rewrite_cache[name] + except KeyError: + for marked in self._must_rewrite: + if name == marked or name.startswith(marked + "."): + state.trace(f"matched marked file {name!r} (from {marked!r})") + self._marked_for_rewrite_cache[name] = True + return True + + self._marked_for_rewrite_cache[name] = False + return False + + def mark_rewrite(self, *names: str) -> None: + """Mark import names as needing to be rewritten. + + The named module or package as well as any nested modules will + be rewritten on import. + """ + already_imported = ( + set(names).intersection(sys.modules).difference(self._rewritten_names) + ) + for name in already_imported: + mod = sys.modules[name] + if not AssertionRewriter.is_rewrite_disabled( + mod.__doc__ or "" + ) and not isinstance(mod.__loader__, type(self)): + self._warn_already_imported(name) + self._must_rewrite.update(names) + self._marked_for_rewrite_cache.clear() + + def _warn_already_imported(self, name: str) -> None: + from _pytest.warning_types import PytestAssertRewriteWarning + + self.config.issue_config_time_warning( + PytestAssertRewriteWarning( + f"Module already imported so cannot be rewritten; {name}" + ), + stacklevel=5, + ) + + def get_data(self, pathname: str | bytes) -> bytes: + """Optional PEP302 get_data API.""" + with open(pathname, "rb") as f: + return f.read() + + def get_resource_reader(self, name: str) -> TraversableResources: + return FileReader(types.SimpleNamespace(path=self._rewritten_names[name])) # type: ignore[arg-type] + + +def _write_pyc_fp( + fp: IO[bytes], source_stat: os.stat_result, co: types.CodeType +) -> None: + # Technically, we don't have to have the same pyc format as + # (C)Python, since these "pycs" should never be seen by builtin + # import. However, there's little reason to deviate. + fp.write(importlib.util.MAGIC_NUMBER) + # https://www.python.org/dev/peps/pep-0552/ + flags = b"\x00\x00\x00\x00" + fp.write(flags) + # as of now, bytecode header expects 32-bit numbers for size and mtime (#4903) + mtime = int(source_stat.st_mtime) & 0xFFFFFFFF + size = source_stat.st_size & 0xFFFFFFFF + # " bool: + proc_pyc = f"{pyc}.{os.getpid()}" + try: + with open(proc_pyc, "wb") as fp: + _write_pyc_fp(fp, source_stat, co) + except OSError as e: + state.trace(f"error writing pyc file at {proc_pyc}: errno={e.errno}") + return False + + try: + os.replace(proc_pyc, pyc) + except OSError as e: + state.trace(f"error writing pyc file at {pyc}: {e}") + # we ignore any failure to write the cache file + # there are many reasons, permission-denied, pycache dir being a + # file etc. + return False + return True + + +def _rewrite_test(fn: Path, config: Config) -> tuple[os.stat_result, types.CodeType]: + """Read and rewrite *fn* and return the code object.""" + stat = os.stat(fn) + source = fn.read_bytes() + strfn = str(fn) + tree = ast.parse(source, filename=strfn) + rewrite_asserts(tree, source, strfn, config) + co = compile(tree, strfn, "exec", dont_inherit=True) + return stat, co + + +def _read_pyc( + source: Path, pyc: Path, trace: Callable[[str], None] = lambda x: None +) -> types.CodeType | None: + """Possibly read a pytest pyc containing rewritten code. + + Return rewritten code if successful or None if not. + """ + try: + fp = open(pyc, "rb") + except OSError: + return None + with fp: + try: + stat_result = os.stat(source) + mtime = int(stat_result.st_mtime) + size = stat_result.st_size + data = fp.read(16) + except OSError as e: + trace(f"_read_pyc({source}): OSError {e}") + return None + # Check for invalid or out of date pyc file. + if len(data) != (16): + trace(f"_read_pyc({source}): invalid pyc (too short)") + return None + if data[:4] != importlib.util.MAGIC_NUMBER: + trace(f"_read_pyc({source}): invalid pyc (bad magic number)") + return None + if data[4:8] != b"\x00\x00\x00\x00": + trace(f"_read_pyc({source}): invalid pyc (unsupported flags)") + return None + mtime_data = data[8:12] + if int.from_bytes(mtime_data, "little") != mtime & 0xFFFFFFFF: + trace(f"_read_pyc({source}): out of date") + return None + size_data = data[12:16] + if int.from_bytes(size_data, "little") != size & 0xFFFFFFFF: + trace(f"_read_pyc({source}): invalid pyc (incorrect size)") + return None + try: + co = marshal.load(fp) + except Exception as e: + trace(f"_read_pyc({source}): marshal.load error {e}") + return None + if not isinstance(co, types.CodeType): + trace(f"_read_pyc({source}): not a code object") + return None + return co + + +def rewrite_asserts( + mod: ast.Module, + source: bytes, + module_path: str | None = None, + config: Config | None = None, +) -> None: + """Rewrite the assert statements in mod.""" + AssertionRewriter(module_path, config, source).run(mod) + + +def _saferepr(obj: object) -> str: + r"""Get a safe repr of an object for assertion error messages. + + The assertion formatting (util.format_explanation()) requires + newlines to be escaped since they are a special character for it. + Normally assertion.util.format_explanation() does this but for a + custom repr it is possible to contain one of the special escape + sequences, especially '\n{' and '\n}' are likely to be present in + JSON reprs. + """ + if isinstance(obj, types.MethodType): + # for bound methods, skip redundant information + return obj.__name__ + + maxsize = _get_maxsize_for_saferepr(util._config) + if not maxsize: + return saferepr_unlimited(obj).replace("\n", "\\n") + return saferepr(obj, maxsize=maxsize).replace("\n", "\\n") + + +def _get_maxsize_for_saferepr(config: Config | None) -> int | None: + """Get `maxsize` configuration for saferepr based on the given config object.""" + if config is None: + verbosity = 0 + else: + verbosity = config.get_verbosity(Config.VERBOSITY_ASSERTIONS) + if verbosity >= 2: + return None + if verbosity >= 1: + return DEFAULT_REPR_MAX_SIZE * 10 + return DEFAULT_REPR_MAX_SIZE + + +def _format_assertmsg(obj: object) -> str: + r"""Format the custom assertion message given. + + For strings this simply replaces newlines with '\n~' so that + util.format_explanation() will preserve them instead of escaping + newlines. For other objects saferepr() is used first. + """ + # reprlib appears to have a bug which means that if a string + # contains a newline it gets escaped, however if an object has a + # .__repr__() which contains newlines it does not get escaped. + # However in either case we want to preserve the newline. + replaces = [("\n", "\n~"), ("%", "%%")] + if not isinstance(obj, str): + obj = saferepr(obj, _get_maxsize_for_saferepr(util._config)) + replaces.append(("\\n", "\n~")) + + for r1, r2 in replaces: + obj = obj.replace(r1, r2) + + return obj + + +def _should_repr_global_name(obj: object) -> bool: + if callable(obj): + # For pytest fixtures the __repr__ method provides more information than the function name. + return isinstance(obj, FixtureFunctionDefinition) + + try: + return not hasattr(obj, "__name__") + except Exception: + return True + + +def _format_boolop(explanations: Iterable[str], is_or: bool) -> str: + explanation = "(" + ((is_or and " or ") or " and ").join(explanations) + ")" + return explanation.replace("%", "%%") + + +def _call_reprcompare( + ops: Sequence[str], + results: Sequence[bool], + expls: Sequence[str], + each_obj: Sequence[object], +) -> str: + for i, res, expl in zip(range(len(ops)), results, expls, strict=True): + try: + done = not res + except Exception: + done = True + if done: + break + if util._reprcompare is not None: + custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1]) + if custom is not None: + return custom + return expl + + +def _call_assertion_pass(lineno: int, orig: str, expl: str) -> None: + if util._assertion_pass is not None: + util._assertion_pass(lineno, orig, expl) + + +def _check_if_assertion_pass_impl() -> bool: + """Check if any plugins implement the pytest_assertion_pass hook + in order not to generate explanation unnecessarily (might be expensive).""" + return True if util._assertion_pass else False + + +UNARY_MAP = {ast.Not: "not %s", ast.Invert: "~%s", ast.USub: "-%s", ast.UAdd: "+%s"} + +BINOP_MAP = { + ast.BitOr: "|", + ast.BitXor: "^", + ast.BitAnd: "&", + ast.LShift: "<<", + ast.RShift: ">>", + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.FloorDiv: "//", + ast.Mod: "%%", # escaped for string formatting + ast.Eq: "==", + ast.NotEq: "!=", + ast.Lt: "<", + ast.LtE: "<=", + ast.Gt: ">", + ast.GtE: ">=", + ast.Pow: "**", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in", + ast.MatMult: "@", +} + + +def traverse_node(node: ast.AST) -> Iterator[ast.AST]: + """Recursively yield node and all its children in depth-first order.""" + yield node + for child in ast.iter_child_nodes(node): + yield from traverse_node(child) + + +@functools.lru_cache(maxsize=1) +def _get_assertion_exprs(src: bytes) -> dict[int, str]: + """Return a mapping from {lineno: "assertion test expression"}.""" + ret: dict[int, str] = {} + + depth = 0 + lines: list[str] = [] + assert_lineno: int | None = None + seen_lines: set[int] = set() + + def _write_and_reset() -> None: + nonlocal depth, lines, assert_lineno, seen_lines + assert assert_lineno is not None + ret[assert_lineno] = "".join(lines).rstrip().rstrip("\\") + depth = 0 + lines = [] + assert_lineno = None + seen_lines = set() + + tokens = tokenize.tokenize(io.BytesIO(src).readline) + for tp, source, (lineno, offset), _, line in tokens: + if tp == tokenize.NAME and source == "assert": + assert_lineno = lineno + elif assert_lineno is not None: + # keep track of depth for the assert-message `,` lookup + if tp == tokenize.OP and source in "([{": + depth += 1 + elif tp == tokenize.OP and source in ")]}": + depth -= 1 + + if not lines: + lines.append(line[offset:]) + seen_lines.add(lineno) + # a non-nested comma separates the expression from the message + elif depth == 0 and tp == tokenize.OP and source == ",": + # one line assert with message + if lineno in seen_lines and len(lines) == 1: + offset_in_trimmed = offset + len(lines[-1]) - len(line) + lines[-1] = lines[-1][:offset_in_trimmed] + # multi-line assert with message + elif lineno in seen_lines: + lines[-1] = lines[-1][:offset] + # multi line assert with escaped newline before message + else: + lines.append(line[:offset]) + _write_and_reset() + elif tp in {tokenize.NEWLINE, tokenize.ENDMARKER}: + _write_and_reset() + elif lines and lineno not in seen_lines: + lines.append(line) + seen_lines.add(lineno) + + return ret + + +class AssertionRewriter(ast.NodeVisitor): + """Assertion rewriting implementation. + + The main entrypoint is to call .run() with an ast.Module instance, + this will then find all the assert statements and rewrite them to + provide intermediate values and a detailed assertion error. See + http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html + for an overview of how this works. + + The entry point here is .run() which will iterate over all the + statements in an ast.Module and for each ast.Assert statement it + finds call .visit() with it. Then .visit_Assert() takes over and + is responsible for creating new ast statements to replace the + original assert statement: it rewrites the test of an assertion + to provide intermediate values and replace it with an if statement + which raises an assertion error with a detailed explanation in + case the expression is false and calls pytest_assertion_pass hook + if expression is true. + + For this .visit_Assert() uses the visitor pattern to visit all the + AST nodes of the ast.Assert.test field, each visit call returning + an AST node and the corresponding explanation string. During this + state is kept in several instance attributes: + + :statements: All the AST statements which will replace the assert + statement. + + :variables: This is populated by .variable() with each variable + used by the statements so that they can all be set to None at + the end of the statements. + + :variable_counter: Counter to create new unique variables needed + by statements. Variables are created using .variable() and + have the form of "@py_assert0". + + :expl_stmts: The AST statements which will be executed to get + data from the assertion. This is the code which will construct + the detailed assertion message that is used in the AssertionError + or for the pytest_assertion_pass hook. + + :explanation_specifiers: A dict filled by .explanation_param() + with %-formatting placeholders and their corresponding + expressions to use in the building of an assertion message. + This is used by .pop_format_context() to build a message. + + :stack: A stack of the explanation_specifiers dicts maintained by + .push_format_context() and .pop_format_context() which allows + to build another %-formatted string while already building one. + + :scope: A tuple containing the current scope used for variables_overwrite. + + :variables_overwrite: A dict filled with references to variables + that change value within an assert. This happens when a variable is + reassigned with the walrus operator + + This state, except the variables_overwrite, is reset on every new assert + statement visited and used by the other visitors. + """ + + def __init__( + self, module_path: str | None, config: Config | None, source: bytes + ) -> None: + super().__init__() + self.module_path = module_path + self.config = config + if config is not None: + self.enable_assertion_pass_hook = config.getini( + "enable_assertion_pass_hook" + ) + else: + self.enable_assertion_pass_hook = False + self.source = source + self.scope: tuple[ast.AST, ...] = () + self.variables_overwrite: defaultdict[tuple[ast.AST, ...], dict[str, str]] = ( + defaultdict(dict) + ) + + def run(self, mod: ast.Module) -> None: + """Find all assert statements in *mod* and rewrite them.""" + if not mod.body: + # Nothing to do. + return + + # We'll insert some special imports at the top of the module, but after any + # docstrings and __future__ imports, so first figure out where that is. + doc = getattr(mod, "docstring", None) + expect_docstring = doc is None + if doc is not None and self.is_rewrite_disabled(doc): + return + pos = 0 + for item in mod.body: + match item: + case ast.Expr(value=ast.Constant(value=str() as doc)) if ( + expect_docstring + ): + if self.is_rewrite_disabled(doc): + return + expect_docstring = False + case ast.ImportFrom(level=0, module="__future__"): + pass + case _: + break + pos += 1 + # Special case: for a decorated function, set the lineno to that of the + # first decorator, not the `def`. Issue #4984. + if isinstance(item, ast.FunctionDef) and item.decorator_list: + lineno = item.decorator_list[0].lineno + else: + lineno = item.lineno + # Now actually insert the special imports. + aliases = [ + ast.alias("builtins", "@py_builtins", lineno=lineno, col_offset=0), + ast.alias( + "_pytest.assertion.rewrite", + "@pytest_ar", + lineno=lineno, + col_offset=0, + ), + ] + imports = [ + ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases + ] + mod.body[pos:pos] = imports + + # Collect asserts. + self.scope = (mod,) + nodes: list[ast.AST | Sentinel] = [mod] + while nodes: + node = nodes.pop() + if isinstance(node, ast.FunctionDef | ast.AsyncFunctionDef | ast.ClassDef): + self.scope = tuple((*self.scope, node)) + nodes.append(_SCOPE_END_MARKER) + if node == _SCOPE_END_MARKER: + self.scope = self.scope[:-1] + continue + assert isinstance(node, ast.AST) + for name, field in ast.iter_fields(node): + if isinstance(field, list): + new: list[ast.AST] = [] + for i, child in enumerate(field): + if isinstance(child, ast.Assert): + # Transform assert. + new.extend(self.visit(child)) + else: + new.append(child) + if isinstance(child, ast.AST): + nodes.append(child) + setattr(node, name, new) + elif ( + isinstance(field, ast.AST) + # Don't recurse into expressions as they can't contain + # asserts. + and not isinstance(field, ast.expr) + ): + nodes.append(field) + + @staticmethod + def is_rewrite_disabled(docstring: str) -> bool: + return "PYTEST_DONT_REWRITE" in docstring + + def variable(self) -> str: + """Get a new variable.""" + # Use a character invalid in python identifiers to avoid clashing. + name = "@py_assert" + str(next(self.variable_counter)) + self.variables.append(name) + return name + + def assign(self, expr: ast.expr) -> ast.Name: + """Give *expr* a name.""" + name = self.variable() + self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr)) + return ast.copy_location(ast.Name(name, ast.Load()), expr) + + def display(self, expr: ast.expr) -> ast.expr: + """Call saferepr on the expression.""" + return self.helper("_saferepr", expr) + + def helper(self, name: str, *args: ast.expr) -> ast.expr: + """Call a helper in this module.""" + py_name = ast.Name("@pytest_ar", ast.Load()) + attr = ast.Attribute(py_name, name, ast.Load()) + return ast.Call(attr, list(args), []) + + def builtin(self, name: str) -> ast.Attribute: + """Return the builtin called *name*.""" + builtin_name = ast.Name("@py_builtins", ast.Load()) + return ast.Attribute(builtin_name, name, ast.Load()) + + def explanation_param(self, expr: ast.expr) -> str: + """Return a new named %-formatting placeholder for expr. + + This creates a %-formatting placeholder for expr in the + current formatting context, e.g. ``%(py0)s``. The placeholder + and expr are placed in the current format context so that it + can be used on the next call to .pop_format_context(). + """ + specifier = "py" + str(next(self.variable_counter)) + self.explanation_specifiers[specifier] = expr + return "%(" + specifier + ")s" + + def push_format_context(self) -> None: + """Create a new formatting context. + + The format context is used for when an explanation wants to + have a variable value formatted in the assertion message. In + this case the value required can be added using + .explanation_param(). Finally .pop_format_context() is used + to format a string of %-formatted values as added by + .explanation_param(). + """ + self.explanation_specifiers: dict[str, ast.expr] = {} + self.stack.append(self.explanation_specifiers) + + def pop_format_context(self, expl_expr: ast.expr) -> ast.Name: + """Format the %-formatted string with current format context. + + The expl_expr should be an str ast.expr instance constructed from + the %-placeholders created by .explanation_param(). This will + add the required code to format said string to .expl_stmts and + return the ast.Name instance of the formatted string. + """ + current = self.stack.pop() + if self.stack: + self.explanation_specifiers = self.stack[-1] + keys: list[ast.expr | None] = [ast.Constant(key) for key in current.keys()] + format_dict = ast.Dict(keys, list(current.values())) + form = ast.BinOp(expl_expr, ast.Mod(), format_dict) + name = "@py_format" + str(next(self.variable_counter)) + if self.enable_assertion_pass_hook: + self.format_variables.append(name) + self.expl_stmts.append(ast.Assign([ast.Name(name, ast.Store())], form)) + return ast.Name(name, ast.Load()) + + def generic_visit(self, node: ast.AST) -> tuple[ast.Name, str]: + """Handle expressions we don't have custom code for.""" + assert isinstance(node, ast.expr) + res = self.assign(node) + return res, self.explanation_param(self.display(res)) + + def visit_Assert(self, assert_: ast.Assert) -> list[ast.stmt]: + """Return the AST statements to replace the ast.Assert instance. + + This rewrites the test of an assertion to provide + intermediate values and replace it with an if statement which + raises an assertion error with a detailed explanation in case + the expression is false. + """ + if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1: + import warnings + + from _pytest.warning_types import PytestAssertRewriteWarning + + # TODO: This assert should not be needed. + assert self.module_path is not None + warnings.warn_explicit( + PytestAssertRewriteWarning( + "assertion is always true, perhaps remove parentheses?" + ), + category=None, + filename=self.module_path, + lineno=assert_.lineno, + ) + + self.statements: list[ast.stmt] = [] + self.variables: list[str] = [] + self.variable_counter = itertools.count() + + if self.enable_assertion_pass_hook: + self.format_variables: list[str] = [] + + self.stack: list[dict[str, ast.expr]] = [] + self.expl_stmts: list[ast.stmt] = [] + self.push_format_context() + # Rewrite assert into a bunch of statements. + top_condition, explanation = self.visit(assert_.test) + + negation = ast.UnaryOp(ast.Not(), top_condition) + + if self.enable_assertion_pass_hook: # Experimental pytest_assertion_pass hook + msg = self.pop_format_context(ast.Constant(explanation)) + + # Failed + if assert_.msg: + assertmsg = self.helper("_format_assertmsg", assert_.msg) + gluestr = "\n>assert " + else: + assertmsg = ast.Constant("") + gluestr = "assert " + err_explanation = ast.BinOp(ast.Constant(gluestr), ast.Add(), msg) + err_msg = ast.BinOp(assertmsg, ast.Add(), err_explanation) + err_name = ast.Name("AssertionError", ast.Load()) + fmt = self.helper("_format_explanation", err_msg) + exc = ast.Call(err_name, [fmt], []) + raise_ = ast.Raise(exc, None) + statements_fail = [] + statements_fail.extend(self.expl_stmts) + statements_fail.append(raise_) + + # Passed + fmt_pass = self.helper("_format_explanation", msg) + orig = _get_assertion_exprs(self.source)[assert_.lineno] + hook_call_pass = ast.Expr( + self.helper( + "_call_assertion_pass", + ast.Constant(assert_.lineno), + ast.Constant(orig), + fmt_pass, + ) + ) + # If any hooks implement assert_pass hook + hook_impl_test = ast.If( + self.helper("_check_if_assertion_pass_impl"), + [*self.expl_stmts, hook_call_pass], + [], + ) + statements_pass: list[ast.stmt] = [hook_impl_test] + + # Test for assertion condition + main_test = ast.If(negation, statements_fail, statements_pass) + self.statements.append(main_test) + if self.format_variables: + variables: list[ast.expr] = [ + ast.Name(name, ast.Store()) for name in self.format_variables + ] + clear_format = ast.Assign(variables, ast.Constant(None)) + self.statements.append(clear_format) + + else: # Original assertion rewriting + # Create failure message. + body = self.expl_stmts + self.statements.append(ast.If(negation, body, [])) + if assert_.msg: + assertmsg = self.helper("_format_assertmsg", assert_.msg) + explanation = "\n>assert " + explanation + else: + assertmsg = ast.Constant("") + explanation = "assert " + explanation + template = ast.BinOp(assertmsg, ast.Add(), ast.Constant(explanation)) + msg = self.pop_format_context(template) + fmt = self.helper("_format_explanation", msg) + err_name = ast.Name("AssertionError", ast.Load()) + exc = ast.Call(err_name, [fmt], []) + raise_ = ast.Raise(exc, None) + + body.append(raise_) + + # Clear temporary variables by setting them to None. + if self.variables: + variables = [ast.Name(name, ast.Store()) for name in self.variables] + clear = ast.Assign(variables, ast.Constant(None)) + self.statements.append(clear) + # Fix locations (line numbers/column offsets). + for stmt in self.statements: + for node in traverse_node(stmt): + if getattr(node, "lineno", None) is None: + # apply the assertion location to all generated ast nodes without source location + # and preserve the location of existing nodes or generated nodes with an correct location. + ast.copy_location(node, assert_) + return self.statements + + def visit_NamedExpr(self, name: ast.NamedExpr) -> tuple[ast.NamedExpr, str]: + # This method handles the 'walrus operator' repr of the target + # name if it's a local variable or _should_repr_global_name() + # thinks it's acceptable. + locs = ast.Call(self.builtin("locals"), [], []) + target_id = name.target.id + inlocs = ast.Compare(ast.Constant(target_id), [ast.In()], [locs]) + dorepr = self.helper("_should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) + expr = ast.IfExp(test, self.display(name), ast.Constant(target_id)) + return name, self.explanation_param(expr) + + def visit_Name(self, name: ast.Name) -> tuple[ast.Name, str]: + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. + locs = ast.Call(self.builtin("locals"), [], []) + inlocs = ast.Compare(ast.Constant(name.id), [ast.In()], [locs]) + dorepr = self.helper("_should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) + expr = ast.IfExp(test, self.display(name), ast.Constant(name.id)) + return name, self.explanation_param(expr) + + def visit_BoolOp(self, boolop: ast.BoolOp) -> tuple[ast.Name, str]: + res_var = self.variable() + expl_list = self.assign(ast.List([], ast.Load())) + app = ast.Attribute(expl_list, "append", ast.Load()) + is_or = int(isinstance(boolop.op, ast.Or)) + body = save = self.statements + fail_save = self.expl_stmts + levels = len(boolop.values) - 1 + self.push_format_context() + # Process each operand, short-circuiting if needed. + for i, v in enumerate(boolop.values): + if i: + fail_inner: list[ast.stmt] = [] + # cond is set in a prior loop iteration below + self.expl_stmts.append(ast.If(cond, fail_inner, [])) # noqa: F821 + self.expl_stmts = fail_inner + match v: + # Check if the left operand is an ast.NamedExpr and the value has already been visited + case ast.Compare( + left=ast.NamedExpr(target=ast.Name(id=target_id)) + ) if target_id in [ + e.id for e in boolop.values[:i] if hasattr(e, "id") + ]: + pytest_temp = self.variable() + self.variables_overwrite[self.scope][target_id] = v.left # type:ignore[assignment] + # mypy's false positive, we're checking that the 'target' attribute exists. + v.left.target.id = pytest_temp # type:ignore[attr-defined] + self.push_format_context() + res, expl = self.visit(v) + body.append(ast.Assign([ast.Name(res_var, ast.Store())], res)) + expl_format = self.pop_format_context(ast.Constant(expl)) + call = ast.Call(app, [expl_format], []) + self.expl_stmts.append(ast.Expr(call)) + if i < levels: + cond: ast.expr = res + if is_or: + cond = ast.UnaryOp(ast.Not(), cond) + inner: list[ast.stmt] = [] + self.statements.append(ast.If(cond, inner, [])) + self.statements = body = inner + self.statements = save + self.expl_stmts = fail_save + expl_template = self.helper("_format_boolop", expl_list, ast.Constant(is_or)) + expl = self.pop_format_context(expl_template) + return ast.Name(res_var, ast.Load()), self.explanation_param(expl) + + def visit_UnaryOp(self, unary: ast.UnaryOp) -> tuple[ast.Name, str]: + pattern = UNARY_MAP[unary.op.__class__] + operand_res, operand_expl = self.visit(unary.operand) + res = self.assign(ast.copy_location(ast.UnaryOp(unary.op, operand_res), unary)) + return res, pattern % (operand_expl,) + + def visit_BinOp(self, binop: ast.BinOp) -> tuple[ast.Name, str]: + symbol = BINOP_MAP[binop.op.__class__] + left_expr, left_expl = self.visit(binop.left) + right_expr, right_expl = self.visit(binop.right) + explanation = f"({left_expl} {symbol} {right_expl})" + res = self.assign( + ast.copy_location(ast.BinOp(left_expr, binop.op, right_expr), binop) + ) + return res, explanation + + def visit_Call(self, call: ast.Call) -> tuple[ast.Name, str]: + new_func, func_expl = self.visit(call.func) + arg_expls = [] + new_args = [] + new_kwargs = [] + for arg in call.args: + if isinstance(arg, ast.Name) and arg.id in self.variables_overwrite.get( + self.scope, {} + ): + arg = self.variables_overwrite[self.scope][arg.id] # type:ignore[assignment] + res, expl = self.visit(arg) + arg_expls.append(expl) + new_args.append(res) + for keyword in call.keywords: + match keyword.value: + case ast.Name(id=id) if id in self.variables_overwrite.get( + self.scope, {} + ): + keyword.value = self.variables_overwrite[self.scope][id] # type:ignore[assignment] + res, expl = self.visit(keyword.value) + new_kwargs.append(ast.keyword(keyword.arg, res)) + if keyword.arg: + arg_expls.append(keyword.arg + "=" + expl) + else: # **args have `arg` keywords with an .arg of None + arg_expls.append("**" + expl) + + expl = "{}({})".format(func_expl, ", ".join(arg_expls)) + new_call = ast.copy_location(ast.Call(new_func, new_args, new_kwargs), call) + res = self.assign(new_call) + res_expl = self.explanation_param(self.display(res)) + outer_expl = f"{res_expl}\n{{{res_expl} = {expl}\n}}" + return res, outer_expl + + def visit_Starred(self, starred: ast.Starred) -> tuple[ast.Starred, str]: + # A Starred node can appear in a function call. + res, expl = self.visit(starred.value) + new_starred = ast.Starred(res, starred.ctx) + return new_starred, "*" + expl + + def visit_Attribute(self, attr: ast.Attribute) -> tuple[ast.Name, str]: + if not isinstance(attr.ctx, ast.Load): + return self.generic_visit(attr) + value, value_expl = self.visit(attr.value) + res = self.assign( + ast.copy_location(ast.Attribute(value, attr.attr, ast.Load()), attr) + ) + res_expl = self.explanation_param(self.display(res)) + pat = "%s\n{%s = %s.%s\n}" + expl = pat % (res_expl, res_expl, value_expl, attr.attr) + return res, expl + + def visit_Compare(self, comp: ast.Compare) -> tuple[ast.expr, str]: + self.push_format_context() + # We first check if we have overwritten a variable in the previous assert + match comp.left: + case ast.Name(id=name_id) if name_id in self.variables_overwrite.get( + self.scope, {} + ): + comp.left = self.variables_overwrite[self.scope][name_id] # type: ignore[assignment] + case ast.NamedExpr(target=ast.Name(id=target_id)): + self.variables_overwrite[self.scope][target_id] = comp.left # type: ignore[assignment] + left_res, left_expl = self.visit(comp.left) + if isinstance(comp.left, ast.Compare | ast.BoolOp): + left_expl = f"({left_expl})" + res_variables = [self.variable() for i in range(len(comp.ops))] + load_names: list[ast.expr] = [ast.Name(v, ast.Load()) for v in res_variables] + store_names = [ast.Name(v, ast.Store()) for v in res_variables] + it = zip(range(len(comp.ops)), comp.ops, comp.comparators, strict=True) + expls: list[ast.expr] = [] + syms: list[ast.expr] = [] + results = [left_res] + for i, op, next_operand in it: + match (next_operand, left_res): + case ( + ast.NamedExpr(target=ast.Name(id=target_id)), + ast.Name(id=name_id), + ) if target_id == name_id: + next_operand.target.id = self.variable() + self.variables_overwrite[self.scope][name_id] = next_operand # type: ignore[assignment] + + next_res, next_expl = self.visit(next_operand) + if isinstance(next_operand, ast.Compare | ast.BoolOp): + next_expl = f"({next_expl})" + results.append(next_res) + sym = BINOP_MAP[op.__class__] + syms.append(ast.Constant(sym)) + expl = f"{left_expl} {sym} {next_expl}" + expls.append(ast.Constant(expl)) + res_expr = ast.copy_location(ast.Compare(left_res, [op], [next_res]), comp) + self.statements.append(ast.Assign([store_names[i]], res_expr)) + left_res, left_expl = next_res, next_expl + # Use pytest.assertion.util._reprcompare if that's available. + expl_call = self.helper( + "_call_reprcompare", + ast.Tuple(syms, ast.Load()), + ast.Tuple(load_names, ast.Load()), + ast.Tuple(expls, ast.Load()), + ast.Tuple(results, ast.Load()), + ) + if len(comp.ops) > 1: + res: ast.expr = ast.BoolOp(ast.And(), load_names) + else: + res = load_names[0] + + return res, self.explanation_param(self.pop_format_context(expl_call)) + + +def try_makedirs(cache_dir: Path) -> bool: + """Attempt to create the given directory and sub-directories exist. + + Returns True if successful or if it already exists. + """ + try: + os.makedirs(cache_dir, exist_ok=True) + except (FileNotFoundError, NotADirectoryError, FileExistsError): + # One of the path components was not a directory: + # - we're in a zip file + # - it is a file + return False + except PermissionError: + return False + except OSError as e: + # as of now, EROFS doesn't have an equivalent OSError-subclass + # + # squashfuse_ll returns ENOSYS "OSError: [Errno 38] Function not + # implemented" for a read-only error + if e.errno in {errno.EROFS, errno.ENOSYS}: + return False + raise + return True + + +def get_cache_dir(file_path: Path) -> Path: + """Return the cache directory to write .pyc files for the given .py file path.""" + if sys.pycache_prefix: + # given: + # prefix = '/tmp/pycs' + # path = '/home/user/proj/test_app.py' + # we want: + # '/tmp/pycs/home/user/proj' + return Path(sys.pycache_prefix) / Path(*file_path.parts[1:-1]) + else: + # classic pycache directory + return file_path.parent / "__pycache__" diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/assertion/truncate.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/assertion/truncate.py new file mode 100644 index 0000000..5820e6e --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/assertion/truncate.py @@ -0,0 +1,137 @@ +"""Utilities for truncating assertion output. + +Current default behaviour is to truncate assertion explanations at +terminal lines, unless running with an assertions verbosity level of at least 2 or running on CI. +""" + +from __future__ import annotations + +from _pytest.compat import running_on_ci +from _pytest.config import Config +from _pytest.nodes import Item + + +DEFAULT_MAX_LINES = 8 +DEFAULT_MAX_CHARS = DEFAULT_MAX_LINES * 80 +USAGE_MSG = "use '-vv' to show" + + +def truncate_if_required(explanation: list[str], item: Item) -> list[str]: + """Truncate this assertion explanation if the given test item is eligible.""" + should_truncate, max_lines, max_chars = _get_truncation_parameters(item) + if should_truncate: + return _truncate_explanation( + explanation, + max_lines=max_lines, + max_chars=max_chars, + ) + return explanation + + +def _get_truncation_parameters(item: Item) -> tuple[bool, int, int]: + """Return the truncation parameters related to the given item, as (should truncate, max lines, max chars).""" + # We do not need to truncate if one of conditions is met: + # 1. Verbosity level is 2 or more; + # 2. Test is being run in CI environment; + # 3. Both truncation_limit_lines and truncation_limit_chars + # .ini parameters are set to 0 explicitly. + max_lines = item.config.getini("truncation_limit_lines") + max_lines = int(max_lines if max_lines is not None else DEFAULT_MAX_LINES) + + max_chars = item.config.getini("truncation_limit_chars") + max_chars = int(max_chars if max_chars is not None else DEFAULT_MAX_CHARS) + + verbose = item.config.get_verbosity(Config.VERBOSITY_ASSERTIONS) + + should_truncate = verbose < 2 and not running_on_ci() + should_truncate = should_truncate and (max_lines > 0 or max_chars > 0) + + return should_truncate, max_lines, max_chars + + +def _truncate_explanation( + input_lines: list[str], + max_lines: int, + max_chars: int, +) -> list[str]: + """Truncate given list of strings that makes up the assertion explanation. + + Truncates to either max_lines, or max_chars - whichever the input reaches + first, taking the truncation explanation into account. The remaining lines + will be replaced by a usage message. + """ + # Check if truncation required + input_char_count = len("".join(input_lines)) + # The length of the truncation explanation depends on the number of lines + # removed but is at least 68 characters: + # The real value is + # 64 (for the base message: + # '...\n...Full output truncated (1 line hidden), use '-vv' to show")' + # ) + # + 1 (for plural) + # + int(math.log10(len(input_lines) - max_lines)) (number of hidden line, at least 1) + # + 3 for the '...' added to the truncated line + # But if there's more than 100 lines it's very likely that we're going to + # truncate, so we don't need the exact value using log10. + tolerable_max_chars = ( + max_chars + 70 # 64 + 1 (for plural) + 2 (for '99') + 3 for '...' + ) + # The truncation explanation add two lines to the output + tolerable_max_lines = max_lines + 2 + if ( + len(input_lines) <= tolerable_max_lines + and input_char_count <= tolerable_max_chars + ): + return input_lines + # Truncate first to max_lines, and then truncate to max_chars if necessary + if max_lines > 0: + truncated_explanation = input_lines[:max_lines] + else: + truncated_explanation = input_lines + truncated_char = True + # We reevaluate the need to truncate chars following removal of some lines + if len("".join(truncated_explanation)) > tolerable_max_chars and max_chars > 0: + truncated_explanation = _truncate_by_char_count( + truncated_explanation, max_chars + ) + else: + truncated_char = False + + if truncated_explanation == input_lines: + # No truncation happened, so we do not need to add any explanations + return truncated_explanation + + truncated_line_count = len(input_lines) - len(truncated_explanation) + if truncated_explanation[-1]: + # Add ellipsis and take into account part-truncated final line + truncated_explanation[-1] = truncated_explanation[-1] + "..." + if truncated_char: + # It's possible that we did not remove any char from this line + truncated_line_count += 1 + else: + # Add proper ellipsis when we were able to fit a full line exactly + truncated_explanation[-1] = "..." + return [ + *truncated_explanation, + "", + f"...Full output truncated ({truncated_line_count} line" + f"{'' if truncated_line_count == 1 else 's'} hidden), {USAGE_MSG}", + ] + + +def _truncate_by_char_count(input_lines: list[str], max_chars: int) -> list[str]: + # Find point at which input length exceeds total allowed length + iterated_char_count = 0 + for iterated_index, input_line in enumerate(input_lines): + if iterated_char_count + len(input_line) > max_chars: + break + iterated_char_count += len(input_line) + + # Create truncated explanation with modified final line + truncated_result = input_lines[:iterated_index] + final_line = input_lines[iterated_index] + if final_line: + final_line_truncate_point = max_chars - iterated_char_count + final_line = final_line[:final_line_truncate_point] + truncated_result.append(final_line) + return truncated_result diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/assertion/util.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/assertion/util.py new file mode 100644 index 0000000..f35d83a --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/assertion/util.py @@ -0,0 +1,615 @@ +# mypy: allow-untyped-defs +"""Utilities for assertion debugging.""" + +from __future__ import annotations + +import collections.abc +from collections.abc import Callable +from collections.abc import Iterable +from collections.abc import Mapping +from collections.abc import Sequence +from collections.abc import Set as AbstractSet +import pprint +from typing import Any +from typing import Literal +from typing import Protocol +from unicodedata import normalize + +from _pytest import outcomes +import _pytest._code +from _pytest._io.pprint import PrettyPrinter +from _pytest._io.saferepr import saferepr +from _pytest._io.saferepr import saferepr_unlimited +from _pytest.compat import running_on_ci +from _pytest.config import Config + + +# The _reprcompare attribute on the util module is used by the new assertion +# interpretation code and assertion rewriter to detect this plugin was +# loaded and in turn call the hooks defined here as part of the +# DebugInterpreter. +_reprcompare: Callable[[str, object, object], str | None] | None = None + +# Works similarly as _reprcompare attribute. Is populated with the hook call +# when pytest_runtest_setup is called. +_assertion_pass: Callable[[int, str, str], None] | None = None + +# Config object which is assigned during pytest_runtest_protocol. +_config: Config | None = None + + +class _HighlightFunc(Protocol): + def __call__(self, source: str, lexer: Literal["diff", "python"] = "python") -> str: + """Apply highlighting to the given source.""" + + +def dummy_highlighter(source: str, lexer: Literal["diff", "python"] = "python") -> str: + """Dummy highlighter that returns the text unprocessed. + + Needed for _notin_text, as the diff gets post-processed to only show the "+" part. + """ + return source + + +def format_explanation(explanation: str) -> str: + r"""Format an explanation. + + Normally all embedded newlines are escaped, however there are + three exceptions: \n{, \n} and \n~. The first two are intended + cover nested explanations, see function and attribute explanations + for examples (.visit_Call(), visit_Attribute()). The last one is + for when one explanation needs to span multiple lines, e.g. when + displaying diffs. + """ + lines = _split_explanation(explanation) + result = _format_lines(lines) + return "\n".join(result) + + +def _split_explanation(explanation: str) -> list[str]: + r"""Return a list of individual lines in the explanation. + + This will return a list of lines split on '\n{', '\n}' and '\n~'. + Any other newlines will be escaped and appear in the line as the + literal '\n' characters. + """ + raw_lines = (explanation or "").split("\n") + lines = [raw_lines[0]] + for values in raw_lines[1:]: + if values and values[0] in ["{", "}", "~", ">"]: + lines.append(values) + else: + lines[-1] += "\\n" + values + return lines + + +def _format_lines(lines: Sequence[str]) -> list[str]: + """Format the individual lines. + + This will replace the '{', '}' and '~' characters of our mini formatting + language with the proper 'where ...', 'and ...' and ' + ...' text, taking + care of indentation along the way. + + Return a list of formatted lines. + """ + result = list(lines[:1]) + stack = [0] + stackcnt = [0] + for line in lines[1:]: + if line.startswith("{"): + if stackcnt[-1]: + s = "and " + else: + s = "where " + stack.append(len(result)) + stackcnt[-1] += 1 + stackcnt.append(0) + result.append(" +" + " " * (len(stack) - 1) + s + line[1:]) + elif line.startswith("}"): + stack.pop() + stackcnt.pop() + result[stack[-1]] += line[1:] + else: + assert line[0] in ["~", ">"] + stack[-1] += 1 + indent = len(stack) if line.startswith("~") else len(stack) - 1 + result.append(" " * indent + line[1:]) + assert len(stack) == 1 + return result + + +def issequence(x: Any) -> bool: + return isinstance(x, collections.abc.Sequence) and not isinstance(x, str) + + +def istext(x: Any) -> bool: + return isinstance(x, str) + + +def isdict(x: Any) -> bool: + return isinstance(x, dict) + + +def isset(x: Any) -> bool: + return isinstance(x, set | frozenset) + + +def isnamedtuple(obj: Any) -> bool: + return isinstance(obj, tuple) and getattr(obj, "_fields", None) is not None + + +def isdatacls(obj: Any) -> bool: + return getattr(obj, "__dataclass_fields__", None) is not None + + +def isattrs(obj: Any) -> bool: + return getattr(obj, "__attrs_attrs__", None) is not None + + +def isiterable(obj: Any) -> bool: + try: + iter(obj) + return not istext(obj) + except Exception: + return False + + +def has_default_eq( + obj: object, +) -> bool: + """Check if an instance of an object contains the default eq + + First, we check if the object's __eq__ attribute has __code__, + if so, we check the equally of the method code filename (__code__.co_filename) + to the default one generated by the dataclass and attr module + for dataclasses the default co_filename is , for attrs class, the __eq__ should contain "attrs eq generated" + """ + # inspired from https://github.com/willmcgugan/rich/blob/07d51ffc1aee6f16bd2e5a25b4e82850fb9ed778/rich/pretty.py#L68 + if hasattr(obj.__eq__, "__code__") and hasattr(obj.__eq__.__code__, "co_filename"): + code_filename = obj.__eq__.__code__.co_filename + + if isattrs(obj): + return "attrs generated " in code_filename + + return code_filename == "" # data class + return True + + +def assertrepr_compare( + config, op: str, left: Any, right: Any, use_ascii: bool = False +) -> list[str] | None: + """Return specialised explanations for some operators/operands.""" + verbose = config.get_verbosity(Config.VERBOSITY_ASSERTIONS) + + # Strings which normalize equal are often hard to distinguish when printed; use ascii() to make this easier. + # See issue #3246. + use_ascii = ( + isinstance(left, str) + and isinstance(right, str) + and normalize("NFD", left) == normalize("NFD", right) + ) + + if verbose > 1: + left_repr = saferepr_unlimited(left, use_ascii=use_ascii) + right_repr = saferepr_unlimited(right, use_ascii=use_ascii) + else: + # XXX: "15 chars indentation" is wrong + # ("E AssertionError: assert "); should use term width. + maxsize = ( + 80 - 15 - len(op) - 2 + ) // 2 # 15 chars indentation, 1 space around op + + left_repr = saferepr(left, maxsize=maxsize, use_ascii=use_ascii) + right_repr = saferepr(right, maxsize=maxsize, use_ascii=use_ascii) + + summary = f"{left_repr} {op} {right_repr}" + highlighter = config.get_terminal_writer()._highlight + + explanation = None + try: + if op == "==": + explanation = _compare_eq_any(left, right, highlighter, verbose) + elif op == "not in": + if istext(left) and istext(right): + explanation = _notin_text(left, right, verbose) + elif op == "!=": + if isset(left) and isset(right): + explanation = ["Both sets are equal"] + elif op == ">=": + if isset(left) and isset(right): + explanation = _compare_gte_set(left, right, highlighter, verbose) + elif op == "<=": + if isset(left) and isset(right): + explanation = _compare_lte_set(left, right, highlighter, verbose) + elif op == ">": + if isset(left) and isset(right): + explanation = _compare_gt_set(left, right, highlighter, verbose) + elif op == "<": + if isset(left) and isset(right): + explanation = _compare_lt_set(left, right, highlighter, verbose) + + except outcomes.Exit: + raise + except Exception: + repr_crash = _pytest._code.ExceptionInfo.from_current()._getreprcrash() + explanation = [ + f"(pytest_assertion plugin: representation of details failed: {repr_crash}.", + " Probably an object has a faulty __repr__.)", + ] + + if not explanation: + return None + + if explanation[0] != "": + explanation = ["", *explanation] + return [summary, *explanation] + + +def _compare_eq_any( + left: Any, right: Any, highlighter: _HighlightFunc, verbose: int = 0 +) -> list[str]: + explanation = [] + if istext(left) and istext(right): + explanation = _diff_text(left, right, highlighter, verbose) + else: + from _pytest.python_api import ApproxBase + + if isinstance(left, ApproxBase) or isinstance(right, ApproxBase): + # Although the common order should be obtained == expected, this ensures both ways + approx_side = left if isinstance(left, ApproxBase) else right + other_side = right if isinstance(left, ApproxBase) else left + + explanation = approx_side._repr_compare(other_side) + elif type(left) is type(right) and ( + isdatacls(left) or isattrs(left) or isnamedtuple(left) + ): + # Note: unlike dataclasses/attrs, namedtuples compare only the + # field values, not the type or field names. But this branch + # intentionally only handles the same-type case, which was often + # used in older code bases before dataclasses/attrs were available. + explanation = _compare_eq_cls(left, right, highlighter, verbose) + elif issequence(left) and issequence(right): + explanation = _compare_eq_sequence(left, right, highlighter, verbose) + elif isset(left) and isset(right): + explanation = _compare_eq_set(left, right, highlighter, verbose) + elif isdict(left) and isdict(right): + explanation = _compare_eq_dict(left, right, highlighter, verbose) + + if isiterable(left) and isiterable(right): + expl = _compare_eq_iterable(left, right, highlighter, verbose) + explanation.extend(expl) + + return explanation + + +def _diff_text( + left: str, right: str, highlighter: _HighlightFunc, verbose: int = 0 +) -> list[str]: + """Return the explanation for the diff between text. + + Unless --verbose is used this will skip leading and trailing + characters which are identical to keep the diff minimal. + """ + from difflib import ndiff + + explanation: list[str] = [] + + if verbose < 1: + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: + break + if i > 42: + i -= 10 # Provide some context + explanation = [ + f"Skipping {i} identical leading characters in diff, use -v to show" + ] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += [ + f"Skipping {i} identical trailing " + "characters in diff, use -v to show" + ] + left = left[:-i] + right = right[:-i] + keepends = True + if left.isspace() or right.isspace(): + left = repr(str(left)) + right = repr(str(right)) + explanation += ["Strings contain only whitespace, escaping them using repr()"] + # "right" is the expected base against which we compare "left", + # see https://github.com/pytest-dev/pytest/issues/3333 + explanation.extend( + highlighter( + "\n".join( + line.strip("\n") + for line in ndiff(right.splitlines(keepends), left.splitlines(keepends)) + ), + lexer="diff", + ).splitlines() + ) + return explanation + + +def _compare_eq_iterable( + left: Iterable[Any], + right: Iterable[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + if verbose <= 0 and not running_on_ci(): + return ["Use -v to get more diff"] + # dynamic import to speedup pytest + import difflib + + left_formatting = PrettyPrinter().pformat(left).splitlines() + right_formatting = PrettyPrinter().pformat(right).splitlines() + + explanation = ["", "Full diff:"] + # "right" is the expected base against which we compare "left", + # see https://github.com/pytest-dev/pytest/issues/3333 + explanation.extend( + highlighter( + "\n".join( + line.rstrip() + for line in difflib.ndiff(right_formatting, left_formatting) + ), + lexer="diff", + ).splitlines() + ) + return explanation + + +def _compare_eq_sequence( + left: Sequence[Any], + right: Sequence[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + comparing_bytes = isinstance(left, bytes) and isinstance(right, bytes) + explanation: list[str] = [] + len_left = len(left) + len_right = len(right) + for i in range(min(len_left, len_right)): + if left[i] != right[i]: + if comparing_bytes: + # when comparing bytes, we want to see their ascii representation + # instead of their numeric values (#5260) + # using a slice gives us the ascii representation: + # >>> s = b'foo' + # >>> s[0] + # 102 + # >>> s[0:1] + # b'f' + left_value = left[i : i + 1] + right_value = right[i : i + 1] + else: + left_value = left[i] + right_value = right[i] + + explanation.append( + f"At index {i} diff:" + f" {highlighter(repr(left_value))} != {highlighter(repr(right_value))}" + ) + break + + if comparing_bytes: + # when comparing bytes, it doesn't help to show the "sides contain one or more + # items" longer explanation, so skip it + + return explanation + + len_diff = len_left - len_right + if len_diff: + if len_diff > 0: + dir_with_more = "Left" + extra = saferepr(left[len_right]) + else: + len_diff = 0 - len_diff + dir_with_more = "Right" + extra = saferepr(right[len_left]) + + if len_diff == 1: + explanation += [ + f"{dir_with_more} contains one more item: {highlighter(extra)}" + ] + else: + explanation += [ + f"{dir_with_more} contains {len_diff} more items, first extra item: {highlighter(extra)}" + ] + return explanation + + +def _compare_eq_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + explanation = [] + explanation.extend(_set_one_sided_diff("left", left, right, highlighter)) + explanation.extend(_set_one_sided_diff("right", right, left, highlighter)) + return explanation + + +def _compare_gt_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + explanation = _compare_gte_set(left, right, highlighter) + if not explanation: + return ["Both sets are equal"] + return explanation + + +def _compare_lt_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + explanation = _compare_lte_set(left, right, highlighter) + if not explanation: + return ["Both sets are equal"] + return explanation + + +def _compare_gte_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + return _set_one_sided_diff("right", right, left, highlighter) + + +def _compare_lte_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + return _set_one_sided_diff("left", left, right, highlighter) + + +def _set_one_sided_diff( + posn: str, + set1: AbstractSet[Any], + set2: AbstractSet[Any], + highlighter: _HighlightFunc, +) -> list[str]: + explanation = [] + diff = set1 - set2 + if diff: + explanation.append(f"Extra items in the {posn} set:") + for item in diff: + explanation.append(highlighter(saferepr(item))) + return explanation + + +def _compare_eq_dict( + left: Mapping[Any, Any], + right: Mapping[Any, Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + explanation: list[str] = [] + set_left = set(left) + set_right = set(right) + common = set_left.intersection(set_right) + same = {k: left[k] for k in common if left[k] == right[k]} + if same and verbose < 2: + explanation += [f"Omitting {len(same)} identical items, use -vv to show"] + elif same: + explanation += ["Common items:"] + explanation += highlighter(pprint.pformat(same)).splitlines() + diff = {k for k in common if left[k] != right[k]} + if diff: + explanation += ["Differing items:"] + for k in diff: + explanation += [ + highlighter(saferepr({k: left[k]})) + + " != " + + highlighter(saferepr({k: right[k]})) + ] + extra_left = set_left - set_right + len_extra_left = len(extra_left) + if len_extra_left: + explanation.append( + f"Left contains {len_extra_left} more item{'' if len_extra_left == 1 else 's'}:" + ) + explanation.extend( + highlighter(pprint.pformat({k: left[k] for k in extra_left})).splitlines() + ) + extra_right = set_right - set_left + len_extra_right = len(extra_right) + if len_extra_right: + explanation.append( + f"Right contains {len_extra_right} more item{'' if len_extra_right == 1 else 's'}:" + ) + explanation.extend( + highlighter(pprint.pformat({k: right[k] for k in extra_right})).splitlines() + ) + return explanation + + +def _compare_eq_cls( + left: Any, right: Any, highlighter: _HighlightFunc, verbose: int +) -> list[str]: + if not has_default_eq(left): + return [] + if isdatacls(left): + import dataclasses + + all_fields = dataclasses.fields(left) + fields_to_check = [info.name for info in all_fields if info.compare] + elif isattrs(left): + all_fields = left.__attrs_attrs__ + fields_to_check = [field.name for field in all_fields if getattr(field, "eq")] + elif isnamedtuple(left): + fields_to_check = left._fields + else: + assert False + + indent = " " + same = [] + diff = [] + for field in fields_to_check: + if getattr(left, field) == getattr(right, field): + same.append(field) + else: + diff.append(field) + + explanation = [] + if same or diff: + explanation += [""] + if same and verbose < 2: + explanation.append(f"Omitting {len(same)} identical items, use -vv to show") + elif same: + explanation += ["Matching attributes:"] + explanation += highlighter(pprint.pformat(same)).splitlines() + if diff: + explanation += ["Differing attributes:"] + explanation += highlighter(pprint.pformat(diff)).splitlines() + for field in diff: + field_left = getattr(left, field) + field_right = getattr(right, field) + explanation += [ + "", + f"Drill down into differing attribute {field}:", + f"{indent}{field}: {highlighter(repr(field_left))} != {highlighter(repr(field_right))}", + ] + explanation += [ + indent + line + for line in _compare_eq_any( + field_left, field_right, highlighter, verbose + ) + ] + return explanation + + +def _notin_text(term: str, text: str, verbose: int = 0) -> list[str]: + index = text.find(term) + head = text[:index] + tail = text[index + len(term) :] + correct_text = head + tail + diff = _diff_text(text, correct_text, dummy_highlighter, verbose) + newdiff = [f"{saferepr(term, maxsize=42)} is contained here:"] + for line in diff: + if line.startswith("Skipping"): + continue + if line.startswith("- "): + continue + if line.startswith("+ "): + newdiff.append(" " + line[2:]) + else: + newdiff.append(line) + return newdiff diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/cacheprovider.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/cacheprovider.py new file mode 100644 index 0000000..4383f10 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/cacheprovider.py @@ -0,0 +1,646 @@ +# mypy: allow-untyped-defs +"""Implementation of the cache provider.""" + +# This plugin was not named "cache" to avoid conflicts with the external +# pytest-cache version. +from __future__ import annotations + +from collections.abc import Generator +from collections.abc import Iterable +import dataclasses +import errno +import json +import os +from pathlib import Path +import tempfile +from typing import final + +from .pathlib import resolve_from_str +from .pathlib import rm_rf +from .reports import CollectReport +from _pytest import nodes +from _pytest._io import TerminalWriter +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.nodes import Directory +from _pytest.nodes import File +from _pytest.reports import TestReport + + +README_CONTENT = """\ +# pytest cache directory # + +This directory contains data from the pytest's cache plugin, +which provides the `--lf` and `--ff` options, as well as the `cache` fixture. + +**Do not** commit this to version control. + +See [the docs](https://docs.pytest.org/en/stable/how-to/cache.html) for more information. +""" + +CACHEDIR_TAG_CONTENT = b"""\ +Signature: 8a477f597d28d172789f06886806bc55 +# This file is a cache directory tag created by pytest. +# For information about cache directory tags, see: +# https://bford.info/cachedir/spec.html +""" + + +@final +@dataclasses.dataclass +class Cache: + """Instance of the `cache` fixture.""" + + _cachedir: Path = dataclasses.field(repr=False) + _config: Config = dataclasses.field(repr=False) + + # Sub-directory under cache-dir for directories created by `mkdir()`. + _CACHE_PREFIX_DIRS = "d" + + # Sub-directory under cache-dir for values created by `set()`. + _CACHE_PREFIX_VALUES = "v" + + def __init__( + self, cachedir: Path, config: Config, *, _ispytest: bool = False + ) -> None: + check_ispytest(_ispytest) + self._cachedir = cachedir + self._config = config + + @classmethod + def for_config(cls, config: Config, *, _ispytest: bool = False) -> Cache: + """Create the Cache instance for a Config. + + :meta private: + """ + check_ispytest(_ispytest) + cachedir = cls.cache_dir_from_config(config, _ispytest=True) + if config.getoption("cacheclear") and cachedir.is_dir(): + cls.clear_cache(cachedir, _ispytest=True) + return cls(cachedir, config, _ispytest=True) + + @classmethod + def clear_cache(cls, cachedir: Path, _ispytest: bool = False) -> None: + """Clear the sub-directories used to hold cached directories and values. + + :meta private: + """ + check_ispytest(_ispytest) + for prefix in (cls._CACHE_PREFIX_DIRS, cls._CACHE_PREFIX_VALUES): + d = cachedir / prefix + if d.is_dir(): + rm_rf(d) + + @staticmethod + def cache_dir_from_config(config: Config, *, _ispytest: bool = False) -> Path: + """Get the path to the cache directory for a Config. + + :meta private: + """ + check_ispytest(_ispytest) + return resolve_from_str(config.getini("cache_dir"), config.rootpath) + + def warn(self, fmt: str, *, _ispytest: bool = False, **args: object) -> None: + """Issue a cache warning. + + :meta private: + """ + check_ispytest(_ispytest) + import warnings + + from _pytest.warning_types import PytestCacheWarning + + warnings.warn( + PytestCacheWarning(fmt.format(**args) if args else fmt), + self._config.hook, + stacklevel=3, + ) + + def _mkdir(self, path: Path) -> None: + self._ensure_cache_dir_and_supporting_files() + path.mkdir(exist_ok=True, parents=True) + + def mkdir(self, name: str) -> Path: + """Return a directory path object with the given name. + + If the directory does not yet exist, it will be created. You can use + it to manage files to e.g. store/retrieve database dumps across test + sessions. + + .. versionadded:: 7.0 + + :param name: + Must be a string not containing a ``/`` separator. + Make sure the name contains your plugin or application + identifiers to prevent clashes with other cache users. + """ + path = Path(name) + if len(path.parts) > 1: + raise ValueError("name is not allowed to contain path separators") + res = self._cachedir.joinpath(self._CACHE_PREFIX_DIRS, path) + self._mkdir(res) + return res + + def _getvaluepath(self, key: str) -> Path: + return self._cachedir.joinpath(self._CACHE_PREFIX_VALUES, Path(key)) + + def get(self, key: str, default): + """Return the cached value for the given key. + + If no value was yet cached or the value cannot be read, the specified + default is returned. + + :param key: + Must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param default: + The value to return in case of a cache-miss or invalid cache value. + """ + path = self._getvaluepath(key) + try: + with path.open("r", encoding="UTF-8") as f: + return json.load(f) + except (ValueError, OSError): + return default + + def set(self, key: str, value: object) -> None: + """Save value for the given key. + + :param key: + Must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param value: + Must be of any combination of basic python types, + including nested types like lists of dictionaries. + """ + path = self._getvaluepath(key) + try: + self._mkdir(path.parent) + except OSError as exc: + self.warn( + f"could not create cache path {path}: {exc}", + _ispytest=True, + ) + return + data = json.dumps(value, ensure_ascii=False, indent=2) + try: + f = path.open("w", encoding="UTF-8") + except OSError as exc: + self.warn( + f"cache could not write path {path}: {exc}", + _ispytest=True, + ) + else: + with f: + f.write(data) + + def _ensure_cache_dir_and_supporting_files(self) -> None: + """Create the cache dir and its supporting files.""" + if self._cachedir.is_dir(): + return + + self._cachedir.parent.mkdir(parents=True, exist_ok=True) + with tempfile.TemporaryDirectory( + prefix="pytest-cache-files-", + dir=self._cachedir.parent, + ) as newpath: + path = Path(newpath) + + # Reset permissions to the default, see #12308. + # Note: there's no way to get the current umask atomically, eek. + umask = os.umask(0o022) + os.umask(umask) + path.chmod(0o777 - umask) + + with open(path.joinpath("README.md"), "x", encoding="UTF-8") as f: + f.write(README_CONTENT) + with open(path.joinpath(".gitignore"), "x", encoding="UTF-8") as f: + f.write("# Created by pytest automatically.\n*\n") + with open(path.joinpath("CACHEDIR.TAG"), "xb") as f: + f.write(CACHEDIR_TAG_CONTENT) + + try: + path.rename(self._cachedir) + except OSError as e: + # If 2 concurrent pytests both race to the rename, the loser + # gets "Directory not empty" from the rename. In this case, + # everything is handled so just continue (while letting the + # temporary directory be cleaned up). + # On Windows, the error is a FileExistsError which translates to EEXIST. + if e.errno not in (errno.ENOTEMPTY, errno.EEXIST): + raise + else: + # Create a directory in place of the one we just moved so that + # `TemporaryDirectory`'s cleanup doesn't complain. + # + # TODO: pass ignore_cleanup_errors=True when we no longer support python < 3.10. + # See https://github.com/python/cpython/issues/74168. Note that passing + # delete=False would do the wrong thing in case of errors and isn't supported + # until python 3.12. + path.mkdir() + + +class LFPluginCollWrapper: + def __init__(self, lfplugin: LFPlugin) -> None: + self.lfplugin = lfplugin + self._collected_at_least_one_failure = False + + @hookimpl(wrapper=True) + def pytest_make_collect_report( + self, collector: nodes.Collector + ) -> Generator[None, CollectReport, CollectReport]: + res = yield + if isinstance(collector, Session | Directory): + # Sort any lf-paths to the beginning. + lf_paths = self.lfplugin._last_failed_paths + + # Use stable sort to prioritize last failed. + def sort_key(node: nodes.Item | nodes.Collector) -> bool: + return node.path in lf_paths + + res.result = sorted( + res.result, + key=sort_key, + reverse=True, + ) + + elif isinstance(collector, File): + if collector.path in self.lfplugin._last_failed_paths: + result = res.result + lastfailed = self.lfplugin.lastfailed + + # Only filter with known failures. + if not self._collected_at_least_one_failure: + if not any(x.nodeid in lastfailed for x in result): + return res + self.lfplugin.config.pluginmanager.register( + LFPluginCollSkipfiles(self.lfplugin), "lfplugin-collskip" + ) + self._collected_at_least_one_failure = True + + session = collector.session + result[:] = [ + x + for x in result + if x.nodeid in lastfailed + # Include any passed arguments (not trivial to filter). + or session.isinitpath(x.path) + # Keep all sub-collectors. + or isinstance(x, nodes.Collector) + ] + + return res + + +class LFPluginCollSkipfiles: + def __init__(self, lfplugin: LFPlugin) -> None: + self.lfplugin = lfplugin + + @hookimpl + def pytest_make_collect_report( + self, collector: nodes.Collector + ) -> CollectReport | None: + if isinstance(collector, File): + if collector.path not in self.lfplugin._last_failed_paths: + self.lfplugin._skipped_files += 1 + + return CollectReport( + collector.nodeid, "passed", longrepr=None, result=[] + ) + return None + + +class LFPlugin: + """Plugin which implements the --lf (run last-failing) option.""" + + def __init__(self, config: Config) -> None: + self.config = config + active_keys = "lf", "failedfirst" + self.active = any(config.getoption(key) for key in active_keys) + assert config.cache + self.lastfailed: dict[str, bool] = config.cache.get("cache/lastfailed", {}) + self._previously_failed_count: int | None = None + self._report_status: str | None = None + self._skipped_files = 0 # count skipped files during collection due to --lf + + if config.getoption("lf"): + self._last_failed_paths = self.get_last_failed_paths() + config.pluginmanager.register( + LFPluginCollWrapper(self), "lfplugin-collwrapper" + ) + + def get_last_failed_paths(self) -> set[Path]: + """Return a set with all Paths of the previously failed nodeids and + their parents.""" + rootpath = self.config.rootpath + result = set() + for nodeid in self.lastfailed: + path = rootpath / nodeid.split("::")[0] + result.add(path) + result.update(path.parents) + return {x for x in result if x.exists()} + + def pytest_report_collectionfinish(self) -> str | None: + if self.active and self.config.get_verbosity() >= 0: + return f"run-last-failure: {self._report_status}" + return None + + def pytest_runtest_logreport(self, report: TestReport) -> None: + if (report.when == "call" and report.passed) or report.skipped: + self.lastfailed.pop(report.nodeid, None) + elif report.failed: + self.lastfailed[report.nodeid] = True + + def pytest_collectreport(self, report: CollectReport) -> None: + passed = report.outcome in ("passed", "skipped") + if passed: + if report.nodeid in self.lastfailed: + self.lastfailed.pop(report.nodeid) + self.lastfailed.update((item.nodeid, True) for item in report.result) + else: + self.lastfailed[report.nodeid] = True + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_collection_modifyitems( + self, config: Config, items: list[nodes.Item] + ) -> Generator[None]: + res = yield + + if not self.active: + return res + + if self.lastfailed: + previously_failed = [] + previously_passed = [] + for item in items: + if item.nodeid in self.lastfailed: + previously_failed.append(item) + else: + previously_passed.append(item) + self._previously_failed_count = len(previously_failed) + + if not previously_failed: + # Running a subset of all tests with recorded failures + # only outside of it. + self._report_status = ( + f"{len(self.lastfailed)} known failures not in selected tests" + ) + else: + if self.config.getoption("lf"): + items[:] = previously_failed + config.hook.pytest_deselected(items=previously_passed) + else: # --failedfirst + items[:] = previously_failed + previously_passed + + noun = "failure" if self._previously_failed_count == 1 else "failures" + suffix = " first" if self.config.getoption("failedfirst") else "" + self._report_status = ( + f"rerun previous {self._previously_failed_count} {noun}{suffix}" + ) + + if self._skipped_files > 0: + files_noun = "file" if self._skipped_files == 1 else "files" + self._report_status += f" (skipped {self._skipped_files} {files_noun})" + else: + self._report_status = "no previously failed tests, " + if self.config.getoption("last_failed_no_failures") == "none": + self._report_status += "deselecting all items." + config.hook.pytest_deselected(items=items[:]) + items[:] = [] + else: + self._report_status += "not deselecting items." + + return res + + def pytest_sessionfinish(self, session: Session) -> None: + config = self.config + if config.getoption("cacheshow") or hasattr(config, "workerinput"): + return + + assert config.cache is not None + saved_lastfailed = config.cache.get("cache/lastfailed", {}) + if saved_lastfailed != self.lastfailed: + config.cache.set("cache/lastfailed", self.lastfailed) + + +class NFPlugin: + """Plugin which implements the --nf (run new-first) option.""" + + def __init__(self, config: Config) -> None: + self.config = config + self.active = config.option.newfirst + assert config.cache is not None + self.cached_nodeids = set(config.cache.get("cache/nodeids", [])) + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_collection_modifyitems(self, items: list[nodes.Item]) -> Generator[None]: + res = yield + + if self.active: + new_items: dict[str, nodes.Item] = {} + other_items: dict[str, nodes.Item] = {} + for item in items: + if item.nodeid not in self.cached_nodeids: + new_items[item.nodeid] = item + else: + other_items[item.nodeid] = item + + items[:] = self._get_increasing_order( + new_items.values() + ) + self._get_increasing_order(other_items.values()) + self.cached_nodeids.update(new_items) + else: + self.cached_nodeids.update(item.nodeid for item in items) + + return res + + def _get_increasing_order(self, items: Iterable[nodes.Item]) -> list[nodes.Item]: + return sorted(items, key=lambda item: item.path.stat().st_mtime, reverse=True) + + def pytest_sessionfinish(self) -> None: + config = self.config + if config.getoption("cacheshow") or hasattr(config, "workerinput"): + return + + if config.getoption("collectonly"): + return + + assert config.cache is not None + config.cache.set("cache/nodeids", sorted(self.cached_nodeids)) + + +def pytest_addoption(parser: Parser) -> None: + """Add command-line options for cache functionality. + + :param parser: Parser object to add command-line options to. + """ + group = parser.getgroup("general") + group.addoption( + "--lf", + "--last-failed", + action="store_true", + dest="lf", + help="Rerun only the tests that failed at the last run (or all if none failed)", + ) + group.addoption( + "--ff", + "--failed-first", + action="store_true", + dest="failedfirst", + help="Run all tests, but run the last failures first. " + "This may re-order tests and thus lead to " + "repeated fixture setup/teardown.", + ) + group.addoption( + "--nf", + "--new-first", + action="store_true", + dest="newfirst", + help="Run tests from new files first, then the rest of the tests " + "sorted by file mtime", + ) + group.addoption( + "--cache-show", + action="append", + nargs="?", + dest="cacheshow", + help=( + "Show cache contents, don't perform collection or tests. " + "Optional argument: glob (default: '*')." + ), + ) + group.addoption( + "--cache-clear", + action="store_true", + dest="cacheclear", + help="Remove all cache contents at start of test run", + ) + cache_dir_default = ".pytest_cache" + if "TOX_ENV_DIR" in os.environ: + cache_dir_default = os.path.join(os.environ["TOX_ENV_DIR"], cache_dir_default) + parser.addini("cache_dir", default=cache_dir_default, help="Cache directory path") + group.addoption( + "--lfnf", + "--last-failed-no-failures", + action="store", + dest="last_failed_no_failures", + choices=("all", "none"), + default="all", + help="With ``--lf``, determines whether to execute tests when there " + "are no previously (known) failures or when no " + "cached ``lastfailed`` data was found. " + "``all`` (the default) runs the full test suite again. " + "``none`` just emits a message about no known failures and exits successfully.", + ) + + +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + if config.option.cacheshow and not config.option.help: + from _pytest.main import wrap_session + + return wrap_session(config, cacheshow) + return None + + +@hookimpl(tryfirst=True) +def pytest_configure(config: Config) -> None: + """Configure cache system and register related plugins. + + Creates the Cache instance and registers the last-failed (LFPlugin) + and new-first (NFPlugin) plugins with the plugin manager. + + :param config: pytest configuration object. + """ + config.cache = Cache.for_config(config, _ispytest=True) + config.pluginmanager.register(LFPlugin(config), "lfplugin") + config.pluginmanager.register(NFPlugin(config), "nfplugin") + + +@fixture +def cache(request: FixtureRequest) -> Cache: + """Return a cache object that can persist state between testing sessions. + + cache.get(key, default) + cache.set(key, value) + + Keys must be ``/`` separated strings, where the first part is usually the + name of your plugin or application to avoid clashes with other cache users. + + Values can be any object handled by the json stdlib module. + """ + assert request.config.cache is not None + return request.config.cache + + +def pytest_report_header(config: Config) -> str | None: + """Display cachedir with --cache-show and if non-default.""" + if config.option.verbose > 0 or config.getini("cache_dir") != ".pytest_cache": + assert config.cache is not None + cachedir = config.cache._cachedir + # TODO: evaluate generating upward relative paths + # starting with .., ../.. if sensible + + try: + displaypath = cachedir.relative_to(config.rootpath) + except ValueError: + displaypath = cachedir + return f"cachedir: {displaypath}" + return None + + +def cacheshow(config: Config, session: Session) -> int: + """Display cache contents when --cache-show is used. + + Shows cached values and directories matching the specified glob pattern + (default: '*'). Displays cache location, cached test results, and + any cached directories created by plugins. + + :param config: pytest configuration object. + :param session: pytest session object. + :returns: Exit code (0 for success). + """ + from pprint import pformat + + assert config.cache is not None + + tw = TerminalWriter() + tw.line("cachedir: " + str(config.cache._cachedir)) + if not config.cache._cachedir.is_dir(): + tw.line("cache is empty") + return 0 + + glob = config.option.cacheshow[0] + if glob is None: + glob = "*" + + dummy = object() + basedir = config.cache._cachedir + vdir = basedir / Cache._CACHE_PREFIX_VALUES + tw.sep("-", f"cache values for {glob!r}") + for valpath in sorted(x for x in vdir.rglob(glob) if x.is_file()): + key = str(valpath.relative_to(vdir)) + val = config.cache.get(key, dummy) + if val is dummy: + tw.line(f"{key} contains unreadable content, will be ignored") + else: + tw.line(f"{key} contains:") + for line in pformat(val).splitlines(): + tw.line(" " + line) + + ddir = basedir / Cache._CACHE_PREFIX_DIRS + if ddir.is_dir(): + contents = sorted(ddir.rglob(glob)) + tw.sep("-", f"cache directories for {glob!r}") + for p in contents: + # if p.is_dir(): + # print("%s/" % p.relative_to(basedir)) + if p.is_file(): + key = str(p.relative_to(basedir)) + tw.line(f"{key} is a file of length {p.stat().st_size}") + return 0 diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/capture.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/capture.py new file mode 100644 index 0000000..6d98676 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/capture.py @@ -0,0 +1,1144 @@ +# mypy: allow-untyped-defs +"""Per-test stdout/stderr capturing mechanism.""" + +from __future__ import annotations + +import abc +import collections +from collections.abc import Generator +from collections.abc import Iterable +from collections.abc import Iterator +import contextlib +import io +from io import UnsupportedOperation +import os +import sys +from tempfile import TemporaryFile +from types import TracebackType +from typing import Any +from typing import AnyStr +from typing import BinaryIO +from typing import cast +from typing import Final +from typing import final +from typing import Generic +from typing import Literal +from typing import NamedTuple +from typing import TextIO +from typing import TYPE_CHECKING + + +if TYPE_CHECKING: + from typing_extensions import Self + +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import SubRequest +from _pytest.nodes import Collector +from _pytest.nodes import File +from _pytest.nodes import Item +from _pytest.reports import CollectReport + + +_CaptureMethod = Literal["fd", "sys", "no", "tee-sys"] + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--capture", + action="store", + default="fd", + metavar="method", + choices=["fd", "sys", "no", "tee-sys"], + help="Per-test capturing method: one of fd|sys|no|tee-sys", + ) + group._addoption( # private to use reserved lower-case short option + "-s", + action="store_const", + const="no", + dest="capture", + help="Shortcut for --capture=no", + ) + + +def _colorama_workaround() -> None: + """Ensure colorama is imported so that it attaches to the correct stdio + handles on Windows. + + colorama uses the terminal on import time. So if something does the + first import of colorama while I/O capture is active, colorama will + fail in various ways. + """ + if sys.platform.startswith("win32"): + try: + import colorama # noqa: F401 + except ImportError: + pass + + +def _readline_workaround() -> None: + """Ensure readline is imported early so it attaches to the correct stdio handles. + + This isn't a problem with the default GNU readline implementation, but in + some configurations, Python uses libedit instead (on macOS, and for prebuilt + binaries such as used by uv). + + In theory this is only needed if readline.backend == "libedit", but the + workaround consists of importing readline here, so we already worked around + the issue by the time we could check if we need to. + """ + try: + import readline # noqa: F401 + except ImportError: + pass + + +def _windowsconsoleio_workaround(stream: TextIO) -> None: + """Workaround for Windows Unicode console handling. + + Python 3.6 implemented Unicode console handling for Windows. This works + by reading/writing to the raw console handle using + ``{Read,Write}ConsoleW``. + + The problem is that we are going to ``dup2`` over the stdio file + descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the + handles used by Python to write to the console. Though there is still some + weirdness and the console handle seems to only be closed randomly and not + on the first call to ``CloseHandle``, or maybe it gets reopened with the + same handle value when we suspend capturing. + + The workaround in this case will reopen stdio with a different fd which + also means a different handle by replicating the logic in + "Py_lifecycle.c:initstdio/create_stdio". + + :param stream: + In practice ``sys.stdout`` or ``sys.stderr``, but given + here as parameter for unittesting purposes. + + See https://github.com/pytest-dev/py/issues/103. + """ + if not sys.platform.startswith("win32") or hasattr(sys, "pypy_version_info"): + return + + # Bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666). + if not hasattr(stream, "buffer"): # type: ignore[unreachable,unused-ignore] + return + + raw_stdout = stream.buffer.raw if hasattr(stream.buffer, "raw") else stream.buffer + + if not isinstance(raw_stdout, io._WindowsConsoleIO): # type: ignore[attr-defined,unused-ignore] + return + + def _reopen_stdio(f, mode): + if not hasattr(stream.buffer, "raw") and mode[0] == "w": + buffering = 0 + else: + buffering = -1 + + return io.TextIOWrapper( + open(os.dup(f.fileno()), mode, buffering), + f.encoding, + f.errors, + f.newlines, + f.line_buffering, + ) + + sys.stdin = _reopen_stdio(sys.stdin, "rb") + sys.stdout = _reopen_stdio(sys.stdout, "wb") + sys.stderr = _reopen_stdio(sys.stderr, "wb") + + +@hookimpl(wrapper=True) +def pytest_load_initial_conftests(early_config: Config) -> Generator[None]: + ns = early_config.known_args_namespace + if ns.capture == "fd": + _windowsconsoleio_workaround(sys.stdout) + _colorama_workaround() + _readline_workaround() + pluginmanager = early_config.pluginmanager + capman = CaptureManager(ns.capture) + pluginmanager.register(capman, "capturemanager") + + # Make sure that capturemanager is properly reset at final shutdown. + early_config.add_cleanup(capman.stop_global_capturing) + + # Finally trigger conftest loading but while capturing (issue #93). + capman.start_global_capturing() + try: + try: + yield + finally: + capman.suspend_global_capture() + except BaseException: + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + raise + + +# IO Helpers. + + +class EncodedFile(io.TextIOWrapper): + __slots__ = () + + @property + def name(self) -> str: + # Ensure that file.name is a string. Workaround for a Python bug + # fixed in >=3.7.4: https://bugs.python.org/issue36015 + return repr(self.buffer) + + @property + def mode(self) -> str: + # TextIOWrapper doesn't expose a mode, but at least some of our + # tests check it. + assert hasattr(self.buffer, "mode") + return cast(str, self.buffer.mode.replace("b", "")) + + +class CaptureIO(io.TextIOWrapper): + def __init__(self) -> None: + super().__init__(io.BytesIO(), encoding="UTF-8", newline="", write_through=True) + + def getvalue(self) -> str: + assert isinstance(self.buffer, io.BytesIO) + return self.buffer.getvalue().decode("UTF-8") + + +class TeeCaptureIO(CaptureIO): + def __init__(self, other: TextIO) -> None: + self._other = other + super().__init__() + + def write(self, s: str) -> int: + super().write(s) + return self._other.write(s) + + +class DontReadFromInput(TextIO): + @property + def encoding(self) -> str: + assert sys.__stdin__ is not None + return sys.__stdin__.encoding + + def read(self, size: int = -1) -> str: + raise OSError( + "pytest: reading from stdin while output is captured! Consider using `-s`." + ) + + readline = read + + def __next__(self) -> str: + return self.readline() + + def readlines(self, hint: int | None = -1) -> list[str]: + raise OSError( + "pytest: reading from stdin while output is captured! Consider using `-s`." + ) + + def __iter__(self) -> Iterator[str]: + return self + + def fileno(self) -> int: + raise UnsupportedOperation("redirected stdin is pseudofile, has no fileno()") + + def flush(self) -> None: + raise UnsupportedOperation("redirected stdin is pseudofile, has no flush()") + + def isatty(self) -> bool: + return False + + def close(self) -> None: + pass + + def readable(self) -> bool: + return False + + def seek(self, offset: int, whence: int = 0) -> int: + raise UnsupportedOperation("redirected stdin is pseudofile, has no seek(int)") + + def seekable(self) -> bool: + return False + + def tell(self) -> int: + raise UnsupportedOperation("redirected stdin is pseudofile, has no tell()") + + def truncate(self, size: int | None = None) -> int: + raise UnsupportedOperation("cannot truncate stdin") + + def write(self, data: str) -> int: + raise UnsupportedOperation("cannot write to stdin") + + def writelines(self, lines: Iterable[str]) -> None: + raise UnsupportedOperation("Cannot write to stdin") + + def writable(self) -> bool: + return False + + def __enter__(self) -> Self: + return self + + def __exit__( + self, + type: type[BaseException] | None, + value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + pass + + @property + def buffer(self) -> BinaryIO: + # The str/bytes doesn't actually matter in this type, so OK to fake. + return self # type: ignore[return-value] + + +# Capture classes. + + +class CaptureBase(abc.ABC, Generic[AnyStr]): + EMPTY_BUFFER: AnyStr + + @abc.abstractmethod + def __init__(self, fd: int) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def start(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def done(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def suspend(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def resume(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def writeorg(self, data: AnyStr) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def snap(self) -> AnyStr: + raise NotImplementedError() + + +patchsysdict = {0: "stdin", 1: "stdout", 2: "stderr"} + + +class NoCapture(CaptureBase[str]): + EMPTY_BUFFER = "" + + def __init__(self, fd: int) -> None: + pass + + def start(self) -> None: + pass + + def done(self) -> None: + pass + + def suspend(self) -> None: + pass + + def resume(self) -> None: + pass + + def snap(self) -> str: + return "" + + def writeorg(self, data: str) -> None: + pass + + +class SysCaptureBase(CaptureBase[AnyStr]): + def __init__( + self, fd: int, tmpfile: TextIO | None = None, *, tee: bool = False + ) -> None: + name = patchsysdict[fd] + self._old: TextIO = getattr(sys, name) + self.name = name + if tmpfile is None: + if name == "stdin": + tmpfile = DontReadFromInput() + else: + tmpfile = CaptureIO() if not tee else TeeCaptureIO(self._old) + self.tmpfile = tmpfile + self._state = "initialized" + + def repr(self, class_name: str) -> str: + return "<{} {} _old={} _state={!r} tmpfile={!r}>".format( + class_name, + self.name, + (hasattr(self, "_old") and repr(self._old)) or "", + self._state, + self.tmpfile, + ) + + def __repr__(self) -> str: + return "<{} {} _old={} _state={!r} tmpfile={!r}>".format( + self.__class__.__name__, + self.name, + (hasattr(self, "_old") and repr(self._old)) or "", + self._state, + self.tmpfile, + ) + + def _assert_state(self, op: str, states: tuple[str, ...]) -> None: + assert self._state in states, ( + "cannot {} in state {!r}: expected one of {}".format( + op, self._state, ", ".join(states) + ) + ) + + def start(self) -> None: + self._assert_state("start", ("initialized",)) + setattr(sys, self.name, self.tmpfile) + self._state = "started" + + def done(self) -> None: + self._assert_state("done", ("initialized", "started", "suspended", "done")) + if self._state == "done": + return + setattr(sys, self.name, self._old) + del self._old + self.tmpfile.close() + self._state = "done" + + def suspend(self) -> None: + self._assert_state("suspend", ("started", "suspended")) + setattr(sys, self.name, self._old) + self._state = "suspended" + + def resume(self) -> None: + self._assert_state("resume", ("started", "suspended")) + if self._state == "started": + return + setattr(sys, self.name, self.tmpfile) + self._state = "started" + + +class SysCaptureBinary(SysCaptureBase[bytes]): + EMPTY_BUFFER = b"" + + def snap(self) -> bytes: + self._assert_state("snap", ("started", "suspended")) + self.tmpfile.seek(0) + res = self.tmpfile.buffer.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def writeorg(self, data: bytes) -> None: + self._assert_state("writeorg", ("started", "suspended")) + self._old.flush() + self._old.buffer.write(data) + self._old.buffer.flush() + + +class SysCapture(SysCaptureBase[str]): + EMPTY_BUFFER = "" + + def snap(self) -> str: + self._assert_state("snap", ("started", "suspended")) + assert isinstance(self.tmpfile, CaptureIO) + res = self.tmpfile.getvalue() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def writeorg(self, data: str) -> None: + self._assert_state("writeorg", ("started", "suspended")) + self._old.write(data) + self._old.flush() + + +class FDCaptureBase(CaptureBase[AnyStr]): + def __init__(self, targetfd: int) -> None: + self.targetfd = targetfd + + try: + os.fstat(targetfd) + except OSError: + # FD capturing is conceptually simple -- create a temporary file, + # redirect the FD to it, redirect back when done. But when the + # target FD is invalid it throws a wrench into this lovely scheme. + # + # Tests themselves shouldn't care if the FD is valid, FD capturing + # should work regardless of external circumstances. So falling back + # to just sys capturing is not a good option. + # + # Further complications are the need to support suspend() and the + # possibility of FD reuse (e.g. the tmpfile getting the very same + # target FD). The following approach is robust, I believe. + self.targetfd_invalid: int | None = os.open(os.devnull, os.O_RDWR) + os.dup2(self.targetfd_invalid, targetfd) + else: + self.targetfd_invalid = None + self.targetfd_save = os.dup(targetfd) + + if targetfd == 0: + self.tmpfile = open(os.devnull, encoding="utf-8") + self.syscapture: CaptureBase[str] = SysCapture(targetfd) + else: + self.tmpfile = EncodedFile( + TemporaryFile(buffering=0), + encoding="utf-8", + errors="replace", + newline="", + write_through=True, + ) + if targetfd in patchsysdict: + self.syscapture = SysCapture(targetfd, self.tmpfile) + else: + self.syscapture = NoCapture(targetfd) + + self._state = "initialized" + + def __repr__(self) -> str: + return ( + f"<{self.__class__.__name__} {self.targetfd} oldfd={self.targetfd_save} " + f"_state={self._state!r} tmpfile={self.tmpfile!r}>" + ) + + def _assert_state(self, op: str, states: tuple[str, ...]) -> None: + assert self._state in states, ( + "cannot {} in state {!r}: expected one of {}".format( + op, self._state, ", ".join(states) + ) + ) + + def start(self) -> None: + """Start capturing on targetfd using memorized tmpfile.""" + self._assert_state("start", ("initialized",)) + os.dup2(self.tmpfile.fileno(), self.targetfd) + self.syscapture.start() + self._state = "started" + + def done(self) -> None: + """Stop capturing, restore streams, return original capture file, + seeked to position zero.""" + self._assert_state("done", ("initialized", "started", "suspended", "done")) + if self._state == "done": + return + os.dup2(self.targetfd_save, self.targetfd) + os.close(self.targetfd_save) + if self.targetfd_invalid is not None: + if self.targetfd_invalid != self.targetfd: + os.close(self.targetfd) + os.close(self.targetfd_invalid) + self.syscapture.done() + self.tmpfile.close() + self._state = "done" + + def suspend(self) -> None: + self._assert_state("suspend", ("started", "suspended")) + if self._state == "suspended": + return + self.syscapture.suspend() + os.dup2(self.targetfd_save, self.targetfd) + self._state = "suspended" + + def resume(self) -> None: + self._assert_state("resume", ("started", "suspended")) + if self._state == "started": + return + self.syscapture.resume() + os.dup2(self.tmpfile.fileno(), self.targetfd) + self._state = "started" + + +class FDCaptureBinary(FDCaptureBase[bytes]): + """Capture IO to/from a given OS-level file descriptor. + + snap() produces `bytes`. + """ + + EMPTY_BUFFER = b"" + + def snap(self) -> bytes: + self._assert_state("snap", ("started", "suspended")) + self.tmpfile.seek(0) + res = self.tmpfile.buffer.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res # type: ignore[return-value] + + def writeorg(self, data: bytes) -> None: + """Write to original file descriptor.""" + self._assert_state("writeorg", ("started", "suspended")) + os.write(self.targetfd_save, data) + + +class FDCapture(FDCaptureBase[str]): + """Capture IO to/from a given OS-level file descriptor. + + snap() produces text. + """ + + EMPTY_BUFFER = "" + + def snap(self) -> str: + self._assert_state("snap", ("started", "suspended")) + self.tmpfile.seek(0) + res = self.tmpfile.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def writeorg(self, data: str) -> None: + """Write to original file descriptor.""" + self._assert_state("writeorg", ("started", "suspended")) + # XXX use encoding of original stream + os.write(self.targetfd_save, data.encode("utf-8")) + + +# MultiCapture + + +# Generic NamedTuple only supported since Python 3.11. +if sys.version_info >= (3, 11) or TYPE_CHECKING: + + @final + class CaptureResult(NamedTuple, Generic[AnyStr]): + """The result of :method:`caplog.readouterr() `.""" + + out: AnyStr + err: AnyStr + +else: + + class CaptureResult( + collections.namedtuple("CaptureResult", ["out", "err"]), # noqa: PYI024 + Generic[AnyStr], + ): + """The result of :method:`caplog.readouterr() `.""" + + __slots__ = () + + +class MultiCapture(Generic[AnyStr]): + _state = None + _in_suspended = False + + def __init__( + self, + in_: CaptureBase[AnyStr] | None, + out: CaptureBase[AnyStr] | None, + err: CaptureBase[AnyStr] | None, + ) -> None: + self.in_: CaptureBase[AnyStr] | None = in_ + self.out: CaptureBase[AnyStr] | None = out + self.err: CaptureBase[AnyStr] | None = err + + def __repr__(self) -> str: + return ( + f"" + ) + + def start_capturing(self) -> None: + self._state = "started" + if self.in_: + self.in_.start() + if self.out: + self.out.start() + if self.err: + self.err.start() + + def pop_outerr_to_orig(self) -> tuple[AnyStr, AnyStr]: + """Pop current snapshot out/err capture and flush to orig streams.""" + out, err = self.readouterr() + if out: + assert self.out is not None + self.out.writeorg(out) + if err: + assert self.err is not None + self.err.writeorg(err) + return out, err + + def suspend_capturing(self, in_: bool = False) -> None: + self._state = "suspended" + if self.out: + self.out.suspend() + if self.err: + self.err.suspend() + if in_ and self.in_: + self.in_.suspend() + self._in_suspended = True + + def resume_capturing(self) -> None: + self._state = "started" + if self.out: + self.out.resume() + if self.err: + self.err.resume() + if self._in_suspended: + assert self.in_ is not None + self.in_.resume() + self._in_suspended = False + + def stop_capturing(self) -> None: + """Stop capturing and reset capturing streams.""" + if self._state == "stopped": + raise ValueError("was already stopped") + self._state = "stopped" + if self.out: + self.out.done() + if self.err: + self.err.done() + if self.in_: + self.in_.done() + + def is_started(self) -> bool: + """Whether actively capturing -- not suspended or stopped.""" + return self._state == "started" + + def readouterr(self) -> CaptureResult[AnyStr]: + out = self.out.snap() if self.out else "" + err = self.err.snap() if self.err else "" + # TODO: This type error is real, need to fix. + return CaptureResult(out, err) # type: ignore[arg-type] + + +def _get_multicapture(method: _CaptureMethod) -> MultiCapture[str]: + if method == "fd": + return MultiCapture(in_=FDCapture(0), out=FDCapture(1), err=FDCapture(2)) + elif method == "sys": + return MultiCapture(in_=SysCapture(0), out=SysCapture(1), err=SysCapture(2)) + elif method == "no": + return MultiCapture(in_=None, out=None, err=None) + elif method == "tee-sys": + return MultiCapture( + in_=None, out=SysCapture(1, tee=True), err=SysCapture(2, tee=True) + ) + raise ValueError(f"unknown capturing method: {method!r}") + + +# CaptureManager and CaptureFixture + + +class CaptureManager: + """The capture plugin. + + Manages that the appropriate capture method is enabled/disabled during + collection and each test phase (setup, call, teardown). After each of + those points, the captured output is obtained and attached to the + collection/runtest report. + + There are two levels of capture: + + * global: enabled by default and can be suppressed by the ``-s`` + option. This is always enabled/disabled during collection and each test + phase. + + * fixture: when a test function or one of its fixture depend on the + ``capsys`` or ``capfd`` fixtures. In this case special handling is + needed to ensure the fixtures take precedence over the global capture. + """ + + def __init__(self, method: _CaptureMethod) -> None: + self._method: Final = method + self._global_capturing: MultiCapture[str] | None = None + self._capture_fixture: CaptureFixture[Any] | None = None + + def __repr__(self) -> str: + return ( + f"" + ) + + def is_capturing(self) -> str | bool: + if self.is_globally_capturing(): + return "global" + if self._capture_fixture: + return f"fixture {self._capture_fixture.request.fixturename}" + return False + + # Global capturing control + + def is_globally_capturing(self) -> bool: + return self._method != "no" + + def start_global_capturing(self) -> None: + assert self._global_capturing is None + self._global_capturing = _get_multicapture(self._method) + self._global_capturing.start_capturing() + + def stop_global_capturing(self) -> None: + if self._global_capturing is not None: + self._global_capturing.pop_outerr_to_orig() + self._global_capturing.stop_capturing() + self._global_capturing = None + + def resume_global_capture(self) -> None: + # During teardown of the python process, and on rare occasions, capture + # attributes can be `None` while trying to resume global capture. + if self._global_capturing is not None: + self._global_capturing.resume_capturing() + + def suspend_global_capture(self, in_: bool = False) -> None: + if self._global_capturing is not None: + self._global_capturing.suspend_capturing(in_=in_) + + def suspend(self, in_: bool = False) -> None: + # Need to undo local capsys-et-al if it exists before disabling global capture. + self.suspend_fixture() + self.suspend_global_capture(in_) + + def resume(self) -> None: + self.resume_global_capture() + self.resume_fixture() + + def read_global_capture(self) -> CaptureResult[str]: + assert self._global_capturing is not None + return self._global_capturing.readouterr() + + # Fixture Control + + def set_fixture(self, capture_fixture: CaptureFixture[Any]) -> None: + if self._capture_fixture: + current_fixture = self._capture_fixture.request.fixturename + requested_fixture = capture_fixture.request.fixturename + capture_fixture.request.raiseerror( + f"cannot use {requested_fixture} and {current_fixture} at the same time" + ) + self._capture_fixture = capture_fixture + + def unset_fixture(self) -> None: + self._capture_fixture = None + + def activate_fixture(self) -> None: + """If the current item is using ``capsys`` or ``capfd``, activate + them so they take precedence over the global capture.""" + if self._capture_fixture: + self._capture_fixture._start() + + def deactivate_fixture(self) -> None: + """Deactivate the ``capsys`` or ``capfd`` fixture of this item, if any.""" + if self._capture_fixture: + self._capture_fixture.close() + + def suspend_fixture(self) -> None: + if self._capture_fixture: + self._capture_fixture._suspend() + + def resume_fixture(self) -> None: + if self._capture_fixture: + self._capture_fixture._resume() + + # Helper context managers + + @contextlib.contextmanager + def global_and_fixture_disabled(self) -> Generator[None]: + """Context manager to temporarily disable global and current fixture capturing.""" + do_fixture = self._capture_fixture and self._capture_fixture._is_started() + if do_fixture: + self.suspend_fixture() + do_global = self._global_capturing and self._global_capturing.is_started() + if do_global: + self.suspend_global_capture() + try: + yield + finally: + if do_global: + self.resume_global_capture() + if do_fixture: + self.resume_fixture() + + @contextlib.contextmanager + def item_capture(self, when: str, item: Item) -> Generator[None]: + self.resume_global_capture() + self.activate_fixture() + try: + yield + finally: + self.deactivate_fixture() + self.suspend_global_capture(in_=False) + + out, err = self.read_global_capture() + item.add_report_section(when, "stdout", out) + item.add_report_section(when, "stderr", err) + + # Hooks + + @hookimpl(wrapper=True) + def pytest_make_collect_report( + self, collector: Collector + ) -> Generator[None, CollectReport, CollectReport]: + if isinstance(collector, File): + self.resume_global_capture() + try: + rep = yield + finally: + self.suspend_global_capture() + out, err = self.read_global_capture() + if out: + rep.sections.append(("Captured stdout", out)) + if err: + rep.sections.append(("Captured stderr", err)) + else: + rep = yield + return rep + + @hookimpl(wrapper=True) + def pytest_runtest_setup(self, item: Item) -> Generator[None]: + with self.item_capture("setup", item): + return (yield) + + @hookimpl(wrapper=True) + def pytest_runtest_call(self, item: Item) -> Generator[None]: + with self.item_capture("call", item): + return (yield) + + @hookimpl(wrapper=True) + def pytest_runtest_teardown(self, item: Item) -> Generator[None]: + with self.item_capture("teardown", item): + return (yield) + + @hookimpl(tryfirst=True) + def pytest_keyboard_interrupt(self) -> None: + self.stop_global_capturing() + + @hookimpl(tryfirst=True) + def pytest_internalerror(self) -> None: + self.stop_global_capturing() + + +class CaptureFixture(Generic[AnyStr]): + """Object returned by the :fixture:`capsys`, :fixture:`capsysbinary`, + :fixture:`capfd` and :fixture:`capfdbinary` fixtures.""" + + def __init__( + self, + captureclass: type[CaptureBase[AnyStr]], + request: SubRequest, + *, + config: dict[str, Any] | None = None, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self.captureclass: type[CaptureBase[AnyStr]] = captureclass + self.request = request + self._config = config if config else {} + self._capture: MultiCapture[AnyStr] | None = None + self._captured_out: AnyStr = self.captureclass.EMPTY_BUFFER + self._captured_err: AnyStr = self.captureclass.EMPTY_BUFFER + + def _start(self) -> None: + if self._capture is None: + self._capture = MultiCapture( + in_=None, + out=self.captureclass(1, **self._config), + err=self.captureclass(2, **self._config), + ) + self._capture.start_capturing() + + def close(self) -> None: + if self._capture is not None: + out, err = self._capture.pop_outerr_to_orig() + self._captured_out += out + self._captured_err += err + self._capture.stop_capturing() + self._capture = None + + def readouterr(self) -> CaptureResult[AnyStr]: + """Read and return the captured output so far, resetting the internal + buffer. + + :returns: + The captured content as a namedtuple with ``out`` and ``err`` + string attributes. + """ + captured_out, captured_err = self._captured_out, self._captured_err + if self._capture is not None: + out, err = self._capture.readouterr() + captured_out += out + captured_err += err + self._captured_out = self.captureclass.EMPTY_BUFFER + self._captured_err = self.captureclass.EMPTY_BUFFER + return CaptureResult(captured_out, captured_err) + + def _suspend(self) -> None: + """Suspend this fixture's own capturing temporarily.""" + if self._capture is not None: + self._capture.suspend_capturing() + + def _resume(self) -> None: + """Resume this fixture's own capturing temporarily.""" + if self._capture is not None: + self._capture.resume_capturing() + + def _is_started(self) -> bool: + """Whether actively capturing -- not disabled or closed.""" + if self._capture is not None: + return self._capture.is_started() + return False + + @contextlib.contextmanager + def disabled(self) -> Generator[None]: + """Temporarily disable capturing while inside the ``with`` block.""" + capmanager: CaptureManager = self.request.config.pluginmanager.getplugin( + "capturemanager" + ) + with capmanager.global_and_fixture_disabled(): + yield + + +# The fixtures. + + +@fixture +def capsys(request: SubRequest) -> Generator[CaptureFixture[str]]: + r"""Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``. + + The captured output is made available via ``capsys.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``text`` objects. + + Returns an instance of :class:`CaptureFixture[str] `. + + Example: + + .. code-block:: python + + def test_output(capsys): + print("hello") + captured = capsys.readouterr() + assert captured.out == "hello\n" + """ + capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture(SysCapture, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() + + +@fixture +def capteesys(request: SubRequest) -> Generator[CaptureFixture[str]]: + r"""Enable simultaneous text capturing and pass-through of writes + to ``sys.stdout`` and ``sys.stderr`` as defined by ``--capture=``. + + + The captured output is made available via ``capteesys.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``text`` objects. + + The output is also passed-through, allowing it to be "live-printed", + reported, or both as defined by ``--capture=``. + + Returns an instance of :class:`CaptureFixture[str] `. + + Example: + + .. code-block:: python + + def test_output(capteesys): + print("hello") + captured = capteesys.readouterr() + assert captured.out == "hello\n" + """ + capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture( + SysCapture, request, config=dict(tee=True), _ispytest=True + ) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() + + +@fixture +def capsysbinary(request: SubRequest) -> Generator[CaptureFixture[bytes]]: + r"""Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``. + + The captured output is made available via ``capsysbinary.readouterr()`` + method calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``bytes`` objects. + + Returns an instance of :class:`CaptureFixture[bytes] `. + + Example: + + .. code-block:: python + + def test_output(capsysbinary): + print("hello") + captured = capsysbinary.readouterr() + assert captured.out == b"hello\n" + """ + capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture(SysCaptureBinary, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() + + +@fixture +def capfd(request: SubRequest) -> Generator[CaptureFixture[str]]: + r"""Enable text capturing of writes to file descriptors ``1`` and ``2``. + + The captured output is made available via ``capfd.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``text`` objects. + + Returns an instance of :class:`CaptureFixture[str] `. + + Example: + + .. code-block:: python + + def test_system_echo(capfd): + os.system('echo "hello"') + captured = capfd.readouterr() + assert captured.out == "hello\n" + """ + capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture(FDCapture, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() + + +@fixture +def capfdbinary(request: SubRequest) -> Generator[CaptureFixture[bytes]]: + r"""Enable bytes capturing of writes to file descriptors ``1`` and ``2``. + + The captured output is made available via ``capfd.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``byte`` objects. + + Returns an instance of :class:`CaptureFixture[bytes] `. + + Example: + + .. code-block:: python + + def test_system_echo(capfdbinary): + os.system('echo "hello"') + captured = capfdbinary.readouterr() + assert captured.out == b"hello\n" + + """ + capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture(FDCaptureBinary, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/compat.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/compat.py new file mode 100644 index 0000000..2f5a4c8 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/compat.py @@ -0,0 +1,313 @@ +# mypy: allow-untyped-defs +"""Python version compatibility code and random general utilities.""" + +from __future__ import annotations + +from collections.abc import Callable +import enum +import functools +import inspect +from inspect import Parameter +from inspect import Signature +import os +from pathlib import Path +import sys +from typing import Any +from typing import Final +from typing import NoReturn + +import py + + +if sys.version_info >= (3, 14): + from annotationlib import Format + + +#: constant to prepare valuing pylib path replacements/lazy proxies later on +# intended for removal in pytest 8.0 or 9.0 + +# fmt: off +# intentional space to create a fake difference for the verification +LEGACY_PATH = py.path. local +# fmt: on + + +def legacy_path(path: str | os.PathLike[str]) -> LEGACY_PATH: + """Internal wrapper to prepare lazy proxies for legacy_path instances""" + return LEGACY_PATH(path) + + +# fmt: off +# Singleton type for NOTSET, as described in: +# https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions +class NotSetType(enum.Enum): + token = 0 +NOTSET: Final = NotSetType.token +# fmt: on + + +def iscoroutinefunction(func: object) -> bool: + """Return True if func is a coroutine function (a function defined with async + def syntax, and doesn't contain yield), or a function decorated with + @asyncio.coroutine. + + Note: copied and modified from Python 3.5's builtin coroutines.py to avoid + importing asyncio directly, which in turns also initializes the "logging" + module as a side-effect (see issue #8). + """ + return inspect.iscoroutinefunction(func) or getattr(func, "_is_coroutine", False) + + +def is_async_function(func: object) -> bool: + """Return True if the given function seems to be an async function or + an async generator.""" + return iscoroutinefunction(func) or inspect.isasyncgenfunction(func) + + +def signature(obj: Callable[..., Any]) -> Signature: + """Return signature without evaluating annotations.""" + if sys.version_info >= (3, 14): + return inspect.signature(obj, annotation_format=Format.STRING) + return inspect.signature(obj) + + +def getlocation(function, curdir: str | os.PathLike[str] | None = None) -> str: + function = get_real_func(function) + fn = Path(inspect.getfile(function)) + lineno = function.__code__.co_firstlineno + if curdir is not None: + try: + relfn = fn.relative_to(curdir) + except ValueError: + pass + else: + return f"{relfn}:{lineno + 1}" + return f"{fn}:{lineno + 1}" + + +def num_mock_patch_args(function) -> int: + """Return number of arguments used up by mock arguments (if any).""" + patchings = getattr(function, "patchings", None) + if not patchings: + return 0 + + mock_sentinel = getattr(sys.modules.get("mock"), "DEFAULT", object()) + ut_mock_sentinel = getattr(sys.modules.get("unittest.mock"), "DEFAULT", object()) + + return len( + [ + p + for p in patchings + if not p.attribute_name + and (p.new is mock_sentinel or p.new is ut_mock_sentinel) + ] + ) + + +def getfuncargnames( + function: Callable[..., object], + *, + name: str = "", + cls: type | None = None, +) -> tuple[str, ...]: + """Return the names of a function's mandatory arguments. + + Should return the names of all function arguments that: + * Aren't bound to an instance or type as in instance or class methods. + * Don't have default values. + * Aren't bound with functools.partial. + * Aren't replaced with mocks. + + The cls arguments indicate that the function should be treated as a bound + method even though it's not unless the function is a static method. + + The name parameter should be the original name in which the function was collected. + """ + # TODO(RonnyPfannschmidt): This function should be refactored when we + # revisit fixtures. The fixture mechanism should ask the node for + # the fixture names, and not try to obtain directly from the + # function object well after collection has occurred. + + # The parameters attribute of a Signature object contains an + # ordered mapping of parameter names to Parameter instances. This + # creates a tuple of the names of the parameters that don't have + # defaults. + try: + parameters = signature(function).parameters.values() + except (ValueError, TypeError) as e: + from _pytest.outcomes import fail + + fail( + f"Could not determine arguments of {function!r}: {e}", + pytrace=False, + ) + + arg_names = tuple( + p.name + for p in parameters + if ( + p.kind is Parameter.POSITIONAL_OR_KEYWORD + or p.kind is Parameter.KEYWORD_ONLY + ) + and p.default is Parameter.empty + ) + if not name: + name = function.__name__ + + # If this function should be treated as a bound method even though + # it's passed as an unbound method or function, and its first parameter + # wasn't defined as positional only, remove the first parameter name. + if not any(p.kind is Parameter.POSITIONAL_ONLY for p in parameters) and ( + # Not using `getattr` because we don't want to resolve the staticmethod. + # Not using `cls.__dict__` because we want to check the entire MRO. + cls + and not isinstance( + inspect.getattr_static(cls, name, default=None), staticmethod + ) + ): + arg_names = arg_names[1:] + # Remove any names that will be replaced with mocks. + if hasattr(function, "__wrapped__"): + arg_names = arg_names[num_mock_patch_args(function) :] + return arg_names + + +def get_default_arg_names(function: Callable[..., Any]) -> tuple[str, ...]: + # Note: this code intentionally mirrors the code at the beginning of + # getfuncargnames, to get the arguments which were excluded from its result + # because they had default values. + return tuple( + p.name + for p in signature(function).parameters.values() + if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) + and p.default is not Parameter.empty + ) + + +_non_printable_ascii_translate_table = { + i: f"\\x{i:02x}" for i in range(128) if i not in range(32, 127) +} +_non_printable_ascii_translate_table.update( + {ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"} +) + + +def ascii_escaped(val: bytes | str) -> str: + r"""If val is pure ASCII, return it as an str, otherwise, escape + bytes objects into a sequence of escaped bytes: + + b'\xc3\xb4\xc5\xd6' -> r'\xc3\xb4\xc5\xd6' + + and escapes strings into a sequence of escaped unicode ids, e.g.: + + r'4\nV\U00043efa\x0eMXWB\x1e\u3028\u15fd\xcd\U0007d944' + + Note: + The obvious "v.decode('unicode-escape')" will return + valid UTF-8 unicode if it finds them in bytes, but we + want to return escaped bytes for any byte, even if they match + a UTF-8 string. + """ + if isinstance(val, bytes): + ret = val.decode("ascii", "backslashreplace") + else: + ret = val.encode("unicode_escape").decode("ascii") + return ret.translate(_non_printable_ascii_translate_table) + + +def get_real_func(obj): + """Get the real function object of the (possibly) wrapped object by + :func:`functools.wraps`, or :func:`functools.partial`.""" + obj = inspect.unwrap(obj) + + if isinstance(obj, functools.partial): + obj = obj.func + return obj + + +def getimfunc(func): + try: + return func.__func__ + except AttributeError: + return func + + +def safe_getattr(object: Any, name: str, default: Any) -> Any: + """Like getattr but return default upon any Exception or any OutcomeException. + + Attribute access can potentially fail for 'evil' Python objects. + See issue #214. + It catches OutcomeException because of #2490 (issue #580), new outcomes + are derived from BaseException instead of Exception (for more details + check #2707). + """ + from _pytest.outcomes import TEST_OUTCOME + + try: + return getattr(object, name, default) + except TEST_OUTCOME: + return default + + +def safe_isclass(obj: object) -> bool: + """Ignore any exception via isinstance on Python 3.""" + try: + return inspect.isclass(obj) + except Exception: + return False + + +def get_user_id() -> int | None: + """Return the current process's real user id or None if it could not be + determined. + + :return: The user id or None if it could not be determined. + """ + # mypy follows the version and platform checking expectation of PEP 484: + # https://mypy.readthedocs.io/en/stable/common_issues.html?highlight=platform#python-version-and-system-platform-checks + # Containment checks are too complex for mypy v1.5.0 and cause failure. + if sys.platform == "win32" or sys.platform == "emscripten": + # win32 does not have a getuid() function. + # Emscripten has a return 0 stub. + return None + else: + # On other platforms, a return value of -1 is assumed to indicate that + # the current process's real user id could not be determined. + ERROR = -1 + uid = os.getuid() + return uid if uid != ERROR else None + + +if sys.version_info >= (3, 11): + from typing import assert_never +else: + + def assert_never(value: NoReturn) -> NoReturn: + assert False, f"Unhandled value: {value} ({type(value).__name__})" + + +class CallableBool: + """ + A bool-like object that can also be called, returning its true/false value. + + Used for backwards compatibility in cases where something was supposed to be a method + but was implemented as a simple attribute by mistake (see `TerminalReporter.isatty`). + + Do not use in new code. + """ + + def __init__(self, value: bool) -> None: + self._value = value + + def __bool__(self) -> bool: + return self._value + + def __call__(self) -> bool: + return self._value + + +def running_on_ci() -> bool: + """Check if we're currently running on a CI system.""" + # Only enable CI mode if one of these env variables is defined and non-empty. + env_vars = ["CI", "BUILD_NUMBER"] + return any(os.environ.get(var) for var in env_vars) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/config/__init__.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/config/__init__.py new file mode 100644 index 0000000..9b2afe3 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/config/__init__.py @@ -0,0 +1,2166 @@ +# mypy: allow-untyped-defs +"""Command line options, config-file and conftest.py processing.""" + +from __future__ import annotations + +import argparse +import builtins +import collections.abc +from collections.abc import Callable +from collections.abc import Generator +from collections.abc import Iterable +from collections.abc import Iterator +from collections.abc import Mapping +from collections.abc import Sequence +import contextlib +import copy +import dataclasses +import enum +from functools import lru_cache +import glob +import importlib.metadata +import inspect +import os +import pathlib +import re +import shlex +import sys +from textwrap import dedent +import types +from types import FunctionType +from typing import Any +from typing import cast +from typing import Final +from typing import final +from typing import IO +from typing import TextIO +from typing import TYPE_CHECKING +import warnings + +import pluggy +from pluggy import HookimplMarker +from pluggy import HookimplOpts +from pluggy import HookspecMarker +from pluggy import HookspecOpts +from pluggy import PluginManager + +from .compat import PathAwareHookProxy +from .exceptions import PrintHelp as PrintHelp +from .exceptions import UsageError as UsageError +from .findpaths import determine_setup +from _pytest import __version__ +import _pytest._code +from _pytest._code import ExceptionInfo +from _pytest._code import filter_traceback +from _pytest._code.code import TracebackStyle +from _pytest._io import TerminalWriter +from _pytest.compat import assert_never +from _pytest.config.argparsing import Argument +from _pytest.config.argparsing import FILE_OR_DIR +from _pytest.config.argparsing import Parser +import _pytest.deprecated +import _pytest.hookspec +from _pytest.outcomes import fail +from _pytest.outcomes import Skipped +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import import_path +from _pytest.pathlib import ImportMode +from _pytest.pathlib import resolve_package_path +from _pytest.pathlib import safe_exists +from _pytest.stash import Stash +from _pytest.warning_types import PytestConfigWarning +from _pytest.warning_types import warn_explicit_for + + +if TYPE_CHECKING: + from _pytest.assertion.rewrite import AssertionRewritingHook + from _pytest.cacheprovider import Cache + from _pytest.terminal import TerminalReporter + +_PluggyPlugin = object +"""A type to represent plugin objects. + +Plugins can be any namespace, so we can't narrow it down much, but we use an +alias to make the intent clear. + +Ideally this type would be provided by pluggy itself. +""" + + +hookimpl = HookimplMarker("pytest") +hookspec = HookspecMarker("pytest") + + +@final +class ExitCode(enum.IntEnum): + """Encodes the valid exit codes by pytest. + + Currently users and plugins may supply other exit codes as well. + + .. versionadded:: 5.0 + """ + + #: Tests passed. + OK = 0 + #: Tests failed. + TESTS_FAILED = 1 + #: pytest was interrupted. + INTERRUPTED = 2 + #: An internal error got in the way. + INTERNAL_ERROR = 3 + #: pytest was misused. + USAGE_ERROR = 4 + #: pytest couldn't find tests. + NO_TESTS_COLLECTED = 5 + + __module__ = "pytest" + + +class ConftestImportFailure(Exception): + def __init__( + self, + path: pathlib.Path, + *, + cause: Exception, + ) -> None: + self.path = path + self.cause = cause + + def __str__(self) -> str: + return f"{type(self.cause).__name__}: {self.cause} (from {self.path})" + + +def filter_traceback_for_conftest_import_failure( + entry: _pytest._code.TracebackEntry, +) -> bool: + """Filter tracebacks entries which point to pytest internals or importlib. + + Make a special case for importlib because we use it to import test modules and conftest files + in _pytest.pathlib.import_path. + """ + return filter_traceback(entry) and "importlib" not in str(entry.path).split(os.sep) + + +def print_conftest_import_error(e: ConftestImportFailure, file: TextIO) -> None: + exc_info = ExceptionInfo.from_exception(e.cause) + tw = TerminalWriter(file) + tw.line(f"ImportError while loading conftest '{e.path}'.", red=True) + exc_info.traceback = exc_info.traceback.filter( + filter_traceback_for_conftest_import_failure + ) + exc_repr = ( + exc_info.getrepr(style="short", chain=False) + if exc_info.traceback + else exc_info.exconly() + ) + formatted_tb = str(exc_repr) + for line in formatted_tb.splitlines(): + tw.line(line.rstrip(), red=True) + + +def print_usage_error(e: UsageError, file: TextIO) -> None: + tw = TerminalWriter(file) + for msg in e.args: + tw.line(f"ERROR: {msg}\n", red=True) + + +def main( + args: list[str] | os.PathLike[str] | None = None, + plugins: Sequence[str | _PluggyPlugin] | None = None, +) -> int | ExitCode: + """Perform an in-process test run. + + :param args: + List of command line arguments. If `None` or not given, defaults to reading + arguments directly from the process command line (:data:`sys.argv`). + :param plugins: List of plugin objects to be auto-registered during initialization. + + :returns: An exit code. + """ + # Handle a single `--version` argument early to avoid starting up the entire pytest infrastructure. + new_args = sys.argv[1:] if args is None else args + if isinstance(new_args, Sequence) and new_args.count("--version") == 1: + sys.stdout.write(f"pytest {__version__}\n") + return ExitCode.OK + + old_pytest_version = os.environ.get("PYTEST_VERSION") + try: + os.environ["PYTEST_VERSION"] = __version__ + try: + config = _prepareconfig(new_args, plugins) + except ConftestImportFailure as e: + print_conftest_import_error(e, file=sys.stderr) + return ExitCode.USAGE_ERROR + + try: + ret: ExitCode | int = config.hook.pytest_cmdline_main(config=config) + try: + return ExitCode(ret) + except ValueError: + return ret + finally: + config._ensure_unconfigure() + except UsageError as e: + print_usage_error(e, file=sys.stderr) + return ExitCode.USAGE_ERROR + finally: + if old_pytest_version is None: + os.environ.pop("PYTEST_VERSION", None) + else: + os.environ["PYTEST_VERSION"] = old_pytest_version + + +def console_main() -> int: + """The CLI entry point of pytest. + + This function is not meant for programmable use; use `main()` instead. + """ + # https://docs.python.org/3/library/signal.html#note-on-sigpipe + try: + code = main() + sys.stdout.flush() + return code + except BrokenPipeError: + # Python flushes standard streams on exit; redirect remaining output + # to devnull to avoid another BrokenPipeError at shutdown + devnull = os.open(os.devnull, os.O_WRONLY) + os.dup2(devnull, sys.stdout.fileno()) + return 1 # Python exits with error code 1 on EPIPE + + +class cmdline: # compatibility namespace + main = staticmethod(main) + + +def filename_arg(path: str, optname: str) -> str: + """Argparse type validator for filename arguments. + + :path: Path of filename. + :optname: Name of the option. + """ + if os.path.isdir(path): + raise UsageError(f"{optname} must be a filename, given: {path}") + return path + + +def directory_arg(path: str, optname: str) -> str: + """Argparse type validator for directory arguments. + + :path: Path of directory. + :optname: Name of the option. + """ + if not os.path.isdir(path): + raise UsageError(f"{optname} must be a directory, given: {path}") + return path + + +# Plugins that cannot be disabled via "-p no:X" currently. +essential_plugins = ( + "mark", + "main", + "runner", + "fixtures", + "helpconfig", # Provides -p. +) + +default_plugins = ( + *essential_plugins, + "python", + "terminal", + "debugging", + "unittest", + "capture", + "skipping", + "legacypath", + "tmpdir", + "monkeypatch", + "recwarn", + "pastebin", + "assertion", + "junitxml", + "doctest", + "cacheprovider", + "setuponly", + "setupplan", + "stepwise", + "unraisableexception", + "threadexception", + "warnings", + "logging", + "reports", + "faulthandler", + "subtests", +) + +builtin_plugins = { + *default_plugins, + "pytester", + "pytester_assertions", +} + + +def get_config( + args: Iterable[str] | None = None, + plugins: Sequence[str | _PluggyPlugin] | None = None, +) -> Config: + # Subsequent calls to main will create a fresh instance. + pluginmanager = PytestPluginManager() + invocation_params = Config.InvocationParams( + args=args or (), + plugins=plugins, + dir=pathlib.Path.cwd(), + ) + config = Config(pluginmanager, invocation_params=invocation_params) + + if invocation_params.args: + # Handle any "-p no:plugin" args. + pluginmanager.consider_preparse(invocation_params.args, exclude_only=True) + + for spec in default_plugins: + pluginmanager.import_plugin(spec) + + return config + + +def get_plugin_manager() -> PytestPluginManager: + """Obtain a new instance of the + :py:class:`pytest.PytestPluginManager`, with default plugins + already loaded. + + This function can be used by integration with other tools, like hooking + into pytest to run tests into an IDE. + """ + return get_config().pluginmanager + + +def _prepareconfig( + args: list[str] | os.PathLike[str], + plugins: Sequence[str | _PluggyPlugin] | None = None, +) -> Config: + if isinstance(args, os.PathLike): + args = [os.fspath(args)] + elif not isinstance(args, list): + msg = ( # type:ignore[unreachable] + "`args` parameter expected to be a list of strings, got: {!r} (type: {})" + ) + raise TypeError(msg.format(args, type(args))) + + initial_config = get_config(args, plugins) + pluginmanager = initial_config.pluginmanager + try: + if plugins: + for plugin in plugins: + if isinstance(plugin, str): + pluginmanager.consider_pluginarg(plugin) + else: + pluginmanager.register(plugin) + config: Config = pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args + ) + return config + except BaseException: + initial_config._ensure_unconfigure() + raise + + +def _get_directory(path: pathlib.Path) -> pathlib.Path: + """Get the directory of a path - itself if already a directory.""" + if path.is_file(): + return path.parent + else: + return path + + +def _get_legacy_hook_marks( + method: Any, + hook_type: str, + opt_names: tuple[str, ...], +) -> dict[str, bool]: + if TYPE_CHECKING: + # abuse typeguard from importlib to avoid massive method type union that's lacking an alias + assert inspect.isroutine(method) + known_marks: set[str] = {m.name for m in getattr(method, "pytestmark", [])} + must_warn: list[str] = [] + opts: dict[str, bool] = {} + for opt_name in opt_names: + opt_attr = getattr(method, opt_name, AttributeError) + if opt_attr is not AttributeError: + must_warn.append(f"{opt_name}={opt_attr}") + opts[opt_name] = True + elif opt_name in known_marks: + must_warn.append(f"{opt_name}=True") + opts[opt_name] = True + else: + opts[opt_name] = False + if must_warn: + hook_opts = ", ".join(must_warn) + message = _pytest.deprecated.HOOK_LEGACY_MARKING.format( + type=hook_type, + fullname=method.__qualname__, + hook_opts=hook_opts, + ) + warn_explicit_for(cast(FunctionType, method), message) + return opts + + +@final +class PytestPluginManager(PluginManager): + """A :py:class:`pluggy.PluginManager ` with + additional pytest-specific functionality: + + * Loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and + ``pytest_plugins`` global variables found in plugins being loaded. + * ``conftest.py`` loading during start-up. + """ + + def __init__(self) -> None: + from _pytest.assertion import DummyRewriteHook + from _pytest.assertion import RewriteHook + + super().__init__("pytest") + + # -- State related to local conftest plugins. + # All loaded conftest modules. + self._conftest_plugins: set[types.ModuleType] = set() + # All conftest modules applicable for a directory. + # This includes the directory's own conftest modules as well + # as those of its parent directories. + self._dirpath2confmods: dict[pathlib.Path, list[types.ModuleType]] = {} + # Cutoff directory above which conftests are no longer discovered. + self._confcutdir: pathlib.Path | None = None + # If set, conftest loading is skipped. + self._noconftest = False + + # _getconftestmodules()'s call to _get_directory() causes a stat + # storm when it's called potentially thousands of times in a test + # session (#9478), often with the same path, so cache it. + self._get_directory = lru_cache(256)(_get_directory) + + # plugins that were explicitly skipped with pytest.skip + # list of (module name, skip reason) + # previously we would issue a warning when a plugin was skipped, but + # since we refactored warnings as first citizens of Config, they are + # just stored here to be used later. + self.skipped_plugins: list[tuple[str, str]] = [] + + self.add_hookspecs(_pytest.hookspec) + self.register(self) + if os.environ.get("PYTEST_DEBUG"): + err: IO[str] = sys.stderr + encoding: str = getattr(err, "encoding", "utf8") + try: + err = open( + os.dup(err.fileno()), + mode=err.mode, + buffering=1, + encoding=encoding, + ) + except Exception: + pass + self.trace.root.setwriter(err.write) + self.enable_tracing() + + # Config._consider_importhook will set a real object if required. + self.rewrite_hook: RewriteHook = DummyRewriteHook() + # Used to know when we are importing conftests after the pytest_configure stage. + self._configured = False + + def parse_hookimpl_opts( + self, plugin: _PluggyPlugin, name: str + ) -> HookimplOpts | None: + """:meta private:""" + # pytest hooks are always prefixed with "pytest_", + # so we avoid accessing possibly non-readable attributes + # (see issue #1073). + if not name.startswith("pytest_"): + return None + # Ignore names which cannot be hooks. + if name == "pytest_plugins": + return None + + opts = super().parse_hookimpl_opts(plugin, name) + if opts is not None: + return opts + + method = getattr(plugin, name) + # Consider only actual functions for hooks (#3775). + if not inspect.isroutine(method): + return None + # Collect unmarked hooks as long as they have the `pytest_' prefix. + legacy = _get_legacy_hook_marks( + method, "impl", ("tryfirst", "trylast", "optionalhook", "hookwrapper") + ) + return cast(HookimplOpts, legacy) + + def parse_hookspec_opts(self, module_or_class, name: str) -> HookspecOpts | None: + """:meta private:""" + opts = super().parse_hookspec_opts(module_or_class, name) + if opts is None: + method = getattr(module_or_class, name) + if name.startswith("pytest_"): + legacy = _get_legacy_hook_marks( + method, "spec", ("firstresult", "historic") + ) + opts = cast(HookspecOpts, legacy) + return opts + + def register(self, plugin: _PluggyPlugin, name: str | None = None) -> str | None: + if name in _pytest.deprecated.DEPRECATED_EXTERNAL_PLUGINS: + warnings.warn( + PytestConfigWarning( + "{} plugin has been merged into the core, " + "please remove it from your requirements.".format( + name.replace("_", "-") + ) + ) + ) + return None + plugin_name = super().register(plugin, name) + if plugin_name is not None: + self.hook.pytest_plugin_registered.call_historic( + kwargs=dict( + plugin=plugin, + plugin_name=plugin_name, + manager=self, + ) + ) + + if isinstance(plugin, types.ModuleType): + self.consider_module(plugin) + return plugin_name + + def getplugin(self, name: str): + # Support deprecated naming because plugins (xdist e.g.) use it. + plugin: _PluggyPlugin | None = self.get_plugin(name) + return plugin + + def hasplugin(self, name: str) -> bool: + """Return whether a plugin with the given name is registered.""" + return bool(self.get_plugin(name)) + + def pytest_configure(self, config: Config) -> None: + """:meta private:""" + # XXX now that the pluginmanager exposes hookimpl(tryfirst...) + # we should remove tryfirst/trylast as markers. + config.addinivalue_line( + "markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it first/as early as possible. " + "DEPRECATED, use @pytest.hookimpl(tryfirst=True) instead.", + ) + config.addinivalue_line( + "markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible. " + "DEPRECATED, use @pytest.hookimpl(trylast=True) instead.", + ) + self._configured = True + + # + # Internal API for local conftest plugin handling. + # + def _set_initial_conftests( + self, + args: Sequence[str | pathlib.Path], + pyargs: bool, + noconftest: bool, + rootpath: pathlib.Path, + confcutdir: pathlib.Path | None, + invocation_dir: pathlib.Path, + importmode: ImportMode | str, + *, + consider_namespace_packages: bool, + ) -> None: + """Load initial conftest files given a preparsed "namespace". + + As conftest files may add their own command line options which have + arguments ('--my-opt somepath') we might get some false positives. + All builtin and 3rd party plugins will have been loaded, however, so + common options will not confuse our logic here. + """ + self._confcutdir = ( + absolutepath(invocation_dir / confcutdir) if confcutdir else None + ) + self._noconftest = noconftest + self._using_pyargs = pyargs + foundanchor = False + for initial_path in args: + path = str(initial_path) + # remove node-id syntax + i = path.find("::") + if i != -1: + path = path[:i] + anchor = absolutepath(invocation_dir / path) + + # Ensure we do not break if what appears to be an anchor + # is in fact a very long option (#10169, #11394). + if safe_exists(anchor): + self._try_load_conftest( + anchor, + importmode, + rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + foundanchor = True + if not foundanchor: + self._try_load_conftest( + invocation_dir, + importmode, + rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + + def _is_in_confcutdir(self, path: pathlib.Path) -> bool: + """Whether to consider the given path to load conftests from.""" + if self._confcutdir is None: + return True + # The semantics here are literally: + # Do not load a conftest if it is found upwards from confcut dir. + # But this is *not* the same as: + # Load only conftests from confcutdir or below. + # At first glance they might seem the same thing, however we do support use cases where + # we want to load conftests that are not found in confcutdir or below, but are found + # in completely different directory hierarchies like packages installed + # in out-of-source trees. + # (see #9767 for a regression where the logic was inverted). + return path not in self._confcutdir.parents + + def _try_load_conftest( + self, + anchor: pathlib.Path, + importmode: str | ImportMode, + rootpath: pathlib.Path, + *, + consider_namespace_packages: bool, + ) -> None: + self._loadconftestmodules( + anchor, + importmode, + rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + # let's also consider test* subdirs + if anchor.is_dir(): + for x in anchor.glob("test*"): + if x.is_dir(): + self._loadconftestmodules( + x, + importmode, + rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + + def _loadconftestmodules( + self, + path: pathlib.Path, + importmode: str | ImportMode, + rootpath: pathlib.Path, + *, + consider_namespace_packages: bool, + ) -> None: + if self._noconftest: + return + + directory = self._get_directory(path) + + # Optimization: avoid repeated searches in the same directory. + # Assumes always called with same importmode and rootpath. + if directory in self._dirpath2confmods: + return + + clist = [] + for parent in reversed((directory, *directory.parents)): + if self._is_in_confcutdir(parent): + conftestpath = parent / "conftest.py" + if conftestpath.is_file(): + mod = self._importconftest( + conftestpath, + importmode, + rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + clist.append(mod) + self._dirpath2confmods[directory] = clist + + def _getconftestmodules(self, path: pathlib.Path) -> Sequence[types.ModuleType]: + directory = self._get_directory(path) + return self._dirpath2confmods.get(directory, ()) + + def _rget_with_confmod( + self, + name: str, + path: pathlib.Path, + ) -> tuple[types.ModuleType, Any]: + modules = self._getconftestmodules(path) + for mod in reversed(modules): + try: + return mod, getattr(mod, name) + except AttributeError: + continue + raise KeyError(name) + + def _importconftest( + self, + conftestpath: pathlib.Path, + importmode: str | ImportMode, + rootpath: pathlib.Path, + *, + consider_namespace_packages: bool, + ) -> types.ModuleType: + conftestpath_plugin_name = str(conftestpath) + existing = self.get_plugin(conftestpath_plugin_name) + if existing is not None: + return cast(types.ModuleType, existing) + + # conftest.py files there are not in a Python package all have module + # name "conftest", and thus conflict with each other. Clear the existing + # before loading the new one, otherwise the existing one will be + # returned from the module cache. + pkgpath = resolve_package_path(conftestpath) + if pkgpath is None: + try: + del sys.modules[conftestpath.stem] + except KeyError: + pass + + try: + mod = import_path( + conftestpath, + mode=importmode, + root=rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + except Exception as e: + assert e.__traceback__ is not None + raise ConftestImportFailure(conftestpath, cause=e) from e + + self._check_non_top_pytest_plugins(mod, conftestpath) + + self._conftest_plugins.add(mod) + dirpath = conftestpath.parent + if dirpath in self._dirpath2confmods: + for path, mods in self._dirpath2confmods.items(): + if dirpath in path.parents or path == dirpath: + if mod in mods: + raise AssertionError( + f"While trying to load conftest path {conftestpath!s}, " + f"found that the module {mod} is already loaded with path {mod.__file__}. " + "This is not supposed to happen. Please report this issue to pytest." + ) + mods.append(mod) + self.trace(f"loading conftestmodule {mod!r}") + self.consider_conftest(mod, registration_name=conftestpath_plugin_name) + return mod + + def _check_non_top_pytest_plugins( + self, + mod: types.ModuleType, + conftestpath: pathlib.Path, + ) -> None: + if ( + hasattr(mod, "pytest_plugins") + and self._configured + and not self._using_pyargs + ): + msg = ( + "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported:\n" + "It affects the entire test suite instead of just below the conftest as expected.\n" + " {}\n" + "Please move it to a top level conftest file at the rootdir:\n" + " {}\n" + "For more information, visit:\n" + " https://docs.pytest.org/en/stable/deprecations.html#pytest-plugins-in-non-top-level-conftest-files" + ) + fail(msg.format(conftestpath, self._confcutdir), pytrace=False) + + # + # API for bootstrapping plugin loading + # + # + + def consider_preparse( + self, args: Sequence[str], *, exclude_only: bool = False + ) -> None: + """:meta private:""" + i = 0 + n = len(args) + while i < n: + opt = args[i] + i += 1 + if isinstance(opt, str): + if opt == "-p": + try: + parg = args[i] + except IndexError: + return + i += 1 + elif opt.startswith("-p"): + parg = opt[2:] + else: + continue + parg = parg.strip() + if exclude_only and not parg.startswith("no:"): + continue + self.consider_pluginarg(parg) + + def consider_pluginarg(self, arg: str) -> None: + """:meta private:""" + if arg.startswith("no:"): + name = arg[3:] + if name in essential_plugins: + raise UsageError(f"plugin {name} cannot be disabled") + + # PR #4304: remove stepwise if cacheprovider is blocked. + if name == "cacheprovider": + self.set_blocked("stepwise") + self.set_blocked("pytest_stepwise") + + self.set_blocked(name) + if not name.startswith("pytest_"): + self.set_blocked("pytest_" + name) + else: + name = arg + # Unblock the plugin. + self.unblock(name) + if not name.startswith("pytest_"): + self.unblock("pytest_" + name) + self.import_plugin(arg, consider_entry_points=True) + + def consider_conftest( + self, conftestmodule: types.ModuleType, registration_name: str + ) -> None: + """:meta private:""" + self.register(conftestmodule, name=registration_name) + + def consider_env(self) -> None: + """:meta private:""" + self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS")) + + def consider_module(self, mod: types.ModuleType) -> None: + """:meta private:""" + self._import_plugin_specs(getattr(mod, "pytest_plugins", [])) + + def _import_plugin_specs( + self, spec: None | types.ModuleType | str | Sequence[str] + ) -> None: + plugins = _get_plugin_specs_as_list(spec) + for import_spec in plugins: + self.import_plugin(import_spec) + + def import_plugin(self, modname: str, consider_entry_points: bool = False) -> None: + """Import a plugin with ``modname``. + + If ``consider_entry_points`` is True, entry point names are also + considered to find a plugin. + """ + # Most often modname refers to builtin modules, e.g. "pytester", + # "terminal" or "capture". Those plugins are registered under their + # basename for historic purposes but must be imported with the + # _pytest prefix. + assert isinstance(modname, str), ( + f"module name as text required, got {modname!r}" + ) + if self.is_blocked(modname) or self.get_plugin(modname) is not None: + return + + importspec = "_pytest." + modname if modname in builtin_plugins else modname + self.rewrite_hook.mark_rewrite(importspec) + + if consider_entry_points: + loaded = self.load_setuptools_entrypoints("pytest11", name=modname) + if loaded: + return + + try: + __import__(importspec) + except ImportError as e: + raise ImportError( + f'Error importing plugin "{modname}": {e.args[0]}' + ).with_traceback(e.__traceback__) from e + + except Skipped as e: + self.skipped_plugins.append((modname, e.msg or "")) + else: + mod = sys.modules[importspec] + self.register(mod, modname) + + +def _get_plugin_specs_as_list( + specs: None | types.ModuleType | str | Sequence[str], +) -> list[str]: + """Parse a plugins specification into a list of plugin names.""" + # None means empty. + if specs is None: + return [] + # Workaround for #3899 - a submodule which happens to be called "pytest_plugins". + if isinstance(specs, types.ModuleType): + return [] + # Comma-separated list. + if isinstance(specs, str): + return specs.split(",") if specs else [] + # Direct specification. + if isinstance(specs, collections.abc.Sequence): + return list(specs) + raise UsageError( + f"Plugins may be specified as a sequence or a ','-separated string of plugin names. Got: {specs!r}" + ) + + +class Notset: + def __repr__(self): + return "" + + +notset = Notset() + + +def _iter_rewritable_modules(package_files: Iterable[str]) -> Iterator[str]: + """Given an iterable of file names in a source distribution, return the "names" that should + be marked for assertion rewrite. + + For example the package "pytest_mock/__init__.py" should be added as "pytest_mock" in + the assertion rewrite mechanism. + + This function has to deal with dist-info based distributions and egg based distributions + (which are still very much in use for "editable" installs). + + Here are the file names as seen in a dist-info based distribution: + + pytest_mock/__init__.py + pytest_mock/_version.py + pytest_mock/plugin.py + pytest_mock.egg-info/PKG-INFO + + Here are the file names as seen in an egg based distribution: + + src/pytest_mock/__init__.py + src/pytest_mock/_version.py + src/pytest_mock/plugin.py + src/pytest_mock.egg-info/PKG-INFO + LICENSE + setup.py + + We have to take in account those two distribution flavors in order to determine which + names should be considered for assertion rewriting. + + More information: + https://github.com/pytest-dev/pytest-mock/issues/167 + """ + package_files = list(package_files) + seen_some = False + for fn in package_files: + is_simple_module = "/" not in fn and fn.endswith(".py") + is_package = fn.count("/") == 1 and fn.endswith("__init__.py") + if is_simple_module: + module_name, _ = os.path.splitext(fn) + # we ignore "setup.py" at the root of the distribution + # as well as editable installation finder modules made by setuptools + if module_name != "setup" and not module_name.startswith("__editable__"): + seen_some = True + yield module_name + elif is_package: + package_name = os.path.dirname(fn) + seen_some = True + yield package_name + + if not seen_some: + # At this point we did not find any packages or modules suitable for assertion + # rewriting, so we try again by stripping the first path component (to account for + # "src" based source trees for example). + # This approach lets us have the common case continue to be fast, as egg-distributions + # are rarer. + new_package_files = [] + for fn in package_files: + parts = fn.split("/") + new_fn = "/".join(parts[1:]) + if new_fn: + new_package_files.append(new_fn) + if new_package_files: + yield from _iter_rewritable_modules(new_package_files) + + +@final +class Config: + """Access to configuration values, pluginmanager and plugin hooks. + + :param PytestPluginManager pluginmanager: + A pytest PluginManager. + + :param InvocationParams invocation_params: + Object containing parameters regarding the :func:`pytest.main` + invocation. + """ + + @final + @dataclasses.dataclass(frozen=True) + class InvocationParams: + """Holds parameters passed during :func:`pytest.main`. + + The object attributes are read-only. + + .. versionadded:: 5.1 + + .. note:: + + Note that the environment variable ``PYTEST_ADDOPTS`` and the ``addopts`` + configuration option are handled by pytest, not being included in the ``args`` attribute. + + Plugins accessing ``InvocationParams`` must be aware of that. + """ + + args: tuple[str, ...] + """The command-line arguments as passed to :func:`pytest.main`.""" + plugins: Sequence[str | _PluggyPlugin] | None + """Extra plugins, might be `None`.""" + dir: pathlib.Path + """The directory from which :func:`pytest.main` was invoked.""" + + def __init__( + self, + *, + args: Iterable[str], + plugins: Sequence[str | _PluggyPlugin] | None, + dir: pathlib.Path, + ) -> None: + object.__setattr__(self, "args", tuple(args)) + object.__setattr__(self, "plugins", plugins) + object.__setattr__(self, "dir", dir) + + class ArgsSource(enum.Enum): + """Indicates the source of the test arguments. + + .. versionadded:: 7.2 + """ + + #: Command line arguments. + ARGS = enum.auto() + #: Invocation directory. + INVOCATION_DIR = enum.auto() + INCOVATION_DIR = INVOCATION_DIR # backwards compatibility alias + #: 'testpaths' configuration value. + TESTPATHS = enum.auto() + + # Set by cacheprovider plugin. + cache: Cache + + def __init__( + self, + pluginmanager: PytestPluginManager, + *, + invocation_params: InvocationParams | None = None, + ) -> None: + if invocation_params is None: + invocation_params = self.InvocationParams( + args=(), plugins=None, dir=pathlib.Path.cwd() + ) + + self.option = argparse.Namespace() + """Access to command line option as attributes. + + :type: argparse.Namespace + """ + + self.invocation_params = invocation_params + """The parameters with which pytest was invoked. + + :type: InvocationParams + """ + + self._parser = Parser( + usage=f"%(prog)s [options] [{FILE_OR_DIR}] [{FILE_OR_DIR}] [...]", + processopt=self._processopt, + _ispytest=True, + ) + self.pluginmanager = pluginmanager + """The plugin manager handles plugin registration and hook invocation. + + :type: PytestPluginManager + """ + + self.stash = Stash() + """A place where plugins can store information on the config for their + own use. + + :type: Stash + """ + # Deprecated alias. Was never public. Can be removed in a few releases. + self._store = self.stash + + self.trace = self.pluginmanager.trace.root.get("config") + self.hook: pluggy.HookRelay = PathAwareHookProxy(self.pluginmanager.hook) # type: ignore[assignment] + self._inicache: dict[str, Any] = {} + self._opt2dest: dict[str, str] = {} + self._cleanup_stack = contextlib.ExitStack() + self.pluginmanager.register(self, "pytestconfig") + self._configured = False + self.hook.pytest_addoption.call_historic( + kwargs=dict(parser=self._parser, pluginmanager=self.pluginmanager) + ) + self.args_source = Config.ArgsSource.ARGS + self.args: list[str] = [] + + @property + def rootpath(self) -> pathlib.Path: + """The path to the :ref:`rootdir `. + + .. versionadded:: 6.1 + """ + return self._rootpath + + @property + def inipath(self) -> pathlib.Path | None: + """The path to the :ref:`configfile `. + + .. versionadded:: 6.1 + """ + return self._inipath + + def add_cleanup(self, func: Callable[[], None]) -> None: + """Add a function to be called when the config object gets out of + use (usually coinciding with pytest_unconfigure). + """ + self._cleanup_stack.callback(func) + + def _do_configure(self) -> None: + assert not self._configured + self._configured = True + self.hook.pytest_configure.call_historic(kwargs=dict(config=self)) + + def _ensure_unconfigure(self) -> None: + try: + if self._configured: + self._configured = False + try: + self.hook.pytest_unconfigure(config=self) + finally: + self.hook.pytest_configure._call_history = [] + finally: + try: + self._cleanup_stack.close() + finally: + self._cleanup_stack = contextlib.ExitStack() + + def get_terminal_writer(self) -> TerminalWriter: + terminalreporter: TerminalReporter | None = self.pluginmanager.get_plugin( + "terminalreporter" + ) + assert terminalreporter is not None + return terminalreporter._tw + + def pytest_cmdline_parse( + self, pluginmanager: PytestPluginManager, args: list[str] + ) -> Config: + try: + self.parse(args) + except UsageError: + # Handle `--version --version` and `--help` here in a minimal fashion. + # This gets done via helpconfig normally, but its + # pytest_cmdline_main is not called in case of errors. + if getattr(self.option, "version", False) or "--version" in args: + from _pytest.helpconfig import show_version_verbose + + # Note that `--version` (single argument) is handled early by `Config.main()`, so the only + # way we are reaching this point is via `--version --version`. + show_version_verbose(self) + elif ( + getattr(self.option, "help", False) or "--help" in args or "-h" in args + ): + self._parser.optparser.print_help() + sys.stdout.write( + "\nNOTE: displaying only minimal help due to UsageError.\n\n" + ) + + raise + + return self + + def notify_exception( + self, + excinfo: ExceptionInfo[BaseException], + option: argparse.Namespace | None = None, + ) -> None: + if option and getattr(option, "fulltrace", False): + style: TracebackStyle = "long" + else: + style = "native" + excrepr = excinfo.getrepr( + funcargs=True, showlocals=getattr(option, "showlocals", False), style=style + ) + res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo) + if not any(res): + for line in str(excrepr).split("\n"): + sys.stderr.write(f"INTERNALERROR> {line}\n") + sys.stderr.flush() + + def cwd_relative_nodeid(self, nodeid: str) -> str: + # nodeid's are relative to the rootpath, compute relative to cwd. + if self.invocation_params.dir != self.rootpath: + base_path_part, *nodeid_part = nodeid.split("::") + # Only process path part + fullpath = self.rootpath / base_path_part + relative_path = bestrelpath(self.invocation_params.dir, fullpath) + + nodeid = "::".join([relative_path, *nodeid_part]) + return nodeid + + @classmethod + def fromdictargs(cls, option_dict: Mapping[str, Any], args: list[str]) -> Config: + """Constructor usable for subprocesses.""" + config = get_config(args) + config.option.__dict__.update(option_dict) + config.parse(args, addopts=False) + for x in config.option.plugins: + config.pluginmanager.consider_pluginarg(x) + return config + + def _processopt(self, opt: Argument) -> None: + for name in opt._short_opts + opt._long_opts: + self._opt2dest[name] = opt.dest + + if hasattr(opt, "default"): + if not hasattr(self.option, opt.dest): + setattr(self.option, opt.dest, opt.default) + + @hookimpl(trylast=True) + def pytest_load_initial_conftests(self, early_config: Config) -> None: + # We haven't fully parsed the command line arguments yet, so + # early_config.args it not set yet. But we need it for + # discovering the initial conftests. So "pre-run" the logic here. + # It will be done for real in `parse()`. + args, _args_source = early_config._decide_args( + args=early_config.known_args_namespace.file_or_dir, + pyargs=early_config.known_args_namespace.pyargs, + testpaths=early_config.getini("testpaths"), + invocation_dir=early_config.invocation_params.dir, + rootpath=early_config.rootpath, + warn=False, + ) + self.pluginmanager._set_initial_conftests( + args=args, + pyargs=early_config.known_args_namespace.pyargs, + noconftest=early_config.known_args_namespace.noconftest, + rootpath=early_config.rootpath, + confcutdir=early_config.known_args_namespace.confcutdir, + invocation_dir=early_config.invocation_params.dir, + importmode=early_config.known_args_namespace.importmode, + consider_namespace_packages=early_config.getini( + "consider_namespace_packages" + ), + ) + + def _consider_importhook(self) -> None: + """Install the PEP 302 import hook if using assertion rewriting. + + Needs to parse the --assert= option from the commandline + and find all the installed plugins to mark them for rewriting + by the importhook. + """ + mode = getattr(self.known_args_namespace, "assertmode", "plain") + + disable_autoload = getattr( + self.known_args_namespace, "disable_plugin_autoload", False + ) or bool(os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD")) + if mode == "rewrite": + import _pytest.assertion + + try: + hook = _pytest.assertion.install_importhook(self) + except SystemError: + mode = "plain" + else: + self._mark_plugins_for_rewrite(hook, disable_autoload) + self._warn_about_missing_assertion(mode) + + def _mark_plugins_for_rewrite( + self, hook: AssertionRewritingHook, disable_autoload: bool + ) -> None: + """Given an importhook, mark for rewrite any top-level + modules or packages in the distribution package for + all pytest plugins.""" + self.pluginmanager.rewrite_hook = hook + + if disable_autoload: + # We don't autoload from distribution package entry points, + # no need to continue. + return + + package_files = ( + str(file) + for dist in importlib.metadata.distributions() + if any(ep.group == "pytest11" for ep in dist.entry_points) + for file in dist.files or [] + ) + + for name in _iter_rewritable_modules(package_files): + hook.mark_rewrite(name) + + def _configure_python_path(self) -> None: + # `pythonpath = a b` will set `sys.path` to `[a, b, x, y, z, ...]` + for path in reversed(self.getini("pythonpath")): + sys.path.insert(0, str(path)) + self.add_cleanup(self._unconfigure_python_path) + + def _unconfigure_python_path(self) -> None: + for path in self.getini("pythonpath"): + path_str = str(path) + if path_str in sys.path: + sys.path.remove(path_str) + + def _validate_args(self, args: list[str], via: str) -> list[str]: + """Validate known args.""" + self._parser.extra_info["config source"] = via + try: + self._parser.parse_known_and_unknown_args( + args, namespace=copy.copy(self.option) + ) + finally: + self._parser.extra_info.pop("config source", None) + + return args + + def _decide_args( + self, + *, + args: list[str], + pyargs: bool, + testpaths: list[str], + invocation_dir: pathlib.Path, + rootpath: pathlib.Path, + warn: bool, + ) -> tuple[list[str], ArgsSource]: + """Decide the args (initial paths/nodeids) to use given the relevant inputs. + + :param warn: Whether can issue warnings. + + :returns: The args and the args source. Guaranteed to be non-empty. + """ + if args: + source = Config.ArgsSource.ARGS + result = args + else: + if invocation_dir == rootpath: + source = Config.ArgsSource.TESTPATHS + if pyargs: + result = testpaths + else: + result = [] + for path in testpaths: + result.extend(sorted(glob.iglob(path, recursive=True))) + if testpaths and not result: + if warn: + warning_text = ( + "No files were found in testpaths; " + "consider removing or adjusting your testpaths configuration. " + "Searching recursively from the current directory instead." + ) + self.issue_config_time_warning( + PytestConfigWarning(warning_text), stacklevel=3 + ) + else: + result = [] + if not result: + source = Config.ArgsSource.INVOCATION_DIR + result = [str(invocation_dir)] + return result, source + + @hookimpl(wrapper=True) + def pytest_collection(self) -> Generator[None, object, object]: + # Validate invalid configuration keys after collection is done so we + # take in account options added by late-loading conftest files. + try: + return (yield) + finally: + self._validate_config_options() + + def _checkversion(self) -> None: + import pytest + + minver_ini_value = self.inicfg.get("minversion", None) + minver = minver_ini_value.value if minver_ini_value is not None else None + if minver: + # Imported lazily to improve start-up time. + from packaging.version import Version + + if not isinstance(minver, str): + raise pytest.UsageError( + f"{self.inipath}: 'minversion' must be a single value" + ) + + if Version(minver) > Version(pytest.__version__): + raise pytest.UsageError( + f"{self.inipath}: 'minversion' requires pytest-{minver}, actual pytest-{pytest.__version__}'" + ) + + def _validate_config_options(self) -> None: + for key in sorted(self._get_unknown_ini_keys()): + self._warn_or_fail_if_strict(f"Unknown config option: {key}\n") + + def _validate_plugins(self) -> None: + required_plugins = sorted(self.getini("required_plugins")) + if not required_plugins: + return + + # Imported lazily to improve start-up time. + from packaging.requirements import InvalidRequirement + from packaging.requirements import Requirement + from packaging.version import Version + + plugin_info = self.pluginmanager.list_plugin_distinfo() + plugin_dist_info = {dist.project_name: dist.version for _, dist in plugin_info} + + missing_plugins = [] + for required_plugin in required_plugins: + try: + req = Requirement(required_plugin) + except InvalidRequirement: + missing_plugins.append(required_plugin) + continue + + if req.name not in plugin_dist_info: + missing_plugins.append(required_plugin) + elif not req.specifier.contains( + Version(plugin_dist_info[req.name]), prereleases=True + ): + missing_plugins.append(required_plugin) + + if missing_plugins: + raise UsageError( + "Missing required plugins: {}".format(", ".join(missing_plugins)), + ) + + def _warn_or_fail_if_strict(self, message: str) -> None: + strict_config = self.getini("strict_config") + if strict_config is None: + strict_config = self.getini("strict") + if strict_config: + raise UsageError(message) + + self.issue_config_time_warning(PytestConfigWarning(message), stacklevel=3) + + def _get_unknown_ini_keys(self) -> set[str]: + known_keys = self._parser._inidict.keys() | self._parser._ini_aliases.keys() + return self.inicfg.keys() - known_keys + + def parse(self, args: list[str], addopts: bool = True) -> None: + # Parse given cmdline arguments into this config object. + assert self.args == [], ( + "can only parse cmdline args at most once per Config object" + ) + + self.hook.pytest_addhooks.call_historic( + kwargs=dict(pluginmanager=self.pluginmanager) + ) + + if addopts: + env_addopts = os.environ.get("PYTEST_ADDOPTS", "") + if len(env_addopts): + args[:] = ( + self._validate_args(shlex.split(env_addopts), "via PYTEST_ADDOPTS") + + args + ) + + ns = self._parser.parse_known_args(args, namespace=copy.copy(self.option)) + rootpath, inipath, inicfg, ignored_config_files = determine_setup( + inifile=ns.inifilename, + override_ini=ns.override_ini, + args=ns.file_or_dir, + rootdir_cmd_arg=ns.rootdir or None, + invocation_dir=self.invocation_params.dir, + ) + self._rootpath = rootpath + self._inipath = inipath + self._ignored_config_files = ignored_config_files + self.inicfg = inicfg + self._parser.extra_info["rootdir"] = str(self.rootpath) + self._parser.extra_info["inifile"] = str(self.inipath) + + self._parser.addini("addopts", "Extra command line options", "args") + self._parser.addini("minversion", "Minimally required pytest version") + self._parser.addini( + "pythonpath", type="paths", help="Add paths to sys.path", default=[] + ) + self._parser.addini( + "required_plugins", + "Plugins that must be present for pytest to run", + type="args", + default=[], + ) + + if addopts: + args[:] = ( + self._validate_args(self.getini("addopts"), "via addopts config") + args + ) + + self.known_args_namespace = self._parser.parse_known_args( + args, namespace=copy.copy(self.option) + ) + self._checkversion() + self._consider_importhook() + self._configure_python_path() + self.pluginmanager.consider_preparse(args, exclude_only=False) + if ( + not os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD") + and not self.known_args_namespace.disable_plugin_autoload + ): + # Autoloading from distribution package entry point has + # not been disabled. + self.pluginmanager.load_setuptools_entrypoints("pytest11") + # Otherwise only plugins explicitly specified in PYTEST_PLUGINS + # are going to be loaded. + self.pluginmanager.consider_env() + + self._parser.parse_known_args(args, namespace=self.known_args_namespace) + + self._validate_plugins() + self._warn_about_skipped_plugins() + + if self.known_args_namespace.confcutdir is None: + if self.inipath is not None: + confcutdir = str(self.inipath.parent) + else: + confcutdir = str(self.rootpath) + self.known_args_namespace.confcutdir = confcutdir + try: + self.hook.pytest_load_initial_conftests( + early_config=self, args=args, parser=self._parser + ) + except ConftestImportFailure as e: + if self.known_args_namespace.help or self.known_args_namespace.version: + # we don't want to prevent --help/--version to work + # so just let it pass and print a warning at the end + self.issue_config_time_warning( + PytestConfigWarning(f"could not load initial conftests: {e.path}"), + stacklevel=2, + ) + else: + raise + + try: + self._parser.parse(args, namespace=self.option) + except PrintHelp: + return + + self.args, self.args_source = self._decide_args( + args=getattr(self.option, FILE_OR_DIR), + pyargs=self.option.pyargs, + testpaths=self.getini("testpaths"), + invocation_dir=self.invocation_params.dir, + rootpath=self.rootpath, + warn=True, + ) + + def issue_config_time_warning(self, warning: Warning, stacklevel: int) -> None: + """Issue and handle a warning during the "configure" stage. + + During ``pytest_configure`` we can't capture warnings using the ``catch_warnings_for_item`` + function because it is not possible to have hook wrappers around ``pytest_configure``. + + This function is mainly intended for plugins that need to issue warnings during + ``pytest_configure`` (or similar stages). + + :param warning: The warning instance. + :param stacklevel: stacklevel forwarded to warnings.warn. + """ + if self.pluginmanager.is_blocked("warnings"): + return + + cmdline_filters = self.known_args_namespace.pythonwarnings or [] + config_filters = self.getini("filterwarnings") + + with warnings.catch_warnings(record=True) as records: + warnings.simplefilter("always", type(warning)) + apply_warning_filters(config_filters, cmdline_filters) + warnings.warn(warning, stacklevel=stacklevel) + + if records: + frame = sys._getframe(stacklevel - 1) + location = frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name + self.hook.pytest_warning_recorded.call_historic( + kwargs=dict( + warning_message=records[0], + when="config", + nodeid="", + location=location, + ) + ) + + def addinivalue_line(self, name: str, line: str) -> None: + """Add a line to a configuration option. The option must have been + declared but might not yet be set in which case the line becomes + the first line in its value.""" + x = self.getini(name) + assert isinstance(x, list) + x.append(line) # modifies the cached list inline + + def getini(self, name: str) -> Any: + """Return configuration value the an :ref:`configuration file `. + + If a configuration value is not defined in a + :ref:`configuration file `, then the ``default`` value + provided while registering the configuration through + :func:`parser.addini ` will be returned. + Please note that you can even provide ``None`` as a valid + default value. + + If ``default`` is not provided while registering using + :func:`parser.addini `, then a default value + based on the ``type`` parameter passed to + :func:`parser.addini ` will be returned. + The default values based on ``type`` are: + ``paths``, ``pathlist``, ``args`` and ``linelist`` : empty list ``[]`` + ``bool`` : ``False`` + ``string`` : empty string ``""`` + ``int`` : ``0`` + ``float`` : ``0.0`` + + If neither the ``default`` nor the ``type`` parameter is passed + while registering the configuration through + :func:`parser.addini `, then the configuration + is treated as a string and a default empty string '' is returned. + + If the specified name hasn't been registered through a prior + :func:`parser.addini ` call (usually from a + plugin), a ValueError is raised. + """ + canonical_name = self._parser._ini_aliases.get(name, name) + try: + return self._inicache[canonical_name] + except KeyError: + pass + self._inicache[canonical_name] = val = self._getini(canonical_name) + return val + + # Meant for easy monkeypatching by legacypath plugin. + # Can be inlined back (with no cover removed) once legacypath is gone. + def _getini_unknown_type(self, name: str, type: str, value: object): + msg = ( + f"Option {name} has unknown configuration type {type} with value {value!r}" + ) + raise ValueError(msg) # pragma: no cover + + def _getini(self, name: str): + # If this is an alias, resolve to canonical name. + canonical_name = self._parser._ini_aliases.get(name, name) + + try: + _description, type, default = self._parser._inidict[canonical_name] + except KeyError as e: + raise ValueError(f"unknown configuration value: {name!r}") from e + + # Collect all possible values (canonical name + aliases) from inicfg. + # Each candidate is (ConfigValue, is_canonical). + candidates = [] + if canonical_name in self.inicfg: + candidates.append((self.inicfg[canonical_name], True)) + for alias, target in self._parser._ini_aliases.items(): + if target == canonical_name and alias in self.inicfg: + candidates.append((self.inicfg[alias], False)) + + if not candidates: + return default + + # Pick the best candidate based on precedence: + # 1. CLI override takes precedence over file, then + # 2. Canonical name takes precedence over alias. + selected = max(candidates, key=lambda x: (x[0].origin == "override", x[1]))[0] + value = selected.value + mode = selected.mode + + if mode == "ini": + # In ini mode, values are always str | list[str]. + assert isinstance(value, (str, list)) + return self._getini_ini(name, canonical_name, type, value, default) + elif mode == "toml": + return self._getini_toml(name, canonical_name, type, value, default) + else: + assert_never(mode) + + def _getini_ini( + self, + name: str, + canonical_name: str, + type: str, + value: str | list[str], + default: Any, + ): + """Handle config values read in INI mode. + + In INI mode, values are stored as str or list[str] only, and coerced + from string based on the registered type. + """ + # Note: some coercions are only required if we are reading from .ini + # files, because the file format doesn't contain type information, but + # when reading from toml (in ini mode) we will get either str or list of + # str values (see load_config_dict_from_file). For example: + # + # ini: + # a_line_list = "tests acceptance" + # + # in this case, we need to split the string to obtain a list of strings. + # + # toml (ini mode): + # a_line_list = ["tests", "acceptance"] + # + # in this case, we already have a list ready to use. + if type == "paths": + dp = ( + self.inipath.parent + if self.inipath is not None + else self.invocation_params.dir + ) + input_values = shlex.split(value) if isinstance(value, str) else value + return [dp / x for x in input_values] + elif type == "args": + return shlex.split(value) if isinstance(value, str) else value + elif type == "linelist": + if isinstance(value, str): + return [t for t in map(lambda x: x.strip(), value.split("\n")) if t] + else: + return value + elif type == "bool": + return _strtobool(str(value).strip()) + elif type == "string": + return value + elif type == "int": + if not isinstance(value, str): + raise TypeError( + f"Expected an int string for option {name} of type integer, but got: {value!r}" + ) from None + return int(value) + elif type == "float": + if not isinstance(value, str): + raise TypeError( + f"Expected a float string for option {name} of type float, but got: {value!r}" + ) from None + return float(value) + else: + return self._getini_unknown_type(name, type, value) + + def _getini_toml( + self, + name: str, + canonical_name: str, + type: str, + value: object, + default: Any, + ): + """Handle TOML config values with strict type validation and no coercion. + + In TOML mode, values already have native types from TOML parsing. + We validate types match expectations exactly, including list items. + """ + value_type = builtins.type(value).__name__ + if type == "paths": + # Expect a list of strings. + if not isinstance(value, list): + raise TypeError( + f"{self.inipath}: config option '{name}' expects a list for type 'paths', " + f"got {value_type}: {value!r}" + ) + for i, item in enumerate(value): + if not isinstance(item, str): + item_type = builtins.type(item).__name__ + raise TypeError( + f"{self.inipath}: config option '{name}' expects a list of strings, " + f"but item at index {i} is {item_type}: {item!r}" + ) + dp = ( + self.inipath.parent + if self.inipath is not None + else self.invocation_params.dir + ) + return [dp / x for x in value] + elif type in {"args", "linelist"}: + # Expect a list of strings. + if not isinstance(value, list): + raise TypeError( + f"{self.inipath}: config option '{name}' expects a list for type '{type}', " + f"got {value_type}: {value!r}" + ) + for i, item in enumerate(value): + if not isinstance(item, str): + item_type = builtins.type(item).__name__ + raise TypeError( + f"{self.inipath}: config option '{name}' expects a list of strings, " + f"but item at index {i} is {item_type}: {item!r}" + ) + return list(value) + elif type == "bool": + # Expect a boolean. + if not isinstance(value, bool): + raise TypeError( + f"{self.inipath}: config option '{name}' expects a bool, " + f"got {value_type}: {value!r}" + ) + return value + elif type == "int": + # Expect an integer (but not bool, which is a subclass of int). + if not isinstance(value, int) or isinstance(value, bool): + raise TypeError( + f"{self.inipath}: config option '{name}' expects an int, " + f"got {value_type}: {value!r}" + ) + return value + elif type == "float": + # Expect a float or integer only. + if not isinstance(value, (float, int)) or isinstance(value, bool): + raise TypeError( + f"{self.inipath}: config option '{name}' expects a float, " + f"got {value_type}: {value!r}" + ) + return value + elif type == "string": + # Expect a string. + if not isinstance(value, str): + raise TypeError( + f"{self.inipath}: config option '{name}' expects a string, " + f"got {value_type}: {value!r}" + ) + return value + else: + return self._getini_unknown_type(name, type, value) + + def _getconftest_pathlist( + self, name: str, path: pathlib.Path + ) -> list[pathlib.Path] | None: + try: + mod, relroots = self.pluginmanager._rget_with_confmod(name, path) + except KeyError: + return None + assert mod.__file__ is not None + modpath = pathlib.Path(mod.__file__).parent + values: list[pathlib.Path] = [] + for relroot in relroots: + if isinstance(relroot, os.PathLike): + relroot = pathlib.Path(relroot) + else: + relroot = relroot.replace("/", os.sep) + relroot = absolutepath(modpath / relroot) + values.append(relroot) + return values + + def getoption(self, name: str, default: Any = notset, skip: bool = False): + """Return command line option value. + + :param name: Name of the option. You may also specify + the literal ``--OPT`` option instead of the "dest" option name. + :param default: Fallback value if no option of that name is **declared** via :hook:`pytest_addoption`. + Note this parameter will be ignored when the option is **declared** even if the option's value is ``None``. + :param skip: If ``True``, raise :func:`pytest.skip` if option is undeclared or has a ``None`` value. + Note that even if ``True``, if a default was specified it will be returned instead of a skip. + """ + name = self._opt2dest.get(name, name) + try: + val = getattr(self.option, name) + if val is None and skip: + raise AttributeError(name) + return val + except AttributeError as e: + if default is not notset: + return default + if skip: + import pytest + + pytest.skip(f"no {name!r} option found") + raise ValueError(f"no option named {name!r}") from e + + def getvalue(self, name: str, path=None): + """Deprecated, use getoption() instead.""" + return self.getoption(name) + + def getvalueorskip(self, name: str, path=None): + """Deprecated, use getoption(skip=True) instead.""" + return self.getoption(name, skip=True) + + #: Verbosity type for failed assertions (see :confval:`verbosity_assertions`). + VERBOSITY_ASSERTIONS: Final = "assertions" + #: Verbosity type for test case execution (see :confval:`verbosity_test_cases`). + VERBOSITY_TEST_CASES: Final = "test_cases" + #: Verbosity type for failed subtests (see :confval:`verbosity_subtests`). + VERBOSITY_SUBTESTS: Final = "subtests" + + _VERBOSITY_INI_DEFAULT: Final = "auto" + + def get_verbosity(self, verbosity_type: str | None = None) -> int: + r"""Retrieve the verbosity level for a fine-grained verbosity type. + + :param verbosity_type: Verbosity type to get level for. If a level is + configured for the given type, that value will be returned. If the + given type is not a known verbosity type, the global verbosity + level will be returned. If the given type is None (default), the + global verbosity level will be returned. + + To configure a level for a fine-grained verbosity type, the + configuration file should have a setting for the configuration name + and a numeric value for the verbosity level. A special value of "auto" + can be used to explicitly use the global verbosity level. + + Example: + + .. tab:: toml + + .. code-block:: toml + + [tool.pytest] + verbosity_assertions = 2 + + .. tab:: ini + + .. code-block:: ini + + [pytest] + verbosity_assertions = 2 + + .. code-block:: console + + pytest -v + + .. code-block:: python + + print(config.get_verbosity()) # 1 + print(config.get_verbosity(Config.VERBOSITY_ASSERTIONS)) # 2 + """ + global_level = self.getoption("verbose", default=0) + assert isinstance(global_level, int) + if verbosity_type is None: + return global_level + + ini_name = Config._verbosity_ini_name(verbosity_type) + if ini_name not in self._parser._inidict: + return global_level + + level = self.getini(ini_name) + if level == Config._VERBOSITY_INI_DEFAULT: + return global_level + + return int(level) + + @staticmethod + def _verbosity_ini_name(verbosity_type: str) -> str: + return f"verbosity_{verbosity_type}" + + @staticmethod + def _add_verbosity_ini(parser: Parser, verbosity_type: str, help: str) -> None: + """Add a output verbosity configuration option for the given output type. + + :param parser: Parser for command line arguments and config-file values. + :param verbosity_type: Fine-grained verbosity category. + :param help: Description of the output this type controls. + + The value should be retrieved via a call to + :py:func:`config.get_verbosity(type) `. + """ + parser.addini( + Config._verbosity_ini_name(verbosity_type), + help=help, + type="string", + default=Config._VERBOSITY_INI_DEFAULT, + ) + + def _warn_about_missing_assertion(self, mode: str) -> None: + if not _assertion_supported(): + if mode == "plain": + warning_text = ( + "ASSERTIONS ARE NOT EXECUTED" + " and FAILING TESTS WILL PASS. Are you" + " using python -O?" + ) + else: + warning_text = ( + "assertions not in test modules or" + " plugins will be ignored" + " because assert statements are not executed " + "by the underlying Python interpreter " + "(are you using python -O?)\n" + ) + self.issue_config_time_warning( + PytestConfigWarning(warning_text), + stacklevel=3, + ) + + def _warn_about_skipped_plugins(self) -> None: + for module_name, msg in self.pluginmanager.skipped_plugins: + self.issue_config_time_warning( + PytestConfigWarning(f"skipped plugin {module_name!r}: {msg}"), + stacklevel=2, + ) + + +def _assertion_supported() -> bool: + try: + assert False + except AssertionError: + return True + else: + return False # type: ignore[unreachable] + + +def create_terminal_writer( + config: Config, file: TextIO | None = None +) -> TerminalWriter: + """Create a TerminalWriter instance configured according to the options + in the config object. + + Every code which requires a TerminalWriter object and has access to a + config object should use this function. + """ + tw = TerminalWriter(file=file) + + if config.option.color == "yes": + tw.hasmarkup = True + elif config.option.color == "no": + tw.hasmarkup = False + + if config.option.code_highlight == "yes": + tw.code_highlight = True + elif config.option.code_highlight == "no": + tw.code_highlight = False + + return tw + + +def _strtobool(val: str) -> bool: + """Convert a string representation of truth to True or False. + + True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values + are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if + 'val' is anything else. + + .. note:: Copied from distutils.util. + """ + val = val.lower() + if val in ("y", "yes", "t", "true", "on", "1"): + return True + elif val in ("n", "no", "f", "false", "off", "0"): + return False + else: + raise ValueError(f"invalid truth value {val!r}") + + +@lru_cache(maxsize=50) +def parse_warning_filter( + arg: str, *, escape: bool +) -> tuple[warnings._ActionKind, str, type[Warning], str, int]: + """Parse a warnings filter string. + + This is copied from warnings._setoption with the following changes: + + * Does not apply the filter. + * Escaping is optional. + * Raises UsageError so we get nice error messages on failure. + """ + __tracebackhide__ = True + error_template = dedent( + f"""\ + while parsing the following warning configuration: + + {arg} + + This error occurred: + + {{error}} + """ + ) + + parts = arg.split(":") + if len(parts) > 5: + doc_url = ( + "https://docs.python.org/3/library/warnings.html#describing-warning-filters" + ) + error = dedent( + f"""\ + Too many fields ({len(parts)}), expected at most 5 separated by colons: + + action:message:category:module:line + + For more information please consult: {doc_url} + """ + ) + raise UsageError(error_template.format(error=error)) + + while len(parts) < 5: + parts.append("") + action_, message, category_, module, lineno_ = (s.strip() for s in parts) + try: + action: warnings._ActionKind = warnings._getaction(action_) # type: ignore[attr-defined] + except warnings._OptionError as e: + raise UsageError(error_template.format(error=str(e))) from None + try: + category: type[Warning] = _resolve_warning_category(category_) + except ImportError: + raise + except Exception: + exc_info = ExceptionInfo.from_current() + exception_text = exc_info.getrepr(style="native") + raise UsageError(error_template.format(error=exception_text)) from None + if message and escape: + message = re.escape(message) + if module and escape: + module = re.escape(module) + r"\Z" + if lineno_: + try: + lineno = int(lineno_) + if lineno < 0: + raise ValueError("number is negative") + except ValueError as e: + raise UsageError( + error_template.format(error=f"invalid lineno {lineno_!r}: {e}") + ) from None + else: + lineno = 0 + try: + re.compile(message) + re.compile(module) + except re.error as e: + raise UsageError( + error_template.format(error=f"Invalid regex {e.pattern!r}: {e}") + ) from None + return action, message, category, module, lineno + + +def _resolve_warning_category(category: str) -> type[Warning]: + """ + Copied from warnings._getcategory, but changed so it lets exceptions (specially ImportErrors) + propagate so we can get access to their tracebacks (#9218). + """ + __tracebackhide__ = True + if not category: + return Warning + + if "." not in category: + import builtins as m + + klass = category + else: + module, _, klass = category.rpartition(".") + m = __import__(module, None, None, [klass]) + cat = getattr(m, klass) + if not issubclass(cat, Warning): + raise UsageError(f"{cat} is not a Warning subclass") + return cast(type[Warning], cat) + + +def apply_warning_filters( + config_filters: Iterable[str], cmdline_filters: Iterable[str] +) -> None: + """Applies pytest-configured filters to the warnings module""" + # Filters should have this precedence: cmdline options, config. + # Filters should be applied in the inverse order of precedence. + for arg in config_filters: + try: + warnings.filterwarnings(*parse_warning_filter(arg, escape=False)) + except ImportError as e: + warnings.warn( + f"Failed to import filter module '{e.name}': {arg}", PytestConfigWarning + ) + continue + + for arg in cmdline_filters: + try: + warnings.filterwarnings(*parse_warning_filter(arg, escape=True)) + except ImportError as e: + warnings.warn( + f"Failed to import filter module '{e.name}': {arg}", PytestConfigWarning + ) + continue diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/config/argparsing.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/config/argparsing.py new file mode 100644 index 0000000..9954088 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/config/argparsing.py @@ -0,0 +1,578 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import argparse +from collections.abc import Callable +from collections.abc import Mapping +from collections.abc import Sequence +import os +import sys +from typing import Any +from typing import final +from typing import Literal +from typing import NoReturn + +from .exceptions import UsageError +import _pytest._io +from _pytest.deprecated import check_ispytest + + +FILE_OR_DIR = "file_or_dir" + + +class NotSet: + def __repr__(self) -> str: + return "" + + +NOT_SET = NotSet() + + +@final +class Parser: + """Parser for command line arguments and config-file values. + + :ivar extra_info: Dict of generic param -> value to display in case + there's an error processing the command line arguments. + """ + + def __init__( + self, + usage: str | None = None, + processopt: Callable[[Argument], None] | None = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + + from _pytest._argcomplete import filescompleter + + self._processopt = processopt + self.extra_info: dict[str, Any] = {} + self.optparser = PytestArgumentParser(self, usage, self.extra_info) + anonymous_arggroup = self.optparser.add_argument_group("Custom options") + self._anonymous = OptionGroup( + anonymous_arggroup, "_anonymous", self, _ispytest=True + ) + self._groups = [self._anonymous] + file_or_dir_arg = self.optparser.add_argument(FILE_OR_DIR, nargs="*") + file_or_dir_arg.completer = filescompleter # type: ignore + + self._inidict: dict[str, tuple[str, str, Any]] = {} + # Maps alias -> canonical name. + self._ini_aliases: dict[str, str] = {} + + @property + def prog(self) -> str: + return self.optparser.prog + + @prog.setter + def prog(self, value: str) -> None: + self.optparser.prog = value + + def processoption(self, option: Argument) -> None: + if self._processopt: + if option.dest: + self._processopt(option) + + def getgroup( + self, name: str, description: str = "", after: str | None = None + ) -> OptionGroup: + """Get (or create) a named option Group. + + :param name: Name of the option group. + :param description: Long description for --help output. + :param after: Name of another group, used for ordering --help output. + :returns: The option group. + + The returned group object has an ``addoption`` method with the same + signature as :func:`parser.addoption ` but + will be shown in the respective group in the output of + ``pytest --help``. + """ + for group in self._groups: + if group.name == name: + return group + + arggroup = self.optparser.add_argument_group(description or name) + group = OptionGroup(arggroup, name, self, _ispytest=True) + i = 0 + for i, grp in enumerate(self._groups): + if grp.name == after: + break + self._groups.insert(i + 1, group) + # argparse doesn't provide a way to control `--help` order, so must + # access its internals ☹. + self.optparser._action_groups.insert(i + 1, self.optparser._action_groups.pop()) + return group + + def addoption(self, *opts: str, **attrs: Any) -> None: + """Register a command line option. + + :param opts: + Option names, can be short or long options. + :param attrs: + Same attributes as the argparse library's :meth:`add_argument() + ` function accepts. + + After command line parsing, options are available on the pytest config + object via ``config.option.NAME`` where ``NAME`` is usually set + by passing a ``dest`` attribute, for example + ``addoption("--long", dest="NAME", ...)``. + """ + self._anonymous.addoption(*opts, **attrs) + + def parse( + self, + args: Sequence[str | os.PathLike[str]], + namespace: argparse.Namespace | None = None, + ) -> argparse.Namespace: + """Parse the arguments. + + Unlike ``parse_known_args`` and ``parse_known_and_unknown_args``, + raises PrintHelp on `--help` and UsageError on unknown flags + + :meta private: + """ + from _pytest._argcomplete import try_argcomplete + + try_argcomplete(self.optparser) + strargs = [os.fspath(x) for x in args] + if namespace is None: + namespace = argparse.Namespace() + try: + namespace._raise_print_help = True + return self.optparser.parse_intermixed_args(strargs, namespace=namespace) + finally: + del namespace._raise_print_help + + def parse_known_args( + self, + args: Sequence[str | os.PathLike[str]], + namespace: argparse.Namespace | None = None, + ) -> argparse.Namespace: + """Parse the known arguments at this point. + + :returns: An argparse namespace object. + """ + return self.parse_known_and_unknown_args(args, namespace=namespace)[0] + + def parse_known_and_unknown_args( + self, + args: Sequence[str | os.PathLike[str]], + namespace: argparse.Namespace | None = None, + ) -> tuple[argparse.Namespace, list[str]]: + """Parse the known arguments at this point, and also return the + remaining unknown flag arguments. + + :returns: + A tuple containing an argparse namespace object for the known + arguments, and a list of unknown flag arguments. + """ + strargs = [os.fspath(x) for x in args] + if sys.version_info < (3, 12): + # Older argparse have a bugged parse_known_intermixed_args. + namespace, unknown = self.optparser.parse_known_args(strargs, namespace) + assert namespace is not None + file_or_dir = getattr(namespace, FILE_OR_DIR) + unknown_flags: list[str] = [] + for arg in unknown: + (unknown_flags if arg.startswith("-") else file_or_dir).append(arg) + return namespace, unknown_flags + else: + return self.optparser.parse_known_intermixed_args(strargs, namespace) + + def addini( + self, + name: str, + help: str, + type: Literal[ + "string", "paths", "pathlist", "args", "linelist", "bool", "int", "float" + ] + | None = None, + default: Any = NOT_SET, + *, + aliases: Sequence[str] = (), + ) -> None: + """Register a configuration file option. + + :param name: + Name of the configuration. + :param type: + Type of the configuration. Can be: + + * ``string``: a string + * ``bool``: a boolean + * ``args``: a list of strings, separated as in a shell + * ``linelist``: a list of strings, separated by line breaks + * ``paths``: a list of :class:`pathlib.Path`, separated as in a shell + * ``pathlist``: a list of ``py.path``, separated as in a shell + * ``int``: an integer + * ``float``: a floating-point number + + .. versionadded:: 8.4 + + The ``float`` and ``int`` types. + + For ``paths`` and ``pathlist`` types, they are considered relative to the config-file. + In case the execution is happening without a config-file defined, + they will be considered relative to the current working directory (for example with ``--override-ini``). + + .. versionadded:: 7.0 + The ``paths`` variable type. + + .. versionadded:: 8.1 + Use the current working directory to resolve ``paths`` and ``pathlist`` in the absence of a config-file. + + Defaults to ``string`` if ``None`` or not passed. + :param default: + Default value if no config-file option exists but is queried. + :param aliases: + Additional names by which this option can be referenced. + Aliases resolve to the canonical name. + + .. versionadded:: 9.0 + The ``aliases`` parameter. + + The value of configuration keys can be retrieved via a call to + :py:func:`config.getini(name) `. + """ + assert type in ( + None, + "string", + "paths", + "pathlist", + "args", + "linelist", + "bool", + "int", + "float", + ) + if type is None: + type = "string" + if default is NOT_SET: + default = get_ini_default_for_type(type) + + self._inidict[name] = (help, type, default) + + for alias in aliases: + if alias in self._inidict: + raise ValueError( + f"alias {alias!r} conflicts with existing configuration option" + ) + if (already := self._ini_aliases.get(alias)) is not None: + raise ValueError(f"{alias!r} is already an alias of {already!r}") + self._ini_aliases[alias] = name + + +def get_ini_default_for_type( + type: Literal[ + "string", "paths", "pathlist", "args", "linelist", "bool", "int", "float" + ], +) -> Any: + """ + Used by addini to get the default value for a given config option type, when + default is not supplied. + """ + if type in ("paths", "pathlist", "args", "linelist"): + return [] + elif type == "bool": + return False + elif type == "int": + return 0 + elif type == "float": + return 0.0 + else: + return "" + + +class ArgumentError(Exception): + """Raised if an Argument instance is created with invalid or + inconsistent arguments.""" + + def __init__(self, msg: str, option: Argument | str) -> None: + self.msg = msg + self.option_id = str(option) + + def __str__(self) -> str: + if self.option_id: + return f"option {self.option_id}: {self.msg}" + else: + return self.msg + + +class Argument: + """Class that mimics the necessary behaviour of optparse.Option. + + It's currently a least effort implementation and ignoring choices + and integer prefixes. + + https://docs.python.org/3/library/optparse.html#optparse-standard-option-types + """ + + def __init__(self, *names: str, **attrs: Any) -> None: + """Store params in private vars for use in add_argument.""" + self._attrs = attrs + self._short_opts: list[str] = [] + self._long_opts: list[str] = [] + try: + self.type = attrs["type"] + except KeyError: + pass + try: + # Attribute existence is tested in Config._processopt. + self.default = attrs["default"] + except KeyError: + pass + self._set_opt_strings(names) + dest: str | None = attrs.get("dest") + if dest: + self.dest = dest + elif self._long_opts: + self.dest = self._long_opts[0][2:].replace("-", "_") + else: + try: + self.dest = self._short_opts[0][1:] + except IndexError as e: + self.dest = "???" # Needed for the error repr. + raise ArgumentError("need a long or short option", self) from e + + def names(self) -> list[str]: + return self._short_opts + self._long_opts + + def attrs(self) -> Mapping[str, Any]: + # Update any attributes set by processopt. + for attr in ("default", "dest", "help", self.dest): + try: + self._attrs[attr] = getattr(self, attr) + except AttributeError: + pass + return self._attrs + + def _set_opt_strings(self, opts: Sequence[str]) -> None: + """Directly from optparse. + + Might not be necessary as this is passed to argparse later on. + """ + for opt in opts: + if len(opt) < 2: + raise ArgumentError( + f"invalid option string {opt!r}: " + "must be at least two characters long", + self, + ) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise ArgumentError( + f"invalid short option string {opt!r}: " + "must be of the form -x, (x any non-dash char)", + self, + ) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise ArgumentError( + f"invalid long option string {opt!r}: " + "must start with --, followed by non-dash", + self, + ) + self._long_opts.append(opt) + + def __repr__(self) -> str: + args: list[str] = [] + if self._short_opts: + args += ["_short_opts: " + repr(self._short_opts)] + if self._long_opts: + args += ["_long_opts: " + repr(self._long_opts)] + args += ["dest: " + repr(self.dest)] + if hasattr(self, "type"): + args += ["type: " + repr(self.type)] + if hasattr(self, "default"): + args += ["default: " + repr(self.default)] + return "Argument({})".format(", ".join(args)) + + +class OptionGroup: + """A group of options shown in its own section.""" + + def __init__( + self, + arggroup: argparse._ArgumentGroup, + name: str, + parser: Parser | None, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._arggroup = arggroup + self.name = name + self.options: list[Argument] = [] + self.parser = parser + + def addoption(self, *opts: str, **attrs: Any) -> None: + """Add an option to this group. + + If a shortened version of a long option is specified, it will + be suppressed in the help. ``addoption('--twowords', '--two-words')`` + results in help showing ``--two-words`` only, but ``--twowords`` gets + accepted **and** the automatic destination is in ``args.twowords``. + + :param opts: + Option names, can be short or long options. + :param attrs: + Same attributes as the argparse library's :meth:`add_argument() + ` function accepts. + """ + conflict = set(opts).intersection( + name for opt in self.options for name in opt.names() + ) + if conflict: + raise ValueError(f"option names {conflict} already added") + option = Argument(*opts, **attrs) + self._addoption_instance(option, shortupper=False) + + def _addoption(self, *opts: str, **attrs: Any) -> None: + option = Argument(*opts, **attrs) + self._addoption_instance(option, shortupper=True) + + def _addoption_instance(self, option: Argument, shortupper: bool = False) -> None: + if not shortupper: + for opt in option._short_opts: + if opt[0] == "-" and opt[1].islower(): + raise ValueError("lowercase shortoptions reserved") + + if self.parser: + self.parser.processoption(option) + + self._arggroup.add_argument(*option.names(), **option.attrs()) + self.options.append(option) + + +class PytestArgumentParser(argparse.ArgumentParser): + def __init__( + self, + parser: Parser, + usage: str | None, + extra_info: dict[str, str], + ) -> None: + self._parser = parser + super().__init__( + usage=usage, + add_help=False, + formatter_class=DropShorterLongHelpFormatter, + allow_abbrev=False, + fromfile_prefix_chars="@", + ) + # extra_info is a dict of (param -> value) to display if there's + # an usage error to provide more contextual information to the user. + self.extra_info = extra_info + + def error(self, message: str) -> NoReturn: + """Transform argparse error message into UsageError.""" + msg = f"{self.prog}: error: {message}" + if self.extra_info: + msg += "\n" + "\n".join( + f" {k}: {v}" for k, v in sorted(self.extra_info.items()) + ) + raise UsageError(self.format_usage() + msg) + + +class DropShorterLongHelpFormatter(argparse.HelpFormatter): + """Shorten help for long options that differ only in extra hyphens. + + - Collapse **long** options that are the same except for extra hyphens. + - Shortcut if there are only two options and one of them is a short one. + - Cache result on the action object as this is called at least 2 times. + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + # Use more accurate terminal width. + if "width" not in kwargs: + kwargs["width"] = _pytest._io.get_terminal_width() + super().__init__(*args, **kwargs) + + def _format_action_invocation(self, action: argparse.Action) -> str: + orgstr = super()._format_action_invocation(action) + if orgstr and orgstr[0] != "-": # only optional arguments + return orgstr + res: str | None = getattr(action, "_formatted_action_invocation", None) + if res: + return res + options = orgstr.split(", ") + if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2): + # a shortcut for '-h, --help' or '--abc', '-a' + action._formatted_action_invocation = orgstr # type: ignore + return orgstr + return_list = [] + short_long: dict[str, str] = {} + for option in options: + if len(option) == 2 or option[2] == " ": + continue + if not option.startswith("--"): + raise ArgumentError( + f'long optional argument without "--": [{option}]', option + ) + xxoption = option[2:] + shortened = xxoption.replace("-", "") + if shortened not in short_long or len(short_long[shortened]) < len( + xxoption + ): + short_long[shortened] = xxoption + # now short_long has been filled out to the longest with dashes + # **and** we keep the right option ordering from add_argument + for option in options: + if len(option) == 2 or option[2] == " ": + return_list.append(option) + if option[2:] == short_long.get(option.replace("-", "")): + return_list.append(option.replace(" ", "=", 1)) + formatted_action_invocation = ", ".join(return_list) + action._formatted_action_invocation = formatted_action_invocation # type: ignore + return formatted_action_invocation + + def _split_lines(self, text, width): + """Wrap lines after splitting on original newlines. + + This allows to have explicit line breaks in the help text. + """ + import textwrap + + lines = [] + for line in text.splitlines(): + lines.extend(textwrap.wrap(line.strip(), width)) + return lines + + +class OverrideIniAction(argparse.Action): + """Custom argparse action that makes a CLI flag equivalent to overriding an + option, in addition to behaving like `store_true`. + + This can simplify things since code only needs to inspect the config option + and not consider the CLI flag. + """ + + def __init__( + self, + option_strings: Sequence[str], + dest: str, + nargs: int | str | None = None, + *args, + ini_option: str, + ini_value: str, + **kwargs, + ) -> None: + super().__init__(option_strings, dest, 0, *args, **kwargs) + self.ini_option = ini_option + self.ini_value = ini_value + + def __call__( + self, + parser: argparse.ArgumentParser, + namespace: argparse.Namespace, + *args, + **kwargs, + ) -> None: + setattr(namespace, self.dest, True) + current_overrides = getattr(namespace, "override_ini", None) + if current_overrides is None: + current_overrides = [] + current_overrides.append(f"{self.ini_option}={self.ini_value}") + setattr(namespace, "override_ini", current_overrides) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/config/compat.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/config/compat.py new file mode 100644 index 0000000..21eab4c --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/config/compat.py @@ -0,0 +1,85 @@ +from __future__ import annotations + +from collections.abc import Mapping +import functools +from pathlib import Path +from typing import Any +import warnings + +import pluggy + +from ..compat import LEGACY_PATH +from ..compat import legacy_path +from ..deprecated import HOOK_LEGACY_PATH_ARG + + +# hookname: (Path, LEGACY_PATH) +imply_paths_hooks: Mapping[str, tuple[str, str]] = { + "pytest_ignore_collect": ("collection_path", "path"), + "pytest_collect_file": ("file_path", "path"), + "pytest_pycollect_makemodule": ("module_path", "path"), + "pytest_report_header": ("start_path", "startdir"), + "pytest_report_collectionfinish": ("start_path", "startdir"), +} + + +def _check_path(path: Path, fspath: LEGACY_PATH) -> None: + if Path(fspath) != path: + raise ValueError( + f"Path({fspath!r}) != {path!r}\n" + "if both path and fspath are given they need to be equal" + ) + + +class PathAwareHookProxy: + """ + this helper wraps around hook callers + until pluggy supports fixingcalls, this one will do + + it currently doesn't return full hook caller proxies for fixed hooks, + this may have to be changed later depending on bugs + """ + + def __init__(self, hook_relay: pluggy.HookRelay) -> None: + self._hook_relay = hook_relay + + def __dir__(self) -> list[str]: + return dir(self._hook_relay) + + def __getattr__(self, key: str) -> pluggy.HookCaller: + hook: pluggy.HookCaller = getattr(self._hook_relay, key) + if key not in imply_paths_hooks: + self.__dict__[key] = hook + return hook + else: + path_var, fspath_var = imply_paths_hooks[key] + + @functools.wraps(hook) + def fixed_hook(**kw: Any) -> Any: + path_value: Path | None = kw.pop(path_var, None) + fspath_value: LEGACY_PATH | None = kw.pop(fspath_var, None) + if fspath_value is not None: + warnings.warn( + HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg=fspath_var, pathlib_path_arg=path_var + ), + stacklevel=2, + ) + if path_value is not None: + if fspath_value is not None: + _check_path(path_value, fspath_value) + else: + fspath_value = legacy_path(path_value) + else: + assert fspath_value is not None + path_value = Path(fspath_value) + + kw[path_var] = path_value + kw[fspath_var] = fspath_value + return hook(**kw) + + fixed_hook.name = hook.name # type: ignore[attr-defined] + fixed_hook.spec = hook.spec # type: ignore[attr-defined] + fixed_hook.__name__ = key + self.__dict__[key] = fixed_hook + return fixed_hook # type: ignore[return-value] diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/config/exceptions.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/config/exceptions.py new file mode 100644 index 0000000..d84a9ea --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/config/exceptions.py @@ -0,0 +1,15 @@ +from __future__ import annotations + +from typing import final + + +@final +class UsageError(Exception): + """Error in pytest usage or invocation.""" + + __module__ = "pytest" + + +class PrintHelp(Exception): + """Raised when pytest should print its help to skip the rest of the + argument parsing and validation.""" diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/config/findpaths.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/config/findpaths.py new file mode 100644 index 0000000..3c628a0 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/config/findpaths.py @@ -0,0 +1,350 @@ +from __future__ import annotations + +from collections.abc import Iterable +from collections.abc import Sequence +from dataclasses import dataclass +from dataclasses import KW_ONLY +import os +from pathlib import Path +import sys +from typing import Literal +from typing import TypeAlias + +import iniconfig + +from .exceptions import UsageError +from _pytest.outcomes import fail +from _pytest.pathlib import absolutepath +from _pytest.pathlib import commonpath +from _pytest.pathlib import safe_exists + + +@dataclass(frozen=True) +class ConfigValue: + """Represents a configuration value with its origin and parsing mode. + + This allows tracking whether a value came from a configuration file + or from a CLI override (--override-ini), which is important for + determining precedence when dealing with ini option aliases. + + The mode tracks the parsing mode/data model used for the value: + - "ini": from INI files or [tool.pytest.ini_options], where the only + supported value types are `str` or `list[str]`. + - "toml": from TOML files (not in INI mode), where native TOML types + are preserved. + """ + + value: object + _: KW_ONLY + origin: Literal["file", "override"] + mode: Literal["ini", "toml"] + + +ConfigDict: TypeAlias = dict[str, ConfigValue] + + +def _parse_ini_config(path: Path) -> iniconfig.IniConfig: + """Parse the given generic '.ini' file using legacy IniConfig parser, returning + the parsed object. + + Raise UsageError if the file cannot be parsed. + """ + try: + return iniconfig.IniConfig(str(path)) + except iniconfig.ParseError as exc: + raise UsageError(str(exc)) from exc + + +def load_config_dict_from_file( + filepath: Path, +) -> ConfigDict | None: + """Load pytest configuration from the given file path, if supported. + + Return None if the file does not contain valid pytest configuration. + """ + # Configuration from ini files are obtained from the [pytest] section, if present. + if filepath.suffix == ".ini": + iniconfig = _parse_ini_config(filepath) + + if "pytest" in iniconfig: + return { + k: ConfigValue(v, origin="file", mode="ini") + for k, v in iniconfig["pytest"].items() + } + else: + # "pytest.ini" files are always the source of configuration, even if empty. + if filepath.name in {"pytest.ini", ".pytest.ini"}: + return {} + + # '.cfg' files are considered if they contain a "[tool:pytest]" section. + elif filepath.suffix == ".cfg": + iniconfig = _parse_ini_config(filepath) + + if "tool:pytest" in iniconfig.sections: + return { + k: ConfigValue(v, origin="file", mode="ini") + for k, v in iniconfig["tool:pytest"].items() + } + elif "pytest" in iniconfig.sections: + # If a setup.cfg contains a "[pytest]" section, we raise a failure to indicate users that + # plain "[pytest]" sections in setup.cfg files is no longer supported (#3086). + fail(CFG_PYTEST_SECTION.format(filename="setup.cfg"), pytrace=False) + + # '.toml' files are considered if they contain a [tool.pytest] table (toml mode) + # or [tool.pytest.ini_options] table (ini mode) for pyproject.toml, + # or [pytest] table (toml mode) for pytest.toml/.pytest.toml. + elif filepath.suffix == ".toml": + if sys.version_info >= (3, 11): + import tomllib + else: + import tomli as tomllib + + toml_text = filepath.read_text(encoding="utf-8") + try: + config = tomllib.loads(toml_text) + except tomllib.TOMLDecodeError as exc: + raise UsageError(f"{filepath}: {exc}") from exc + + # pytest.toml and .pytest.toml use [pytest] table directly. + if filepath.name in ("pytest.toml", ".pytest.toml"): + pytest_config = config.get("pytest", {}) + if pytest_config: + # TOML mode - preserve native TOML types. + return { + k: ConfigValue(v, origin="file", mode="toml") + for k, v in pytest_config.items() + } + # "pytest.toml" files are always the source of configuration, even if empty. + return {} + + # pyproject.toml uses [tool.pytest] or [tool.pytest.ini_options]. + else: + tool_pytest = config.get("tool", {}).get("pytest", {}) + + # Check for toml mode config: [tool.pytest] with content outside of ini_options. + toml_config = {k: v for k, v in tool_pytest.items() if k != "ini_options"} + # Check for ini mode config: [tool.pytest.ini_options]. + ini_config = tool_pytest.get("ini_options", None) + + if toml_config and ini_config: + raise UsageError( + f"{filepath}: Cannot use both [tool.pytest] (native TOML types) and " + "[tool.pytest.ini_options] (string-based INI format) simultaneously. " + "Please use [tool.pytest] with native TOML types (recommended) " + "or [tool.pytest.ini_options] for backwards compatibility." + ) + + if toml_config: + # TOML mode - preserve native TOML types. + return { + k: ConfigValue(v, origin="file", mode="toml") + for k, v in toml_config.items() + } + + elif ini_config is not None: + # INI mode - TOML supports richer data types than INI files, but we need to + # convert all scalar values to str for compatibility with the INI system. + def make_scalar(v: object) -> str | list[str]: + return v if isinstance(v, list) else str(v) + + return { + k: ConfigValue(make_scalar(v), origin="file", mode="ini") + for k, v in ini_config.items() + } + + return None + + +def locate_config( + invocation_dir: Path, + args: Iterable[Path], +) -> tuple[Path | None, Path | None, ConfigDict, Sequence[str]]: + """Search in the list of arguments for a valid ini-file for pytest, + and return a tuple of (rootdir, inifile, cfg-dict, ignored-config-files), where + ignored-config-files is a list of config basenames found that contain + pytest configuration but were ignored.""" + config_names = [ + "pytest.toml", + ".pytest.toml", + "pytest.ini", + ".pytest.ini", + "pyproject.toml", + "tox.ini", + "setup.cfg", + ] + args = [x for x in args if not str(x).startswith("-")] + if not args: + args = [invocation_dir] + found_pyproject_toml: Path | None = None + ignored_config_files: list[str] = [] + + for arg in args: + argpath = absolutepath(arg) + for base in (argpath, *argpath.parents): + for config_name in config_names: + p = base / config_name + if p.is_file(): + if p.name == "pyproject.toml" and found_pyproject_toml is None: + found_pyproject_toml = p + ini_config = load_config_dict_from_file(p) + if ini_config is not None: + index = config_names.index(config_name) + for remainder in config_names[index + 1 :]: + p2 = base / remainder + if ( + p2.is_file() + and load_config_dict_from_file(p2) is not None + ): + ignored_config_files.append(remainder) + return base, p, ini_config, ignored_config_files + if found_pyproject_toml is not None: + return found_pyproject_toml.parent, found_pyproject_toml, {}, [] + return None, None, {}, [] + + +def get_common_ancestor( + invocation_dir: Path, + paths: Iterable[Path], +) -> Path: + common_ancestor: Path | None = None + for path in paths: + if not path.exists(): + continue + if common_ancestor is None: + common_ancestor = path + else: + if common_ancestor in path.parents or path == common_ancestor: + continue + elif path in common_ancestor.parents: + common_ancestor = path + else: + shared = commonpath(path, common_ancestor) + if shared is not None: + common_ancestor = shared + if common_ancestor is None: + common_ancestor = invocation_dir + elif common_ancestor.is_file(): + common_ancestor = common_ancestor.parent + return common_ancestor + + +def get_dirs_from_args(args: Iterable[str]) -> list[Path]: + def is_option(x: str) -> bool: + return x.startswith("-") + + def get_file_part_from_node_id(x: str) -> str: + return x.split("::")[0] + + def get_dir_from_path(path: Path) -> Path: + if path.is_dir(): + return path + return path.parent + + # These look like paths but may not exist + possible_paths = ( + absolutepath(get_file_part_from_node_id(arg)) + for arg in args + if not is_option(arg) + ) + + return [get_dir_from_path(path) for path in possible_paths if safe_exists(path)] + + +def parse_override_ini(override_ini: Sequence[str] | None) -> ConfigDict: + """Parse the -o/--override-ini command line arguments and return the overrides. + + :raises UsageError: + If one of the values is malformed. + """ + overrides = {} + # override_ini is a list of "ini=value" options. + # Always use the last item if multiple values are set for same ini-name, + # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2. + for ini_config in override_ini or (): + try: + key, user_ini_value = ini_config.split("=", 1) + except ValueError as e: + raise UsageError( + f"-o/--override-ini expects option=value style (got: {ini_config!r})." + ) from e + else: + overrides[key] = ConfigValue(user_ini_value, origin="override", mode="ini") + return overrides + + +CFG_PYTEST_SECTION = "[pytest] section in {filename} files is no longer supported, change to [tool:pytest] instead." + + +def determine_setup( + *, + inifile: str | None, + override_ini: Sequence[str] | None, + args: Sequence[str], + rootdir_cmd_arg: str | None, + invocation_dir: Path, +) -> tuple[Path, Path | None, ConfigDict, Sequence[str]]: + """Determine the rootdir, inifile and ini configuration values from the + command line arguments. + + :param inifile: + The `--inifile` command line argument, if given. + :param override_ini: + The -o/--override-ini command line arguments, if given. + :param args: + The free command line arguments. + :param rootdir_cmd_arg: + The `--rootdir` command line argument, if given. + :param invocation_dir: + The working directory when pytest was invoked. + + :raises UsageError: + """ + rootdir = None + dirs = get_dirs_from_args(args) + ignored_config_files: Sequence[str] = [] + + if inifile: + inipath_ = absolutepath(inifile) + inipath: Path | None = inipath_ + inicfg = load_config_dict_from_file(inipath_) or {} + if rootdir_cmd_arg is None: + rootdir = inipath_.parent + else: + ancestor = get_common_ancestor(invocation_dir, dirs) + rootdir, inipath, inicfg, ignored_config_files = locate_config( + invocation_dir, [ancestor] + ) + if rootdir is None and rootdir_cmd_arg is None: + for possible_rootdir in (ancestor, *ancestor.parents): + if (possible_rootdir / "setup.py").is_file(): + rootdir = possible_rootdir + break + else: + if dirs != [ancestor]: + rootdir, inipath, inicfg, _ = locate_config(invocation_dir, dirs) + if rootdir is None: + rootdir = get_common_ancestor( + invocation_dir, [invocation_dir, ancestor] + ) + if is_fs_root(rootdir): + rootdir = ancestor + if rootdir_cmd_arg: + rootdir = absolutepath(os.path.expandvars(rootdir_cmd_arg)) + if not rootdir.is_dir(): + raise UsageError( + f"Directory '{rootdir}' not found. Check your '--rootdir' option." + ) + + ini_overrides = parse_override_ini(override_ini) + inicfg.update(ini_overrides) + + assert rootdir is not None + return rootdir, inipath, inicfg, ignored_config_files + + +def is_fs_root(p: Path) -> bool: + r""" + Return True if the given path is pointing to the root of the + file system ("/" on Unix and "C:\\" on Windows for example). + """ + return os.path.splitdrive(str(p))[1] == os.sep diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/debugging.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/debugging.py new file mode 100644 index 0000000..de1b268 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/debugging.py @@ -0,0 +1,407 @@ +# mypy: allow-untyped-defs +# ruff: noqa: T100 +"""Interactive debugging with PDB, the Python Debugger.""" + +from __future__ import annotations + +import argparse +from collections.abc import Callable +from collections.abc import Generator +import functools +import sys +import types +from typing import Any +import unittest + +from _pytest import outcomes +from _pytest._code import ExceptionInfo +from _pytest.capture import CaptureManager +from _pytest.config import Config +from _pytest.config import ConftestImportFailure +from _pytest.config import hookimpl +from _pytest.config import PytestPluginManager +from _pytest.config.argparsing import Parser +from _pytest.config.exceptions import UsageError +from _pytest.nodes import Node +from _pytest.reports import BaseReport +from _pytest.runner import CallInfo + + +def _validate_usepdb_cls(value: str) -> tuple[str, str]: + """Validate syntax of --pdbcls option.""" + try: + modname, classname = value.split(":") + except ValueError as e: + raise argparse.ArgumentTypeError( + f"{value!r} is not in the format 'modname:classname'" + ) from e + return (modname, classname) + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--pdb", + dest="usepdb", + action="store_true", + help="Start the interactive Python debugger on errors or KeyboardInterrupt", + ) + group.addoption( + "--pdbcls", + dest="usepdb_cls", + metavar="modulename:classname", + type=_validate_usepdb_cls, + help="Specify a custom interactive Python debugger for use with --pdb." + "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb", + ) + group.addoption( + "--trace", + dest="trace", + action="store_true", + help="Immediately break when running each test", + ) + + +def pytest_configure(config: Config) -> None: + import pdb + + if config.getvalue("trace"): + config.pluginmanager.register(PdbTrace(), "pdbtrace") + if config.getvalue("usepdb"): + config.pluginmanager.register(PdbInvoke(), "pdbinvoke") + + pytestPDB._saved.append( + (pdb.set_trace, pytestPDB._pluginmanager, pytestPDB._config) + ) + pdb.set_trace = pytestPDB.set_trace + pytestPDB._pluginmanager = config.pluginmanager + pytestPDB._config = config + + # NOTE: not using pytest_unconfigure, since it might get called although + # pytest_configure was not (if another plugin raises UsageError). + def fin() -> None: + ( + pdb.set_trace, + pytestPDB._pluginmanager, + pytestPDB._config, + ) = pytestPDB._saved.pop() + + config.add_cleanup(fin) + + +class pytestPDB: + """Pseudo PDB that defers to the real pdb.""" + + _pluginmanager: PytestPluginManager | None = None + _config: Config | None = None + _saved: list[ + tuple[Callable[..., None], PytestPluginManager | None, Config | None] + ] = [] + _recursive_debug = 0 + _wrapped_pdb_cls: tuple[type[Any], type[Any]] | None = None + + @classmethod + def _is_capturing(cls, capman: CaptureManager | None) -> str | bool: + if capman: + return capman.is_capturing() + return False + + @classmethod + def _import_pdb_cls(cls, capman: CaptureManager | None): + if not cls._config: + import pdb + + # Happens when using pytest.set_trace outside of a test. + return pdb.Pdb + + usepdb_cls = cls._config.getvalue("usepdb_cls") + + if cls._wrapped_pdb_cls and cls._wrapped_pdb_cls[0] == usepdb_cls: + return cls._wrapped_pdb_cls[1] + + if usepdb_cls: + modname, classname = usepdb_cls + + try: + __import__(modname) + mod = sys.modules[modname] + + # Handle --pdbcls=pdb:pdb.Pdb (useful e.g. with pdbpp). + parts = classname.split(".") + pdb_cls = getattr(mod, parts[0]) + for part in parts[1:]: + pdb_cls = getattr(pdb_cls, part) + except Exception as exc: + value = ":".join((modname, classname)) + raise UsageError( + f"--pdbcls: could not import {value!r}: {exc}" + ) from exc + else: + import pdb + + pdb_cls = pdb.Pdb + + wrapped_cls = cls._get_pdb_wrapper_class(pdb_cls, capman) + cls._wrapped_pdb_cls = (usepdb_cls, wrapped_cls) + return wrapped_cls + + @classmethod + def _get_pdb_wrapper_class(cls, pdb_cls, capman: CaptureManager | None): + import _pytest.config + + class PytestPdbWrapper(pdb_cls): + _pytest_capman = capman + _continued = False + + def do_debug(self, arg): + cls._recursive_debug += 1 + ret = super().do_debug(arg) + cls._recursive_debug -= 1 + return ret + + if hasattr(pdb_cls, "do_debug"): + do_debug.__doc__ = pdb_cls.do_debug.__doc__ + + def do_continue(self, arg): + ret = super().do_continue(arg) + if cls._recursive_debug == 0: + assert cls._config is not None + tw = _pytest.config.create_terminal_writer(cls._config) + tw.line() + + capman = self._pytest_capman + capturing = pytestPDB._is_capturing(capman) + if capturing: + if capturing == "global": + tw.sep(">", "PDB continue (IO-capturing resumed)") + else: + tw.sep( + ">", + f"PDB continue (IO-capturing resumed for {capturing})", + ) + assert capman is not None + capman.resume() + else: + tw.sep(">", "PDB continue") + assert cls._pluginmanager is not None + cls._pluginmanager.hook.pytest_leave_pdb(config=cls._config, pdb=self) + self._continued = True + return ret + + if hasattr(pdb_cls, "do_continue"): + do_continue.__doc__ = pdb_cls.do_continue.__doc__ + + do_c = do_cont = do_continue + + def do_quit(self, arg): + # Raise Exit outcome when quit command is used in pdb. + # + # This is a bit of a hack - it would be better if BdbQuit + # could be handled, but this would require to wrap the + # whole pytest run, and adjust the report etc. + ret = super().do_quit(arg) + + if cls._recursive_debug == 0: + outcomes.exit("Quitting debugger") + + return ret + + if hasattr(pdb_cls, "do_quit"): + do_quit.__doc__ = pdb_cls.do_quit.__doc__ + + do_q = do_quit + do_exit = do_quit + + def setup(self, f, tb): + """Suspend on setup(). + + Needed after do_continue resumed, and entering another + breakpoint again. + """ + ret = super().setup(f, tb) + if not ret and self._continued: + # pdb.setup() returns True if the command wants to exit + # from the interaction: do not suspend capturing then. + if self._pytest_capman: + self._pytest_capman.suspend_global_capture(in_=True) + return ret + + def get_stack(self, f, t): + stack, i = super().get_stack(f, t) + if f is None: + # Find last non-hidden frame. + i = max(0, len(stack) - 1) + while i and stack[i][0].f_locals.get("__tracebackhide__", False): + i -= 1 + return stack, i + + return PytestPdbWrapper + + @classmethod + def _init_pdb(cls, method, *args, **kwargs): + """Initialize PDB debugging, dropping any IO capturing.""" + import _pytest.config + + if cls._pluginmanager is None: + capman: CaptureManager | None = None + else: + capman = cls._pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend(in_=True) + + if cls._config: + tw = _pytest.config.create_terminal_writer(cls._config) + tw.line() + + if cls._recursive_debug == 0: + # Handle header similar to pdb.set_trace in py37+. + header = kwargs.pop("header", None) + if header is not None: + tw.sep(">", header) + else: + capturing = cls._is_capturing(capman) + if capturing == "global": + tw.sep(">", f"PDB {method} (IO-capturing turned off)") + elif capturing: + tw.sep( + ">", + f"PDB {method} (IO-capturing turned off for {capturing})", + ) + else: + tw.sep(">", f"PDB {method}") + + _pdb = cls._import_pdb_cls(capman)(**kwargs) + + if cls._pluginmanager: + cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config, pdb=_pdb) + return _pdb + + @classmethod + def set_trace(cls, *args, **kwargs) -> None: + """Invoke debugging via ``Pdb.set_trace``, dropping any IO capturing.""" + frame = sys._getframe().f_back + _pdb = cls._init_pdb("set_trace", *args, **kwargs) + _pdb.set_trace(frame) + + +class PdbInvoke: + def pytest_exception_interact( + self, node: Node, call: CallInfo[Any], report: BaseReport + ) -> None: + capman = node.config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stdout.write(err) + assert call.excinfo is not None + + if not isinstance(call.excinfo.value, unittest.SkipTest): + _enter_pdb(node, call.excinfo, report) + + def pytest_internalerror(self, excinfo: ExceptionInfo[BaseException]) -> None: + exc_or_tb = _postmortem_exc_or_tb(excinfo) + post_mortem(exc_or_tb) + + +class PdbTrace: + @hookimpl(wrapper=True) + def pytest_pyfunc_call(self, pyfuncitem) -> Generator[None, object, object]: + wrap_pytest_function_for_tracing(pyfuncitem) + return (yield) + + +def wrap_pytest_function_for_tracing(pyfuncitem) -> None: + """Change the Python function object of the given Function item by a + wrapper which actually enters pdb before calling the python function + itself, effectively leaving the user in the pdb prompt in the first + statement of the function.""" + _pdb = pytestPDB._init_pdb("runcall") + testfunction = pyfuncitem.obj + + # we can't just return `partial(pdb.runcall, testfunction)` because (on + # python < 3.7.4) runcall's first param is `func`, which means we'd get + # an exception if one of the kwargs to testfunction was called `func`. + @functools.wraps(testfunction) + def wrapper(*args, **kwargs) -> None: + func = functools.partial(testfunction, *args, **kwargs) + _pdb.runcall(func) + + pyfuncitem.obj = wrapper + + +def maybe_wrap_pytest_function_for_tracing(pyfuncitem) -> None: + """Wrap the given pytestfunct item for tracing support if --trace was given in + the command line.""" + if pyfuncitem.config.getvalue("trace"): + wrap_pytest_function_for_tracing(pyfuncitem) + + +def _enter_pdb( + node: Node, excinfo: ExceptionInfo[BaseException], rep: BaseReport +) -> BaseReport: + # XXX we reuse the TerminalReporter's terminalwriter + # because this seems to avoid some encoding related troubles + # for not completely clear reasons. + tw = node.config.pluginmanager.getplugin("terminalreporter")._tw + tw.line() + + showcapture = node.config.option.showcapture + + for sectionname, content in ( + ("stdout", rep.capstdout), + ("stderr", rep.capstderr), + ("log", rep.caplog), + ): + if showcapture in (sectionname, "all") and content: + tw.sep(">", "captured " + sectionname) + if content[-1:] == "\n": + content = content[:-1] + tw.line(content) + + tw.sep(">", "traceback") + rep.toterminal(tw) + tw.sep(">", "entering PDB") + tb_or_exc = _postmortem_exc_or_tb(excinfo) + rep._pdbshown = True # type: ignore[attr-defined] + post_mortem(tb_or_exc) + return rep + + +def _postmortem_exc_or_tb( + excinfo: ExceptionInfo[BaseException], +) -> types.TracebackType | BaseException: + from doctest import UnexpectedException + + get_exc = sys.version_info >= (3, 13) + if isinstance(excinfo.value, UnexpectedException): + # A doctest.UnexpectedException is not useful for post_mortem. + # Use the underlying exception instead: + underlying_exc = excinfo.value + if get_exc: + return underlying_exc.exc_info[1] + + return underlying_exc.exc_info[2] + elif isinstance(excinfo.value, ConftestImportFailure): + # A config.ConftestImportFailure is not useful for post_mortem. + # Use the underlying exception instead: + cause = excinfo.value.cause + if get_exc: + return cause + + assert cause.__traceback__ is not None + return cause.__traceback__ + else: + assert excinfo._excinfo is not None + if get_exc: + return excinfo._excinfo[1] + + return excinfo._excinfo[2] + + +def post_mortem(tb_or_exc: types.TracebackType | BaseException) -> None: + p = pytestPDB._init_pdb("post_mortem") + p.reset() + p.interaction(None, tb_or_exc) + if p.quitting: + outcomes.exit("Quitting debugger") diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/deprecated.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/deprecated.py new file mode 100644 index 0000000..cb5d2e9 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/deprecated.py @@ -0,0 +1,99 @@ +"""Deprecation messages and bits of code used elsewhere in the codebase that +is planned to be removed in the next pytest release. + +Keeping it in a central location makes it easy to track what is deprecated and should +be removed when the time comes. + +All constants defined in this module should be either instances of +:class:`PytestWarning`, or :class:`UnformattedWarning` +in case of warnings which need to format their messages. +""" + +from __future__ import annotations + +from warnings import warn + +from _pytest.warning_types import PytestDeprecationWarning +from _pytest.warning_types import PytestRemovedIn9Warning +from _pytest.warning_types import PytestRemovedIn10Warning +from _pytest.warning_types import UnformattedWarning + + +# set of plugins which have been integrated into the core; we use this list to ignore +# them during registration to avoid conflicts +DEPRECATED_EXTERNAL_PLUGINS = { + "pytest_catchlog", + "pytest_capturelog", + "pytest_faulthandler", + "pytest_subtests", +} + + +# This could have been removed pytest 8, but it's harmless and common, so no rush to remove. +YIELD_FIXTURE = PytestDeprecationWarning( + "@pytest.yield_fixture is deprecated.\n" + "Use @pytest.fixture instead; they are the same." +) + +# This deprecation is never really meant to be removed. +PRIVATE = PytestDeprecationWarning("A private pytest class or function was used.") + + +HOOK_LEGACY_PATH_ARG = UnformattedWarning( + PytestRemovedIn9Warning, + "The ({pylib_path_arg}: py.path.local) argument is deprecated, please use ({pathlib_path_arg}: pathlib.Path)\n" + "see https://docs.pytest.org/en/latest/deprecations.html" + "#py-path-local-arguments-for-hooks-replaced-with-pathlib-path", +) + +NODE_CTOR_FSPATH_ARG = UnformattedWarning( + PytestRemovedIn9Warning, + "The (fspath: py.path.local) argument to {node_type_name} is deprecated. " + "Please use the (path: pathlib.Path) argument instead.\n" + "See https://docs.pytest.org/en/latest/deprecations.html" + "#fspath-argument-for-node-constructors-replaced-with-pathlib-path", +) + +HOOK_LEGACY_MARKING = UnformattedWarning( + PytestDeprecationWarning, + "The hook{type} {fullname} uses old-style configuration options (marks or attributes).\n" + "Please use the pytest.hook{type}({hook_opts}) decorator instead\n" + " to configure the hooks.\n" + " See https://docs.pytest.org/en/latest/deprecations.html" + "#configuring-hook-specs-impls-using-markers", +) + +MARKED_FIXTURE = PytestRemovedIn9Warning( + "Marks applied to fixtures have no effect\n" + "See docs: https://docs.pytest.org/en/stable/deprecations.html#applying-a-mark-to-a-fixture-function" +) + +MONKEYPATCH_LEGACY_NAMESPACE_PACKAGES = PytestRemovedIn10Warning( + "monkeypatch.syspath_prepend() called with pkg_resources legacy namespace packages detected.\n" + "Legacy namespace packages (using pkg_resources.declare_namespace) are deprecated.\n" + "Please use native namespace packages (PEP 420) instead.\n" + "See https://docs.pytest.org/en/stable/deprecations.html#monkeypatch-fixup-namespace-packages" +) + +# You want to make some `__init__` or function "private". +# +# def my_private_function(some, args): +# ... +# +# Do this: +# +# def my_private_function(some, args, *, _ispytest: bool = False): +# check_ispytest(_ispytest) +# ... +# +# Change all internal/allowed calls to +# +# my_private_function(some, args, _ispytest=True) +# +# All other calls will get the default _ispytest=False and trigger +# the warning (possibly error in the future). + + +def check_ispytest(ispytest: bool) -> None: + if not ispytest: + warn(PRIVATE, stacklevel=3) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/doctest.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/doctest.py new file mode 100644 index 0000000..cd255f5 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/doctest.py @@ -0,0 +1,736 @@ +# mypy: allow-untyped-defs +"""Discover and run doctests in modules and test files.""" + +from __future__ import annotations + +import bdb +from collections.abc import Callable +from collections.abc import Generator +from collections.abc import Iterable +from collections.abc import Sequence +from contextlib import contextmanager +import functools +import inspect +import os +from pathlib import Path +import platform +import re +import sys +import traceback +import types +from typing import Any +from typing import TYPE_CHECKING +import warnings + +from _pytest import outcomes +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import ReprFileLocation +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest.compat import safe_getattr +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.fixtures import fixture +from _pytest.fixtures import TopRequest +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import OutcomeException +from _pytest.outcomes import skip +from _pytest.pathlib import fnmatch_ex +from _pytest.python import Module +from _pytest.python_api import approx +from _pytest.warning_types import PytestWarning + + +if TYPE_CHECKING: + import doctest + + from typing_extensions import Self + +DOCTEST_REPORT_CHOICE_NONE = "none" +DOCTEST_REPORT_CHOICE_CDIFF = "cdiff" +DOCTEST_REPORT_CHOICE_NDIFF = "ndiff" +DOCTEST_REPORT_CHOICE_UDIFF = "udiff" +DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure" + +DOCTEST_REPORT_CHOICES = ( + DOCTEST_REPORT_CHOICE_NONE, + DOCTEST_REPORT_CHOICE_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF, + DOCTEST_REPORT_CHOICE_UDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE, +) + +# Lazy definition of runner class +RUNNER_CLASS = None +# Lazy definition of output checker class +CHECKER_CLASS: type[doctest.OutputChecker] | None = None + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "doctest_optionflags", + "Option flags for doctests", + type="args", + default=["ELLIPSIS"], + ) + parser.addini( + "doctest_encoding", "Encoding used for doctest files", default="utf-8" + ) + group = parser.getgroup("collect") + group.addoption( + "--doctest-modules", + action="store_true", + default=False, + help="Run doctests in all .py modules", + dest="doctestmodules", + ) + group.addoption( + "--doctest-report", + type=str.lower, + default="udiff", + help="Choose another output format for diffs on doctest failure", + choices=DOCTEST_REPORT_CHOICES, + dest="doctestreport", + ) + group.addoption( + "--doctest-glob", + action="append", + default=[], + metavar="pat", + help="Doctests file matching pattern, default: test*.txt", + dest="doctestglob", + ) + group.addoption( + "--doctest-ignore-import-errors", + action="store_true", + default=False, + help="Ignore doctest collection errors", + dest="doctest_ignore_import_errors", + ) + group.addoption( + "--doctest-continue-on-failure", + action="store_true", + default=False, + help="For a given doctest, continue to run after the first failure", + dest="doctest_continue_on_failure", + ) + + +def pytest_unconfigure() -> None: + global RUNNER_CLASS + + RUNNER_CLASS = None + + +def pytest_collect_file( + file_path: Path, + parent: Collector, +) -> DoctestModule | DoctestTextfile | None: + config = parent.config + if file_path.suffix == ".py": + if config.option.doctestmodules and not any( + (_is_setup_py(file_path), _is_main_py(file_path)) + ): + return DoctestModule.from_parent(parent, path=file_path) + elif _is_doctest(config, file_path, parent): + return DoctestTextfile.from_parent(parent, path=file_path) + return None + + +def _is_setup_py(path: Path) -> bool: + if path.name != "setup.py": + return False + contents = path.read_bytes() + return b"setuptools" in contents or b"distutils" in contents + + +def _is_doctest(config: Config, path: Path, parent: Collector) -> bool: + if path.suffix in (".txt", ".rst") and parent.session.isinitpath(path): + return True + globs = config.getoption("doctestglob") or ["test*.txt"] + return any(fnmatch_ex(glob, path) for glob in globs) + + +def _is_main_py(path: Path) -> bool: + return path.name == "__main__.py" + + +class ReprFailDoctest(TerminalRepr): + def __init__( + self, reprlocation_lines: Sequence[tuple[ReprFileLocation, Sequence[str]]] + ) -> None: + self.reprlocation_lines = reprlocation_lines + + def toterminal(self, tw: TerminalWriter) -> None: + for reprlocation, lines in self.reprlocation_lines: + for line in lines: + tw.line(line) + reprlocation.toterminal(tw) + + +class MultipleDoctestFailures(Exception): + def __init__(self, failures: Sequence[doctest.DocTestFailure]) -> None: + super().__init__() + self.failures = failures + + +def _init_runner_class() -> type[doctest.DocTestRunner]: + import doctest + + class PytestDoctestRunner(doctest.DebugRunner): + """Runner to collect failures. + + Note that the out variable in this case is a list instead of a + stdout-like object. + """ + + def __init__( + self, + checker: doctest.OutputChecker | None = None, + verbose: bool | None = None, + optionflags: int = 0, + continue_on_failure: bool = True, + ) -> None: + super().__init__(checker=checker, verbose=verbose, optionflags=optionflags) + self.continue_on_failure = continue_on_failure + + def report_failure( + self, + out, + test: doctest.DocTest, + example: doctest.Example, + got: str, + ) -> None: + failure = doctest.DocTestFailure(test, example, got) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + def report_unexpected_exception( + self, + out, + test: doctest.DocTest, + example: doctest.Example, + exc_info: tuple[type[BaseException], BaseException, types.TracebackType], + ) -> None: + if isinstance(exc_info[1], OutcomeException): + raise exc_info[1] + if isinstance(exc_info[1], bdb.BdbQuit): + outcomes.exit("Quitting debugger") + failure = doctest.UnexpectedException(test, example, exc_info) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + return PytestDoctestRunner + + +def _get_runner( + checker: doctest.OutputChecker | None = None, + verbose: bool | None = None, + optionflags: int = 0, + continue_on_failure: bool = True, +) -> doctest.DocTestRunner: + # We need this in order to do a lazy import on doctest + global RUNNER_CLASS + if RUNNER_CLASS is None: + RUNNER_CLASS = _init_runner_class() + # Type ignored because the continue_on_failure argument is only defined on + # PytestDoctestRunner, which is lazily defined so can't be used as a type. + return RUNNER_CLASS( # type: ignore + checker=checker, + verbose=verbose, + optionflags=optionflags, + continue_on_failure=continue_on_failure, + ) + + +class DoctestItem(Item): + def __init__( + self, + name: str, + parent: DoctestTextfile | DoctestModule, + runner: doctest.DocTestRunner, + dtest: doctest.DocTest, + ) -> None: + super().__init__(name, parent) + self.runner = runner + self.dtest = dtest + + # Stuff needed for fixture support. + self.obj = None + fm = self.session._fixturemanager + fixtureinfo = fm.getfixtureinfo(node=self, func=None, cls=None) + self._fixtureinfo = fixtureinfo + self.fixturenames = fixtureinfo.names_closure + self._initrequest() + + @classmethod + def from_parent( # type: ignore[override] + cls, + parent: DoctestTextfile | DoctestModule, + *, + name: str, + runner: doctest.DocTestRunner, + dtest: doctest.DocTest, + ) -> Self: + # incompatible signature due to imposed limits on subclass + """The public named constructor.""" + return super().from_parent(name=name, parent=parent, runner=runner, dtest=dtest) + + def _initrequest(self) -> None: + self.funcargs: dict[str, object] = {} + self._request = TopRequest(self, _ispytest=True) # type: ignore[arg-type] + + def setup(self) -> None: + self._request._fillfixtures() + globs = dict(getfixture=self._request.getfixturevalue) + for name, value in self._request.getfixturevalue("doctest_namespace").items(): + globs[name] = value + self.dtest.globs.update(globs) + + def runtest(self) -> None: + _check_all_skipped(self.dtest) + self._disable_output_capturing_for_darwin() + failures: list[doctest.DocTestFailure] = [] + # Type ignored because we change the type of `out` from what + # doctest expects. + self.runner.run(self.dtest, out=failures) # type: ignore[arg-type] + if failures: + raise MultipleDoctestFailures(failures) + + def _disable_output_capturing_for_darwin(self) -> None: + """Disable output capturing. Otherwise, stdout is lost to doctest (#985).""" + if platform.system() != "Darwin": + return + capman = self.config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + + # TODO: Type ignored -- breaks Liskov Substitution. + def repr_failure( # type: ignore[override] + self, + excinfo: ExceptionInfo[BaseException], + ) -> str | TerminalRepr: + import doctest + + failures: ( + Sequence[doctest.DocTestFailure | doctest.UnexpectedException] | None + ) = None + if isinstance( + excinfo.value, doctest.DocTestFailure | doctest.UnexpectedException + ): + failures = [excinfo.value] + elif isinstance(excinfo.value, MultipleDoctestFailures): + failures = excinfo.value.failures + + if failures is None: + return super().repr_failure(excinfo) + + reprlocation_lines = [] + for failure in failures: + example = failure.example + test = failure.test + filename = test.filename + if test.lineno is None: + lineno = None + else: + lineno = test.lineno + example.lineno + 1 + message = type(failure).__name__ + # TODO: ReprFileLocation doesn't expect a None lineno. + reprlocation = ReprFileLocation(filename, lineno, message) # type: ignore[arg-type] + checker = _get_checker() + report_choice = _get_report_choice(self.config.getoption("doctestreport")) + if lineno is not None: + assert failure.test.docstring is not None + lines = failure.test.docstring.splitlines(False) + # add line numbers to the left of the error message + assert test.lineno is not None + lines = [ + f"{i + test.lineno + 1:03d} {x}" for (i, x) in enumerate(lines) + ] + # trim docstring error lines to 10 + lines = lines[max(example.lineno - 9, 0) : example.lineno + 1] + else: + lines = [ + "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example" + ] + indent = ">>>" + for line in example.source.splitlines(): + lines.append(f"??? {indent} {line}") + indent = "..." + if isinstance(failure, doctest.DocTestFailure): + lines += checker.output_difference( + example, failure.got, report_choice + ).split("\n") + else: + inner_excinfo = ExceptionInfo.from_exc_info(failure.exc_info) + lines += [f"UNEXPECTED EXCEPTION: {inner_excinfo.value!r}"] + lines += [ + x.strip("\n") for x in traceback.format_exception(*failure.exc_info) + ] + reprlocation_lines.append((reprlocation, lines)) + return ReprFailDoctest(reprlocation_lines) + + def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]: + return self.path, self.dtest.lineno, f"[doctest] {self.name}" + + +def _get_flag_lookup() -> dict[str, int]: + import doctest + + return dict( + DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1, + DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE, + NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE, + ELLIPSIS=doctest.ELLIPSIS, + IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL, + COMPARISON_FLAGS=doctest.COMPARISON_FLAGS, + ALLOW_UNICODE=_get_allow_unicode_flag(), + ALLOW_BYTES=_get_allow_bytes_flag(), + NUMBER=_get_number_flag(), + ) + + +def get_optionflags(config: Config) -> int: + optionflags_str = config.getini("doctest_optionflags") + flag_lookup_table = _get_flag_lookup() + flag_acc = 0 + for flag in optionflags_str: + flag_acc |= flag_lookup_table[flag] + return flag_acc + + +def _get_continue_on_failure(config: Config) -> bool: + continue_on_failure: bool = config.getvalue("doctest_continue_on_failure") + if continue_on_failure: + # We need to turn off this if we use pdb since we should stop at + # the first failure. + if config.getvalue("usepdb"): + continue_on_failure = False + return continue_on_failure + + +class DoctestTextfile(Module): + obj = None + + def collect(self) -> Iterable[DoctestItem]: + import doctest + + # Inspired by doctest.testfile; ideally we would use it directly, + # but it doesn't support passing a custom checker. + encoding = self.config.getini("doctest_encoding") + text = self.path.read_text(encoding) + filename = str(self.path) + name = self.path.name + globs = {"__name__": "__main__"} + + optionflags = get_optionflags(self.config) + + runner = _get_runner( + verbose=False, + optionflags=optionflags, + checker=_get_checker(), + continue_on_failure=_get_continue_on_failure(self.config), + ) + + parser = doctest.DocTestParser() + test = parser.get_doctest(text, globs, name, filename, 0) + if test.examples: + yield DoctestItem.from_parent( + self, name=test.name, runner=runner, dtest=test + ) + + +def _check_all_skipped(test: doctest.DocTest) -> None: + """Raise pytest.skip() if all examples in the given DocTest have the SKIP + option set.""" + import doctest + + all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples) + if all_skipped: + skip("all tests skipped by +SKIP option") + + +def _is_mocked(obj: object) -> bool: + """Return if an object is possibly a mock object by checking the + existence of a highly improbable attribute.""" + return ( + safe_getattr(obj, "pytest_mock_example_attribute_that_shouldnt_exist", None) + is not None + ) + + +@contextmanager +def _patch_unwrap_mock_aware() -> Generator[None]: + """Context manager which replaces ``inspect.unwrap`` with a version + that's aware of mock objects and doesn't recurse into them.""" + real_unwrap = inspect.unwrap + + def _mock_aware_unwrap( + func: Callable[..., Any], *, stop: Callable[[Any], Any] | None = None + ) -> Any: + try: + if stop is None or stop is _is_mocked: + return real_unwrap(func, stop=_is_mocked) + _stop = stop + return real_unwrap(func, stop=lambda obj: _is_mocked(obj) or _stop(func)) + except Exception as e: + warnings.warn( + f"Got {e!r} when unwrapping {func!r}. This is usually caused " + "by a violation of Python's object protocol; see e.g. " + "https://github.com/pytest-dev/pytest/issues/5080", + PytestWarning, + ) + raise + + inspect.unwrap = _mock_aware_unwrap + try: + yield + finally: + inspect.unwrap = real_unwrap + + +class DoctestModule(Module): + def collect(self) -> Iterable[DoctestItem]: + import doctest + + class MockAwareDocTestFinder(doctest.DocTestFinder): + py_ver_info_minor = sys.version_info[:2] + is_find_lineno_broken = ( + py_ver_info_minor < (3, 11) + or (py_ver_info_minor == (3, 11) and sys.version_info.micro < 9) + or (py_ver_info_minor == (3, 12) and sys.version_info.micro < 3) + ) + if is_find_lineno_broken: + + def _find_lineno(self, obj, source_lines): + """On older Pythons, doctest code does not take into account + `@property`. https://github.com/python/cpython/issues/61648 + + Moreover, wrapped Doctests need to be unwrapped so the correct + line number is returned. #8796 + """ + if isinstance(obj, property): + obj = getattr(obj, "fget", obj) + + if hasattr(obj, "__wrapped__"): + # Get the main obj in case of it being wrapped + obj = inspect.unwrap(obj) + + # Type ignored because this is a private function. + return super()._find_lineno( # type:ignore[misc] + obj, + source_lines, + ) + + if sys.version_info < (3, 13): + + def _from_module(self, module, object): + """`cached_property` objects are never considered a part + of the 'current module'. As such they are skipped by doctest. + Here we override `_from_module` to check the underlying + function instead. https://github.com/python/cpython/issues/107995 + """ + if isinstance(object, functools.cached_property): + object = object.func + + # Type ignored because this is a private function. + return super()._from_module(module, object) # type: ignore[misc] + + try: + module = self.obj + except Collector.CollectError: + if self.config.getvalue("doctest_ignore_import_errors"): + skip(f"unable to import module {self.path!r}") + else: + raise + + # While doctests currently don't support fixtures directly, we still + # need to pick up autouse fixtures. + self.session._fixturemanager.parsefactories(self) + + # Uses internal doctest module parsing mechanism. + finder = MockAwareDocTestFinder() + optionflags = get_optionflags(self.config) + runner = _get_runner( + verbose=False, + optionflags=optionflags, + checker=_get_checker(), + continue_on_failure=_get_continue_on_failure(self.config), + ) + + for test in finder.find(module, module.__name__): + if test.examples: # skip empty doctests + yield DoctestItem.from_parent( + self, name=test.name, runner=runner, dtest=test + ) + + +def _init_checker_class() -> type[doctest.OutputChecker]: + import doctest + + class LiteralsOutputChecker(doctest.OutputChecker): + # Based on doctest_nose_plugin.py from the nltk project + # (https://github.com/nltk/nltk) and on the "numtest" doctest extension + # by Sebastien Boisgerault (https://github.com/boisgera/numtest). + + _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE) + _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE) + _number_re = re.compile( + r""" + (?P + (?P + (?P [+-]?\d*)\.(?P\d+) + | + (?P [+-]?\d+)\. + ) + (?: + [Ee] + (?P [+-]?\d+) + )? + | + (?P [+-]?\d+) + (?: + [Ee] + (?P [+-]?\d+) + ) + ) + """, + re.VERBOSE, + ) + + def check_output(self, want: str, got: str, optionflags: int) -> bool: + if super().check_output(want, got, optionflags): + return True + + allow_unicode = optionflags & _get_allow_unicode_flag() + allow_bytes = optionflags & _get_allow_bytes_flag() + allow_number = optionflags & _get_number_flag() + + if not allow_unicode and not allow_bytes and not allow_number: + return False + + def remove_prefixes(regex: re.Pattern[str], txt: str) -> str: + return re.sub(regex, r"\1\2", txt) + + if allow_unicode: + want = remove_prefixes(self._unicode_literal_re, want) + got = remove_prefixes(self._unicode_literal_re, got) + + if allow_bytes: + want = remove_prefixes(self._bytes_literal_re, want) + got = remove_prefixes(self._bytes_literal_re, got) + + if allow_number: + got = self._remove_unwanted_precision(want, got) + + return super().check_output(want, got, optionflags) + + def _remove_unwanted_precision(self, want: str, got: str) -> str: + wants = list(self._number_re.finditer(want)) + gots = list(self._number_re.finditer(got)) + if len(wants) != len(gots): + return got + offset = 0 + for w, g in zip(wants, gots, strict=True): + fraction: str | None = w.group("fraction") + exponent: str | None = w.group("exponent1") + if exponent is None: + exponent = w.group("exponent2") + precision = 0 if fraction is None else len(fraction) + if exponent is not None: + precision -= int(exponent) + if float(w.group()) == approx(float(g.group()), abs=10**-precision): + # They're close enough. Replace the text we actually + # got with the text we want, so that it will match when we + # check the string literally. + got = ( + got[: g.start() + offset] + w.group() + got[g.end() + offset :] + ) + offset += w.end() - w.start() - (g.end() - g.start()) + return got + + return LiteralsOutputChecker + + +def _get_checker() -> doctest.OutputChecker: + """Return a doctest.OutputChecker subclass that supports some + additional options: + + * ALLOW_UNICODE and ALLOW_BYTES options to ignore u'' and b'' + prefixes (respectively) in string literals. Useful when the same + doctest should run in Python 2 and Python 3. + + * NUMBER to ignore floating-point differences smaller than the + precision of the literal number in the doctest. + + An inner class is used to avoid importing "doctest" at the module + level. + """ + global CHECKER_CLASS + if CHECKER_CLASS is None: + CHECKER_CLASS = _init_checker_class() + return CHECKER_CLASS() + + +def _get_allow_unicode_flag() -> int: + """Register and return the ALLOW_UNICODE flag.""" + import doctest + + return doctest.register_optionflag("ALLOW_UNICODE") + + +def _get_allow_bytes_flag() -> int: + """Register and return the ALLOW_BYTES flag.""" + import doctest + + return doctest.register_optionflag("ALLOW_BYTES") + + +def _get_number_flag() -> int: + """Register and return the NUMBER flag.""" + import doctest + + return doctest.register_optionflag("NUMBER") + + +def _get_report_choice(key: str) -> int: + """Return the actual `doctest` module flag value. + + We want to do it as late as possible to avoid importing `doctest` and all + its dependencies when parsing options, as it adds overhead and breaks tests. + """ + import doctest + + return { + DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF, + DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE, + DOCTEST_REPORT_CHOICE_NONE: 0, + }[key] + + +@fixture(scope="session") +def doctest_namespace() -> dict[str, Any]: + """Fixture that returns a :py:class:`dict` that will be injected into the + namespace of doctests. + + Usually this fixture is used in conjunction with another ``autouse`` fixture: + + .. code-block:: python + + @pytest.fixture(autouse=True) + def add_np(doctest_namespace): + doctest_namespace["np"] = numpy + + For more details: :ref:`doctest_namespace`. + """ + return dict() diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/faulthandler.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/faulthandler.py new file mode 100644 index 0000000..080cf58 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/faulthandler.py @@ -0,0 +1,119 @@ +from __future__ import annotations + +from collections.abc import Generator +import os +import sys + +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.nodes import Item +from _pytest.stash import StashKey +import pytest + + +fault_handler_original_stderr_fd_key = StashKey[int]() +fault_handler_stderr_fd_key = StashKey[int]() + + +def pytest_addoption(parser: Parser) -> None: + help_timeout = ( + "Dump the traceback of all threads if a test takes " + "more than TIMEOUT seconds to finish" + ) + help_exit_on_timeout = ( + "Exit the test process if a test takes more than " + "faulthandler_timeout seconds to finish" + ) + parser.addini("faulthandler_timeout", help_timeout, default=0.0) + parser.addini( + "faulthandler_exit_on_timeout", help_exit_on_timeout, type="bool", default=False + ) + + +def pytest_configure(config: Config) -> None: + import faulthandler + + # at teardown we want to restore the original faulthandler fileno + # but faulthandler has no api to return the original fileno + # so here we stash the stderr fileno to be used at teardown + # sys.stderr and sys.__stderr__ may be closed or patched during the session + # so we can't rely on their values being good at that point (#11572). + stderr_fileno = get_stderr_fileno() + if faulthandler.is_enabled(): + config.stash[fault_handler_original_stderr_fd_key] = stderr_fileno + config.stash[fault_handler_stderr_fd_key] = os.dup(stderr_fileno) + faulthandler.enable(file=config.stash[fault_handler_stderr_fd_key]) + + +def pytest_unconfigure(config: Config) -> None: + import faulthandler + + faulthandler.disable() + # Close the dup file installed during pytest_configure. + if fault_handler_stderr_fd_key in config.stash: + os.close(config.stash[fault_handler_stderr_fd_key]) + del config.stash[fault_handler_stderr_fd_key] + # Re-enable the faulthandler if it was originally enabled. + if fault_handler_original_stderr_fd_key in config.stash: + faulthandler.enable(config.stash[fault_handler_original_stderr_fd_key]) + del config.stash[fault_handler_original_stderr_fd_key] + + +def get_stderr_fileno() -> int: + try: + fileno = sys.stderr.fileno() + # The Twisted Logger will return an invalid file descriptor since it is not backed + # by an FD. So, let's also forward this to the same code path as with pytest-xdist. + if fileno == -1: + raise AttributeError() + return fileno + except (AttributeError, ValueError): + # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file. + # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors + # This is potentially dangerous, but the best we can do. + assert sys.__stderr__ is not None + return sys.__stderr__.fileno() + + +def get_timeout_config_value(config: Config) -> float: + return float(config.getini("faulthandler_timeout") or 0.0) + + +def get_exit_on_timeout_config_value(config: Config) -> bool: + exit_on_timeout = config.getini("faulthandler_exit_on_timeout") + assert isinstance(exit_on_timeout, bool) + return exit_on_timeout + + +@pytest.hookimpl(wrapper=True, trylast=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]: + timeout = get_timeout_config_value(item.config) + exit_on_timeout = get_exit_on_timeout_config_value(item.config) + if timeout > 0: + import faulthandler + + stderr = item.config.stash[fault_handler_stderr_fd_key] + faulthandler.dump_traceback_later(timeout, file=stderr, exit=exit_on_timeout) + try: + return (yield) + finally: + faulthandler.cancel_dump_traceback_later() + else: + return (yield) + + +@pytest.hookimpl(tryfirst=True) +def pytest_enter_pdb() -> None: + """Cancel any traceback dumping due to timeout before entering pdb.""" + import faulthandler + + faulthandler.cancel_dump_traceback_later() + + +@pytest.hookimpl(tryfirst=True) +def pytest_exception_interact() -> None: + """Cancel any traceback dumping due to an interactive exception being + raised.""" + import faulthandler + + faulthandler.cancel_dump_traceback_later() diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/fixtures.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/fixtures.py new file mode 100644 index 0000000..27846db --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/fixtures.py @@ -0,0 +1,2047 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import abc +from collections import defaultdict +from collections import deque +from collections import OrderedDict +from collections.abc import Callable +from collections.abc import Generator +from collections.abc import Iterable +from collections.abc import Iterator +from collections.abc import Mapping +from collections.abc import MutableMapping +from collections.abc import Sequence +from collections.abc import Set as AbstractSet +import dataclasses +import functools +import inspect +import os +from pathlib import Path +import sys +import types +from typing import Any +from typing import cast +from typing import Final +from typing import final +from typing import Generic +from typing import NoReturn +from typing import overload +from typing import TYPE_CHECKING +from typing import TypeVar +import warnings + +import _pytest +from _pytest import nodes +from _pytest._code import getfslineno +from _pytest._code import Source +from _pytest._code.code import FormattedExcinfo +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest.compat import assert_never +from _pytest.compat import get_real_func +from _pytest.compat import getfuncargnames +from _pytest.compat import getimfunc +from _pytest.compat import getlocation +from _pytest.compat import NOTSET +from _pytest.compat import NotSetType +from _pytest.compat import safe_getattr +from _pytest.compat import safe_isclass +from _pytest.compat import signature +from _pytest.config import _PluggyPlugin +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.deprecated import MARKED_FIXTURE +from _pytest.deprecated import YIELD_FIXTURE +from _pytest.main import Session +from _pytest.mark import Mark +from _pytest.mark import ParameterSet +from _pytest.mark.structures import MarkDecorator +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.outcomes import TEST_OUTCOME +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.scope import _ScopeName +from _pytest.scope import HIGH_SCOPES +from _pytest.scope import Scope +from _pytest.warning_types import PytestRemovedIn9Warning +from _pytest.warning_types import PytestWarning + + +if sys.version_info < (3, 11): + from exceptiongroup import BaseExceptionGroup + + +if TYPE_CHECKING: + from _pytest.python import CallSpec2 + from _pytest.python import Function + from _pytest.python import Metafunc + + +# The value of the fixture -- return/yield of the fixture function (type variable). +FixtureValue = TypeVar("FixtureValue", covariant=True) +# The type of the fixture function (type variable). +FixtureFunction = TypeVar("FixtureFunction", bound=Callable[..., object]) +# The type of a fixture function (type alias generic in fixture value). +_FixtureFunc = Callable[..., FixtureValue] | Callable[..., Generator[FixtureValue]] +# The type of FixtureDef.cached_result (type alias generic in fixture value). +_FixtureCachedResult = ( + tuple[ + # The result. + FixtureValue, + # Cache key. + object, + None, + ] + | tuple[ + None, + # Cache key. + object, + # The exception and the original traceback. + tuple[BaseException, types.TracebackType | None], + ] +) + + +def pytest_sessionstart(session: Session) -> None: + session._fixturemanager = FixtureManager(session) + + +def get_scope_package( + node: nodes.Item, + fixturedef: FixtureDef[object], +) -> nodes.Node | None: + from _pytest.python import Package + + for parent in node.iter_parents(): + if isinstance(parent, Package) and parent.nodeid == fixturedef.baseid: + return parent + return node.session + + +def get_scope_node(node: nodes.Node, scope: Scope) -> nodes.Node | None: + """Get the closest parent node (including self) which matches the given + scope. + + If there is no parent node for the scope (e.g. asking for class scope on a + Module, or on a Function when not defined in a class), returns None. + """ + import _pytest.python + + if scope is Scope.Function: + # Type ignored because this is actually safe, see: + # https://github.com/python/mypy/issues/4717 + return node.getparent(nodes.Item) # type: ignore[type-abstract] + elif scope is Scope.Class: + return node.getparent(_pytest.python.Class) + elif scope is Scope.Module: + return node.getparent(_pytest.python.Module) + elif scope is Scope.Package: + return node.getparent(_pytest.python.Package) + elif scope is Scope.Session: + return node.getparent(_pytest.main.Session) + else: + assert_never(scope) + + +# TODO: Try to use FixtureFunctionDefinition instead of the marker +def getfixturemarker(obj: object) -> FixtureFunctionMarker | None: + """Return fixturemarker or None if it doesn't exist""" + if isinstance(obj, FixtureFunctionDefinition): + return obj._fixture_function_marker + return None + + +# Algorithm for sorting on a per-parametrized resource setup basis. +# It is called for Session scope first and performs sorting +# down to the lower scopes such as to minimize number of "high scope" +# setups and teardowns. + + +@dataclasses.dataclass(frozen=True) +class ParamArgKey: + """A key for a high-scoped parameter used by an item. + + For use as a hashable key in `reorder_items`. The combination of fields + is meant to uniquely identify a particular "instance" of a param, + potentially shared by multiple items in a scope. + """ + + #: The param name. + argname: str + param_index: int + #: For scopes Package, Module, Class, the path to the file (directory in + #: Package's case) of the package/module/class where the item is defined. + scoped_item_path: Path | None + #: For Class scope, the class where the item is defined. + item_cls: type | None + + +_V = TypeVar("_V") +OrderedSet = dict[_V, None] + + +def get_param_argkeys(item: nodes.Item, scope: Scope) -> Iterator[ParamArgKey]: + """Return all ParamArgKeys for item matching the specified high scope.""" + assert scope is not Scope.Function + + try: + callspec: CallSpec2 = item.callspec # type: ignore[attr-defined] + except AttributeError: + return + + item_cls = None + if scope is Scope.Session: + scoped_item_path = None + elif scope is Scope.Package: + # Package key = module's directory. + scoped_item_path = item.path.parent + elif scope is Scope.Module: + scoped_item_path = item.path + elif scope is Scope.Class: + scoped_item_path = item.path + item_cls = item.cls # type: ignore[attr-defined] + else: + assert_never(scope) + + for argname in callspec.indices: + if callspec._arg2scope[argname] != scope: + continue + param_index = callspec.indices[argname] + yield ParamArgKey(argname, param_index, scoped_item_path, item_cls) + + +def reorder_items(items: Sequence[nodes.Item]) -> list[nodes.Item]: + argkeys_by_item: dict[Scope, dict[nodes.Item, OrderedSet[ParamArgKey]]] = {} + items_by_argkey: dict[Scope, dict[ParamArgKey, OrderedDict[nodes.Item, None]]] = {} + for scope in HIGH_SCOPES: + scoped_argkeys_by_item = argkeys_by_item[scope] = {} + scoped_items_by_argkey = items_by_argkey[scope] = defaultdict(OrderedDict) + for item in items: + argkeys = dict.fromkeys(get_param_argkeys(item, scope)) + if argkeys: + scoped_argkeys_by_item[item] = argkeys + for argkey in argkeys: + scoped_items_by_argkey[argkey][item] = None + + items_set = dict.fromkeys(items) + return list( + reorder_items_atscope( + items_set, argkeys_by_item, items_by_argkey, Scope.Session + ) + ) + + +def reorder_items_atscope( + items: OrderedSet[nodes.Item], + argkeys_by_item: Mapping[Scope, Mapping[nodes.Item, OrderedSet[ParamArgKey]]], + items_by_argkey: Mapping[ + Scope, Mapping[ParamArgKey, OrderedDict[nodes.Item, None]] + ], + scope: Scope, +) -> OrderedSet[nodes.Item]: + if scope is Scope.Function or len(items) < 3: + return items + + scoped_items_by_argkey = items_by_argkey[scope] + scoped_argkeys_by_item = argkeys_by_item[scope] + + ignore: set[ParamArgKey] = set() + items_deque = deque(items) + items_done: OrderedSet[nodes.Item] = {} + while items_deque: + no_argkey_items: OrderedSet[nodes.Item] = {} + slicing_argkey = None + while items_deque: + item = items_deque.popleft() + if item in items_done or item in no_argkey_items: + continue + argkeys = dict.fromkeys( + k for k in scoped_argkeys_by_item.get(item, ()) if k not in ignore + ) + if not argkeys: + no_argkey_items[item] = None + else: + slicing_argkey, _ = argkeys.popitem() + # We don't have to remove relevant items from later in the + # deque because they'll just be ignored. + matching_items = [ + i for i in scoped_items_by_argkey[slicing_argkey] if i in items + ] + for i in reversed(matching_items): + items_deque.appendleft(i) + # Fix items_by_argkey order. + for other_scope in HIGH_SCOPES: + other_scoped_items_by_argkey = items_by_argkey[other_scope] + for argkey in argkeys_by_item[other_scope].get(i, ()): + argkey_dict = other_scoped_items_by_argkey[argkey] + if not hasattr(sys, "pypy_version_info"): + argkey_dict[i] = None + argkey_dict.move_to_end(i, last=False) + else: + # Work around a bug in PyPy: + # https://github.com/pypy/pypy/issues/5257 + # https://github.com/pytest-dev/pytest/issues/13312 + bkp = argkey_dict.copy() + argkey_dict.clear() + argkey_dict[i] = None + argkey_dict.update(bkp) + break + if no_argkey_items: + reordered_no_argkey_items = reorder_items_atscope( + no_argkey_items, argkeys_by_item, items_by_argkey, scope.next_lower() + ) + items_done.update(reordered_no_argkey_items) + if slicing_argkey is not None: + ignore.add(slicing_argkey) + return items_done + + +@dataclasses.dataclass(frozen=True) +class FuncFixtureInfo: + """Fixture-related information for a fixture-requesting item (e.g. test + function). + + This is used to examine the fixtures which an item requests statically + (known during collection). This includes autouse fixtures, fixtures + requested by the `usefixtures` marker, fixtures requested in the function + parameters, and the transitive closure of these. + + An item may also request fixtures dynamically (using `request.getfixturevalue`); + these are not reflected here. + """ + + __slots__ = ("argnames", "initialnames", "name2fixturedefs", "names_closure") + + # Fixture names that the item requests directly by function parameters. + argnames: tuple[str, ...] + # Fixture names that the item immediately requires. These include + # argnames + fixture names specified via usefixtures and via autouse=True in + # fixture definitions. + initialnames: tuple[str, ...] + # The transitive closure of the fixture names that the item requires. + # Note: can't include dynamic dependencies (`request.getfixturevalue` calls). + names_closure: list[str] + # A map from a fixture name in the transitive closure to the FixtureDefs + # matching the name which are applicable to this function. + # There may be multiple overriding fixtures with the same name. The + # sequence is ordered from furthest to closes to the function. + name2fixturedefs: dict[str, Sequence[FixtureDef[Any]]] + + def prune_dependency_tree(self) -> None: + """Recompute names_closure from initialnames and name2fixturedefs. + + Can only reduce names_closure, which means that the new closure will + always be a subset of the old one. The order is preserved. + + This method is needed because direct parametrization may shadow some + of the fixtures that were included in the originally built dependency + tree. In this way the dependency tree can get pruned, and the closure + of argnames may get reduced. + """ + closure: set[str] = set() + working_set = set(self.initialnames) + while working_set: + argname = working_set.pop() + # Argname may be something not included in the original names_closure, + # in which case we ignore it. This currently happens with pseudo + # FixtureDefs which wrap 'get_direct_param_fixture_func(request)'. + # So they introduce the new dependency 'request' which might have + # been missing in the original tree (closure). + if argname not in closure and argname in self.names_closure: + closure.add(argname) + if argname in self.name2fixturedefs: + working_set.update(self.name2fixturedefs[argname][-1].argnames) + + self.names_closure[:] = sorted(closure, key=self.names_closure.index) + + +class FixtureRequest(abc.ABC): + """The type of the ``request`` fixture. + + A request object gives access to the requesting test context and has a + ``param`` attribute in case the fixture is parametrized. + """ + + def __init__( + self, + pyfuncitem: Function, + fixturename: str | None, + arg2fixturedefs: dict[str, Sequence[FixtureDef[Any]]], + fixture_defs: dict[str, FixtureDef[Any]], + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + #: Fixture for which this request is being performed. + self.fixturename: Final = fixturename + self._pyfuncitem: Final = pyfuncitem + # The FixtureDefs for each fixture name requested by this item. + # Starts from the statically-known fixturedefs resolved during + # collection. Dynamically requested fixtures (using + # `request.getfixturevalue("foo")`) are added dynamically. + self._arg2fixturedefs: Final = arg2fixturedefs + # The evaluated argnames so far, mapping to the FixtureDef they resolved + # to. + self._fixture_defs: Final = fixture_defs + # Notes on the type of `param`: + # -`request.param` is only defined in parametrized fixtures, and will raise + # AttributeError otherwise. Python typing has no notion of "undefined", so + # this cannot be reflected in the type. + # - Technically `param` is only (possibly) defined on SubRequest, not + # FixtureRequest, but the typing of that is still in flux so this cheats. + # - In the future we might consider using a generic for the param type, but + # for now just using Any. + self.param: Any + + @property + def _fixturemanager(self) -> FixtureManager: + return self._pyfuncitem.session._fixturemanager + + @property + @abc.abstractmethod + def _scope(self) -> Scope: + raise NotImplementedError() + + @property + def scope(self) -> _ScopeName: + """Scope string, one of "function", "class", "module", "package", "session".""" + return self._scope.value + + @abc.abstractmethod + def _check_scope( + self, + requested_fixturedef: FixtureDef[object], + requested_scope: Scope, + ) -> None: + raise NotImplementedError() + + @property + def fixturenames(self) -> list[str]: + """Names of all active fixtures in this request.""" + result = list(self._pyfuncitem.fixturenames) + result.extend(set(self._fixture_defs).difference(result)) + return result + + @property + @abc.abstractmethod + def node(self): + """Underlying collection node (depends on current request scope).""" + raise NotImplementedError() + + @property + def config(self) -> Config: + """The pytest config object associated with this request.""" + return self._pyfuncitem.config + + @property + def function(self): + """Test function object if the request has a per-function scope.""" + if self.scope != "function": + raise AttributeError( + f"function not available in {self.scope}-scoped context" + ) + return self._pyfuncitem.obj + + @property + def cls(self): + """Class (can be None) where the test function was collected.""" + if self.scope not in ("class", "function"): + raise AttributeError(f"cls not available in {self.scope}-scoped context") + clscol = self._pyfuncitem.getparent(_pytest.python.Class) + if clscol: + return clscol.obj + + @property + def instance(self): + """Instance (can be None) on which test function was collected.""" + if self.scope != "function": + return None + return getattr(self._pyfuncitem, "instance", None) + + @property + def module(self): + """Python module object where the test function was collected.""" + if self.scope not in ("function", "class", "module"): + raise AttributeError(f"module not available in {self.scope}-scoped context") + mod = self._pyfuncitem.getparent(_pytest.python.Module) + assert mod is not None + return mod.obj + + @property + def path(self) -> Path: + """Path where the test function was collected.""" + if self.scope not in ("function", "class", "module", "package"): + raise AttributeError(f"path not available in {self.scope}-scoped context") + return self._pyfuncitem.path + + @property + def keywords(self) -> MutableMapping[str, Any]: + """Keywords/markers dictionary for the underlying node.""" + node: nodes.Node = self.node + return node.keywords + + @property + def session(self) -> Session: + """Pytest session object.""" + return self._pyfuncitem.session + + @abc.abstractmethod + def addfinalizer(self, finalizer: Callable[[], object]) -> None: + """Add finalizer/teardown function to be called without arguments after + the last test within the requesting test context finished execution.""" + raise NotImplementedError() + + def applymarker(self, marker: str | MarkDecorator) -> None: + """Apply a marker to a single test function invocation. + + This method is useful if you don't want to have a keyword/marker + on all function invocations. + + :param marker: + An object created by a call to ``pytest.mark.NAME(...)``. + """ + self.node.add_marker(marker) + + def raiseerror(self, msg: str | None) -> NoReturn: + """Raise a FixtureLookupError exception. + + :param msg: + An optional custom error message. + """ + raise FixtureLookupError(None, self, msg) + + def getfixturevalue(self, argname: str) -> Any: + """Dynamically run a named fixture function. + + Declaring fixtures via function argument is recommended where possible. + But if you can only decide whether to use another fixture at test + setup time, you may use this function to retrieve it inside a fixture + or test function body. + + This method can be used during the test setup phase or the test run + phase, but during the test teardown phase a fixture's value may not + be available. + + :param argname: + The fixture name. + :raises pytest.FixtureLookupError: + If the given fixture could not be found. + """ + # Note that in addition to the use case described in the docstring, + # getfixturevalue() is also called by pytest itself during item and fixture + # setup to evaluate the fixtures that are requested statically + # (using function parameters, autouse, etc). + + fixturedef = self._get_active_fixturedef(argname) + assert fixturedef.cached_result is not None, ( + f'The fixture value for "{argname}" is not available. ' + "This can happen when the fixture has already been torn down." + ) + return fixturedef.cached_result[0] + + def _iter_chain(self) -> Iterator[SubRequest]: + """Yield all SubRequests in the chain, from self up. + + Note: does *not* yield the TopRequest. + """ + current = self + while isinstance(current, SubRequest): + yield current + current = current._parent_request + + def _get_active_fixturedef(self, argname: str) -> FixtureDef[object]: + if argname == "request": + return RequestFixtureDef(self) + + # If we already finished computing a fixture by this name in this item, + # return it. + fixturedef = self._fixture_defs.get(argname) + if fixturedef is not None: + self._check_scope(fixturedef, fixturedef._scope) + return fixturedef + + # Find the appropriate fixturedef. + fixturedefs = self._arg2fixturedefs.get(argname, None) + if fixturedefs is None: + # We arrive here because of a dynamic call to + # getfixturevalue(argname) which was naturally + # not known at parsing/collection time. + fixturedefs = self._fixturemanager.getfixturedefs(argname, self._pyfuncitem) + if fixturedefs is not None: + self._arg2fixturedefs[argname] = fixturedefs + # No fixtures defined with this name. + if fixturedefs is None: + raise FixtureLookupError(argname, self) + # The are no fixtures with this name applicable for the function. + if not fixturedefs: + raise FixtureLookupError(argname, self) + + # A fixture may override another fixture with the same name, e.g. a + # fixture in a module can override a fixture in a conftest, a fixture in + # a class can override a fixture in the module, and so on. + # An overriding fixture can request its own name (possibly indirectly); + # in this case it gets the value of the fixture it overrides, one level + # up. + # Check how many `argname`s deep we are, and take the next one. + # `fixturedefs` is sorted from furthest to closest, so use negative + # indexing to go in reverse. + index = -1 + for request in self._iter_chain(): + if request.fixturename == argname: + index -= 1 + # If already consumed all of the available levels, fail. + if -index > len(fixturedefs): + raise FixtureLookupError(argname, self) + fixturedef = fixturedefs[index] + + # Prepare a SubRequest object for calling the fixture. + try: + callspec = self._pyfuncitem.callspec + except AttributeError: + callspec = None + if callspec is not None and argname in callspec.params: + param = callspec.params[argname] + param_index = callspec.indices[argname] + # The parametrize invocation scope overrides the fixture's scope. + scope = callspec._arg2scope[argname] + else: + param = NOTSET + param_index = 0 + scope = fixturedef._scope + self._check_fixturedef_without_param(fixturedef) + # The parametrize invocation scope only controls caching behavior while + # allowing wider-scoped fixtures to keep depending on the parametrized + # fixture. Scope control is enforced for parametrized fixtures + # by recreating the whole fixture tree on parameter change. + # Hence `fixturedef._scope`, not `scope`. + self._check_scope(fixturedef, fixturedef._scope) + subrequest = SubRequest( + self, scope, param, param_index, fixturedef, _ispytest=True + ) + + # Make sure the fixture value is cached, running it if it isn't + fixturedef.execute(request=subrequest) + + self._fixture_defs[argname] = fixturedef + return fixturedef + + def _check_fixturedef_without_param(self, fixturedef: FixtureDef[object]) -> None: + """Check that this request is allowed to execute this fixturedef without + a param.""" + funcitem = self._pyfuncitem + has_params = fixturedef.params is not None + fixtures_not_supported = getattr(funcitem, "nofuncargs", False) + if has_params and fixtures_not_supported: + msg = ( + f"{funcitem.name} does not support fixtures, maybe unittest.TestCase subclass?\n" + f"Node id: {funcitem.nodeid}\n" + f"Function type: {type(funcitem).__name__}" + ) + fail(msg, pytrace=False) + if has_params: + frame = inspect.stack()[3] + frameinfo = inspect.getframeinfo(frame[0]) + source_path = absolutepath(frameinfo.filename) + source_lineno = frameinfo.lineno + try: + source_path_str = str(source_path.relative_to(funcitem.config.rootpath)) + except ValueError: + source_path_str = str(source_path) + location = getlocation(fixturedef.func, funcitem.config.rootpath) + msg = ( + "The requested fixture has no parameter defined for test:\n" + f" {funcitem.nodeid}\n\n" + f"Requested fixture '{fixturedef.argname}' defined in:\n" + f"{location}\n\n" + f"Requested here:\n" + f"{source_path_str}:{source_lineno}" + ) + fail(msg, pytrace=False) + + def _get_fixturestack(self) -> list[FixtureDef[Any]]: + values = [request._fixturedef for request in self._iter_chain()] + values.reverse() + return values + + +@final +class TopRequest(FixtureRequest): + """The type of the ``request`` fixture in a test function.""" + + def __init__(self, pyfuncitem: Function, *, _ispytest: bool = False) -> None: + super().__init__( + fixturename=None, + pyfuncitem=pyfuncitem, + arg2fixturedefs=pyfuncitem._fixtureinfo.name2fixturedefs.copy(), + fixture_defs={}, + _ispytest=_ispytest, + ) + + @property + def _scope(self) -> Scope: + return Scope.Function + + def _check_scope( + self, + requested_fixturedef: FixtureDef[object], + requested_scope: Scope, + ) -> None: + # TopRequest always has function scope so always valid. + pass + + @property + def node(self): + return self._pyfuncitem + + def __repr__(self) -> str: + return f"" + + def _fillfixtures(self) -> None: + item = self._pyfuncitem + for argname in item.fixturenames: + if argname not in item.funcargs: + item.funcargs[argname] = self.getfixturevalue(argname) + + def addfinalizer(self, finalizer: Callable[[], object]) -> None: + self.node.addfinalizer(finalizer) + + +@final +class SubRequest(FixtureRequest): + """The type of the ``request`` fixture in a fixture function requested + (transitively) by a test function.""" + + def __init__( + self, + request: FixtureRequest, + scope: Scope, + param: Any, + param_index: int, + fixturedef: FixtureDef[object], + *, + _ispytest: bool = False, + ) -> None: + super().__init__( + pyfuncitem=request._pyfuncitem, + fixturename=fixturedef.argname, + fixture_defs=request._fixture_defs, + arg2fixturedefs=request._arg2fixturedefs, + _ispytest=_ispytest, + ) + self._parent_request: Final[FixtureRequest] = request + self._scope_field: Final = scope + self._fixturedef: Final[FixtureDef[object]] = fixturedef + if param is not NOTSET: + self.param = param + self.param_index: Final = param_index + + def __repr__(self) -> str: + return f"" + + @property + def _scope(self) -> Scope: + return self._scope_field + + @property + def node(self): + scope = self._scope + if scope is Scope.Function: + # This might also be a non-function Item despite its attribute name. + node: nodes.Node | None = self._pyfuncitem + elif scope is Scope.Package: + node = get_scope_package(self._pyfuncitem, self._fixturedef) + else: + node = get_scope_node(self._pyfuncitem, scope) + if node is None and scope is Scope.Class: + # Fallback to function item itself. + node = self._pyfuncitem + assert node, ( + f'Could not obtain a node for scope "{scope}" for function {self._pyfuncitem!r}' + ) + return node + + def _check_scope( + self, + requested_fixturedef: FixtureDef[object], + requested_scope: Scope, + ) -> None: + if self._scope > requested_scope: + # Try to report something helpful. + argname = requested_fixturedef.argname + fixture_stack = "\n".join( + self._format_fixturedef_line(fixturedef) + for fixturedef in self._get_fixturestack() + ) + requested_fixture = self._format_fixturedef_line(requested_fixturedef) + fail( + f"ScopeMismatch: You tried to access the {requested_scope.value} scoped " + f"fixture {argname} with a {self._scope.value} scoped request object. " + f"Requesting fixture stack:\n{fixture_stack}\n" + f"Requested fixture:\n{requested_fixture}", + pytrace=False, + ) + + def _format_fixturedef_line(self, fixturedef: FixtureDef[object]) -> str: + factory = fixturedef.func + path, lineno = getfslineno(factory) + if isinstance(path, Path): + path = bestrelpath(self._pyfuncitem.session.path, path) + sig = signature(factory) + return f"{path}:{lineno + 1}: def {factory.__name__}{sig}" + + def addfinalizer(self, finalizer: Callable[[], object]) -> None: + self._fixturedef.addfinalizer(finalizer) + + +@final +class FixtureLookupError(LookupError): + """Could not return a requested fixture (missing or invalid).""" + + def __init__( + self, argname: str | None, request: FixtureRequest, msg: str | None = None + ) -> None: + self.argname = argname + self.request = request + self.fixturestack = request._get_fixturestack() + self.msg = msg + + def formatrepr(self) -> FixtureLookupErrorRepr: + tblines: list[str] = [] + addline = tblines.append + stack = [self.request._pyfuncitem.obj] + stack.extend(map(lambda x: x.func, self.fixturestack)) + msg = self.msg + # This function currently makes an assumption that a non-None msg means we + # have a non-empty `self.fixturestack`. This is currently true, but if + # somebody at some point want to extend the use of FixtureLookupError to + # new cases it might break. + # Add the assert to make it clearer to developer that this will fail, otherwise + # it crashes because `fspath` does not get set due to `stack` being empty. + assert self.msg is None or self.fixturestack, ( + "formatrepr assumptions broken, rewrite it to handle it" + ) + if msg is not None: + # The last fixture raise an error, let's present + # it at the requesting side. + stack = stack[:-1] + for function in stack: + fspath, lineno = getfslineno(function) + try: + lines, _ = inspect.getsourcelines(get_real_func(function)) + except (OSError, IndexError, TypeError): + error_msg = "file %s, line %s: source code not available" + addline(error_msg % (fspath, lineno + 1)) + else: + addline(f"file {fspath}, line {lineno + 1}") + for i, line in enumerate(lines): + line = line.rstrip() + addline(" " + line) + if line.lstrip().startswith("def"): + break + + if msg is None: + fm = self.request._fixturemanager + available = set() + parent = self.request._pyfuncitem.parent + assert parent is not None + for name, fixturedefs in fm._arg2fixturedefs.items(): + faclist = list(fm._matchfactories(fixturedefs, parent)) + if faclist: + available.add(name) + if self.argname in available: + msg = ( + f" recursive dependency involving fixture '{self.argname}' detected" + ) + else: + msg = f"fixture '{self.argname}' not found" + msg += "\n available fixtures: {}".format(", ".join(sorted(available))) + msg += "\n use 'pytest --fixtures [testpath]' for help on them." + + return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname) + + +class FixtureLookupErrorRepr(TerminalRepr): + def __init__( + self, + filename: str | os.PathLike[str], + firstlineno: int, + tblines: Sequence[str], + errorstring: str, + argname: str | None, + ) -> None: + self.tblines = tblines + self.errorstring = errorstring + self.filename = filename + self.firstlineno = firstlineno + self.argname = argname + + def toterminal(self, tw: TerminalWriter) -> None: + # tw.line("FixtureLookupError: %s" %(self.argname), red=True) + for tbline in self.tblines: + tw.line(tbline.rstrip()) + lines = self.errorstring.split("\n") + if lines: + tw.line( + f"{FormattedExcinfo.fail_marker} {lines[0].strip()}", + red=True, + ) + for line in lines[1:]: + tw.line( + f"{FormattedExcinfo.flow_marker} {line.strip()}", + red=True, + ) + tw.line() + tw.line(f"{os.fspath(self.filename)}:{self.firstlineno + 1}") + + +def call_fixture_func( + fixturefunc: _FixtureFunc[FixtureValue], request: FixtureRequest, kwargs +) -> FixtureValue: + if inspect.isgeneratorfunction(fixturefunc): + fixturefunc = cast(Callable[..., Generator[FixtureValue]], fixturefunc) + generator = fixturefunc(**kwargs) + try: + fixture_result = next(generator) + except StopIteration: + raise ValueError(f"{request.fixturename} did not yield a value") from None + finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, generator) + request.addfinalizer(finalizer) + else: + fixturefunc = cast(Callable[..., FixtureValue], fixturefunc) + fixture_result = fixturefunc(**kwargs) + return fixture_result + + +def _teardown_yield_fixture(fixturefunc, it) -> None: + """Execute the teardown of a fixture function by advancing the iterator + after the yield and ensure the iteration ends (if not it means there is + more than one yield in the function).""" + try: + next(it) + except StopIteration: + pass + else: + fs, lineno = getfslineno(fixturefunc) + fail( + f"fixture function has more than one 'yield':\n\n" + f"{Source(fixturefunc).indent()}\n" + f"{fs}:{lineno + 1}", + pytrace=False, + ) + + +def _eval_scope_callable( + scope_callable: Callable[[str, Config], _ScopeName], + fixture_name: str, + config: Config, +) -> _ScopeName: + try: + # Type ignored because there is no typing mechanism to specify + # keyword arguments, currently. + result = scope_callable(fixture_name=fixture_name, config=config) # type: ignore[call-arg] + except Exception as e: + raise TypeError( + f"Error evaluating {scope_callable} while defining fixture '{fixture_name}'.\n" + "Expected a function with the signature (*, fixture_name, config)" + ) from e + if not isinstance(result, str): + fail( + f"Expected {scope_callable} to return a 'str' while defining fixture '{fixture_name}', but it returned:\n" + f"{result!r}", + pytrace=False, + ) + return result + + +class FixtureDef(Generic[FixtureValue]): + """A container for a fixture definition. + + Note: At this time, only explicitly documented fields and methods are + considered public stable API. + """ + + def __init__( + self, + config: Config, + baseid: str | None, + argname: str, + func: _FixtureFunc[FixtureValue], + scope: Scope | _ScopeName | Callable[[str, Config], _ScopeName] | None, + params: Sequence[object] | None, + ids: tuple[object | None, ...] | Callable[[Any], object | None] | None = None, + *, + _ispytest: bool = False, + # only used in a deprecationwarning msg, can be removed in pytest9 + _autouse: bool = False, + ) -> None: + check_ispytest(_ispytest) + # The "base" node ID for the fixture. + # + # This is a node ID prefix. A fixture is only available to a node (e.g. + # a `Function` item) if the fixture's baseid is a nodeid of a parent of + # node. + # + # For a fixture found in a Collector's object (e.g. a `Module`s module, + # a `Class`'s class), the baseid is the Collector's nodeid. + # + # For a fixture found in a conftest plugin, the baseid is the conftest's + # directory path relative to the rootdir. + # + # For other plugins, the baseid is the empty string (always matches). + self.baseid: Final = baseid or "" + # Whether the fixture was found from a node or a conftest in the + # collection tree. Will be false for fixtures defined in non-conftest + # plugins. + self.has_location: Final = baseid is not None + # The fixture factory function. + self.func: Final = func + # The name by which the fixture may be requested. + self.argname: Final = argname + if scope is None: + scope = Scope.Function + elif callable(scope): + scope = _eval_scope_callable(scope, argname, config) + if isinstance(scope, str): + scope = Scope.from_user( + scope, descr=f"Fixture '{func.__name__}'", where=baseid + ) + self._scope: Final = scope + # If the fixture is directly parametrized, the parameter values. + self.params: Final = params + # If the fixture is directly parametrized, a tuple of explicit IDs to + # assign to the parameter values, or a callable to generate an ID given + # a parameter value. + self.ids: Final = ids + # The names requested by the fixtures. + self.argnames: Final = getfuncargnames(func, name=argname) + # If the fixture was executed, the current value of the fixture. + # Can change if the fixture is executed with different parameters. + self.cached_result: _FixtureCachedResult[FixtureValue] | None = None + self._finalizers: Final[list[Callable[[], object]]] = [] + + # only used to emit a deprecationwarning, can be removed in pytest9 + self._autouse = _autouse + + @property + def scope(self) -> _ScopeName: + """Scope string, one of "function", "class", "module", "package", "session".""" + return self._scope.value + + def addfinalizer(self, finalizer: Callable[[], object]) -> None: + self._finalizers.append(finalizer) + + def finish(self, request: SubRequest) -> None: + exceptions: list[BaseException] = [] + while self._finalizers: + fin = self._finalizers.pop() + try: + fin() + except BaseException as e: + exceptions.append(e) + node = request.node + node.ihook.pytest_fixture_post_finalizer(fixturedef=self, request=request) + # Even if finalization fails, we invalidate the cached fixture + # value and remove all finalizers because they may be bound methods + # which will keep instances alive. + self.cached_result = None + self._finalizers.clear() + if len(exceptions) == 1: + raise exceptions[0] + elif len(exceptions) > 1: + msg = f'errors while tearing down fixture "{self.argname}" of {node}' + raise BaseExceptionGroup(msg, exceptions[::-1]) + + def execute(self, request: SubRequest) -> FixtureValue: + """Return the value of this fixture, executing it if not cached.""" + # Ensure that the dependent fixtures requested by this fixture are loaded. + # This needs to be done before checking if we have a cached value, since + # if a dependent fixture has their cache invalidated, e.g. due to + # parametrization, they finalize themselves and fixtures depending on it + # (which will likely include this fixture) setting `self.cached_result = None`. + # See #4871 + requested_fixtures_that_should_finalize_us = [] + for argname in self.argnames: + fixturedef = request._get_active_fixturedef(argname) + # Saves requested fixtures in a list so we later can add our finalizer + # to them, ensuring that if a requested fixture gets torn down we get torn + # down first. This is generally handled by SetupState, but still currently + # needed when this fixture is not parametrized but depends on a parametrized + # fixture. + requested_fixtures_that_should_finalize_us.append(fixturedef) + + # Check for (and return) cached value/exception. + if self.cached_result is not None: + request_cache_key = self.cache_key(request) + cache_key = self.cached_result[1] + try: + # Attempt to make a normal == check: this might fail for objects + # which do not implement the standard comparison (like numpy arrays -- #6497). + cache_hit = bool(request_cache_key == cache_key) + except (ValueError, RuntimeError): + # If the comparison raises, use 'is' as fallback. + cache_hit = request_cache_key is cache_key + + if cache_hit: + if self.cached_result[2] is not None: + exc, exc_tb = self.cached_result[2] + raise exc.with_traceback(exc_tb) + else: + return self.cached_result[0] + # We have a previous but differently parametrized fixture instance + # so we need to tear it down before creating a new one. + self.finish(request) + assert self.cached_result is None + + # Add finalizer to requested fixtures we saved previously. + # We make sure to do this after checking for cached value to avoid + # adding our finalizer multiple times. (#12135) + finalizer = functools.partial(self.finish, request=request) + for parent_fixture in requested_fixtures_that_should_finalize_us: + parent_fixture.addfinalizer(finalizer) + + ihook = request.node.ihook + try: + # Setup the fixture, run the code in it, and cache the value + # in self.cached_result. + result: FixtureValue = ihook.pytest_fixture_setup( + fixturedef=self, request=request + ) + finally: + # Schedule our finalizer, even if the setup failed. + request.node.addfinalizer(finalizer) + + return result + + def cache_key(self, request: SubRequest) -> object: + return getattr(request, "param", None) + + def __repr__(self) -> str: + return f"" + + +class RequestFixtureDef(FixtureDef[FixtureRequest]): + """A custom FixtureDef for the special "request" fixture. + + A new one is generated on-demand whenever "request" is requested. + """ + + def __init__(self, request: FixtureRequest) -> None: + super().__init__( + config=request.config, + baseid=None, + argname="request", + func=lambda: request, + scope=Scope.Function, + params=None, + _ispytest=True, + ) + self.cached_result = (request, [0], None) + + def addfinalizer(self, finalizer: Callable[[], object]) -> None: + pass + + +def resolve_fixture_function( + fixturedef: FixtureDef[FixtureValue], request: FixtureRequest +) -> _FixtureFunc[FixtureValue]: + """Get the actual callable that can be called to obtain the fixture + value.""" + fixturefunc = fixturedef.func + # The fixture function needs to be bound to the actual + # request.instance so that code working with "fixturedef" behaves + # as expected. + instance = request.instance + if instance is not None: + # Handle the case where fixture is defined not in a test class, but some other class + # (for example a plugin class with a fixture), see #2270. + if hasattr(fixturefunc, "__self__") and not isinstance( + instance, + fixturefunc.__self__.__class__, + ): + return fixturefunc + fixturefunc = getimfunc(fixturedef.func) + if fixturefunc != fixturedef.func: + fixturefunc = fixturefunc.__get__(instance) + return fixturefunc + + +def pytest_fixture_setup( + fixturedef: FixtureDef[FixtureValue], request: SubRequest +) -> FixtureValue: + """Execution of fixture setup.""" + kwargs = {} + for argname in fixturedef.argnames: + kwargs[argname] = request.getfixturevalue(argname) + + fixturefunc = resolve_fixture_function(fixturedef, request) + my_cache_key = fixturedef.cache_key(request) + + if inspect.isasyncgenfunction(fixturefunc) or inspect.iscoroutinefunction( + fixturefunc + ): + auto_str = " with autouse=True" if fixturedef._autouse else "" + + warnings.warn( + PytestRemovedIn9Warning( + f"{request.node.name!r} requested an async fixture " + f"{request.fixturename!r}{auto_str}, with no plugin or hook that " + "handled it. This is usually an error, as pytest does not natively " + "support it. " + "This will turn into an error in pytest 9.\n" + "See: https://docs.pytest.org/en/stable/deprecations.html#sync-test-depending-on-async-fixture" + ), + # no stacklevel will point at users code, so we just point here + stacklevel=1, + ) + + try: + result = call_fixture_func(fixturefunc, request, kwargs) + except TEST_OUTCOME as e: + if isinstance(e, skip.Exception): + # The test requested a fixture which caused a skip. + # Don't show the fixture as the skip location, as then the user + # wouldn't know which test skipped. + e._use_item_location = True + fixturedef.cached_result = (None, my_cache_key, (e, e.__traceback__)) + raise + fixturedef.cached_result = (result, my_cache_key, None) + return result + + +@final +@dataclasses.dataclass(frozen=True) +class FixtureFunctionMarker: + scope: _ScopeName | Callable[[str, Config], _ScopeName] + params: tuple[object, ...] | None + autouse: bool = False + ids: tuple[object | None, ...] | Callable[[Any], object | None] | None = None + name: str | None = None + + _ispytest: dataclasses.InitVar[bool] = False + + def __post_init__(self, _ispytest: bool) -> None: + check_ispytest(_ispytest) + + def __call__(self, function: FixtureFunction) -> FixtureFunctionDefinition: + if inspect.isclass(function): + raise ValueError("class fixtures not supported (maybe in the future)") + + if isinstance(function, FixtureFunctionDefinition): + raise ValueError( + f"@pytest.fixture is being applied more than once to the same function {function.__name__!r}" + ) + + if hasattr(function, "pytestmark"): + warnings.warn(MARKED_FIXTURE, stacklevel=2) + + fixture_definition = FixtureFunctionDefinition( + function=function, fixture_function_marker=self, _ispytest=True + ) + + name = self.name or function.__name__ + if name == "request": + location = getlocation(function) + fail( + f"'request' is a reserved word for fixtures, use another name:\n {location}", + pytrace=False, + ) + + return fixture_definition + + +# TODO: paramspec/return type annotation tracking and storing +class FixtureFunctionDefinition: + def __init__( + self, + *, + function: Callable[..., Any], + fixture_function_marker: FixtureFunctionMarker, + instance: object | None = None, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self.name = fixture_function_marker.name or function.__name__ + # In order to show the function that this fixture contains in messages. + # Set the __name__ to be same as the function __name__ or the given fixture name. + self.__name__ = self.name + self._fixture_function_marker = fixture_function_marker + if instance is not None: + self._fixture_function = cast( + Callable[..., Any], function.__get__(instance) + ) + else: + self._fixture_function = function + functools.update_wrapper(self, function) + + def __repr__(self) -> str: + return f"" + + def __get__(self, instance, owner=None): + """Behave like a method if the function it was applied to was a method.""" + return FixtureFunctionDefinition( + function=self._fixture_function, + fixture_function_marker=self._fixture_function_marker, + instance=instance, + _ispytest=True, + ) + + def __call__(self, *args: Any, **kwds: Any) -> Any: + message = ( + f'Fixture "{self.name}" called directly. Fixtures are not meant to be called directly,\n' + "but are created automatically when test functions request them as parameters.\n" + "See https://docs.pytest.org/en/stable/explanation/fixtures.html for more information about fixtures, and\n" + "https://docs.pytest.org/en/stable/deprecations.html#calling-fixtures-directly" + ) + fail(message, pytrace=False) + + def _get_wrapped_function(self) -> Callable[..., Any]: + return self._fixture_function + + +@overload +def fixture( + fixture_function: Callable[..., object], + *, + scope: _ScopeName | Callable[[str, Config], _ScopeName] = ..., + params: Iterable[object] | None = ..., + autouse: bool = ..., + ids: Sequence[object | None] | Callable[[Any], object | None] | None = ..., + name: str | None = ..., +) -> FixtureFunctionDefinition: ... + + +@overload +def fixture( + fixture_function: None = ..., + *, + scope: _ScopeName | Callable[[str, Config], _ScopeName] = ..., + params: Iterable[object] | None = ..., + autouse: bool = ..., + ids: Sequence[object | None] | Callable[[Any], object | None] | None = ..., + name: str | None = None, +) -> FixtureFunctionMarker: ... + + +def fixture( + fixture_function: FixtureFunction | None = None, + *, + scope: _ScopeName | Callable[[str, Config], _ScopeName] = "function", + params: Iterable[object] | None = None, + autouse: bool = False, + ids: Sequence[object | None] | Callable[[Any], object | None] | None = None, + name: str | None = None, +) -> FixtureFunctionMarker | FixtureFunctionDefinition: + """Decorator to mark a fixture factory function. + + This decorator can be used, with or without parameters, to define a + fixture function. + + The name of the fixture function can later be referenced to cause its + invocation ahead of running tests: test modules or classes can use the + ``pytest.mark.usefixtures(fixturename)`` marker. + + Test functions can directly use fixture names as input arguments in which + case the fixture instance returned from the fixture function will be + injected. + + Fixtures can provide their values to test functions using ``return`` or + ``yield`` statements. When using ``yield`` the code block after the + ``yield`` statement is executed as teardown code regardless of the test + outcome, and must yield exactly once. + + :param scope: + The scope for which this fixture is shared; one of ``"function"`` + (default), ``"class"``, ``"module"``, ``"package"`` or ``"session"``. + + This parameter may also be a callable which receives ``(fixture_name, config)`` + as parameters, and must return a ``str`` with one of the values mentioned above. + + See :ref:`dynamic scope` in the docs for more information. + + :param params: + An optional list of parameters which will cause multiple invocations + of the fixture function and all of the tests using it. The current + parameter is available in ``request.param``. + + :param autouse: + If True, the fixture func is activated for all tests that can see it. + If False (the default), an explicit reference is needed to activate + the fixture. + + :param ids: + Sequence of ids each corresponding to the params so that they are + part of the test id. If no ids are provided they will be generated + automatically from the params. + + :param name: + The name of the fixture. This defaults to the name of the decorated + function. If a fixture is used in the same module in which it is + defined, the function name of the fixture will be shadowed by the + function arg that requests the fixture; one way to resolve this is to + name the decorated function ``fixture_`` and then use + ``@pytest.fixture(name='')``. + """ + fixture_marker = FixtureFunctionMarker( + scope=scope, + params=tuple(params) if params is not None else None, + autouse=autouse, + ids=None if ids is None else ids if callable(ids) else tuple(ids), + name=name, + _ispytest=True, + ) + + # Direct decoration. + if fixture_function: + return fixture_marker(fixture_function) + + return fixture_marker + + +def yield_fixture( + fixture_function=None, + *args, + scope="function", + params=None, + autouse=False, + ids=None, + name=None, +): + """(Return a) decorator to mark a yield-fixture factory function. + + .. deprecated:: 3.0 + Use :py:func:`pytest.fixture` directly instead. + """ + warnings.warn(YIELD_FIXTURE, stacklevel=2) + return fixture( + fixture_function, + *args, + scope=scope, + params=params, + autouse=autouse, + ids=ids, + name=name, + ) + + +@fixture(scope="session") +def pytestconfig(request: FixtureRequest) -> Config: + """Session-scoped fixture that returns the session's :class:`pytest.Config` + object. + + Example:: + + def test_foo(pytestconfig): + if pytestconfig.get_verbosity() > 0: + ... + + """ + return request.config + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "usefixtures", + type="args", + default=[], + help="List of default fixtures to be used with this project", + ) + group = parser.getgroup("general") + group.addoption( + "--fixtures", + "--funcargs", + action="store_true", + dest="showfixtures", + default=False, + help="Show available fixtures, sorted by plugin appearance " + "(fixtures with leading '_' are only shown with '-v')", + ) + group.addoption( + "--fixtures-per-test", + action="store_true", + dest="show_fixtures_per_test", + default=False, + help="Show fixtures per test", + ) + + +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + if config.option.showfixtures: + showfixtures(config) + return 0 + if config.option.show_fixtures_per_test: + show_fixtures_per_test(config) + return 0 + return None + + +def _get_direct_parametrize_args(node: nodes.Node) -> set[str]: + """Return all direct parametrization arguments of a node, so we don't + mistake them for fixtures. + + Check https://github.com/pytest-dev/pytest/issues/5036. + + These things are done later as well when dealing with parametrization + so this could be improved. + """ + parametrize_argnames: set[str] = set() + for marker in node.iter_markers(name="parametrize"): + if not marker.kwargs.get("indirect", False): + p_argnames, _ = ParameterSet._parse_parametrize_args( + *marker.args, **marker.kwargs + ) + parametrize_argnames.update(p_argnames) + return parametrize_argnames + + +def deduplicate_names(*seqs: Iterable[str]) -> tuple[str, ...]: + """De-duplicate the sequence of names while keeping the original order.""" + # Ideally we would use a set, but it does not preserve insertion order. + return tuple(dict.fromkeys(name for seq in seqs for name in seq)) + + +class FixtureManager: + """pytest fixture definitions and information is stored and managed + from this class. + + During collection fm.parsefactories() is called multiple times to parse + fixture function definitions into FixtureDef objects and internal + data structures. + + During collection of test functions, metafunc-mechanics instantiate + a FuncFixtureInfo object which is cached per node/func-name. + This FuncFixtureInfo object is later retrieved by Function nodes + which themselves offer a fixturenames attribute. + + The FuncFixtureInfo object holds information about fixtures and FixtureDefs + relevant for a particular function. An initial list of fixtures is + assembled like this: + + - config-defined usefixtures + - autouse-marked fixtures along the collection chain up from the function + - usefixtures markers at module/class/function level + - test function funcargs + + Subsequently the funcfixtureinfo.fixturenames attribute is computed + as the closure of the fixtures needed to setup the initial fixtures, + i.e. fixtures needed by fixture functions themselves are appended + to the fixturenames list. + + Upon the test-setup phases all fixturenames are instantiated, retrieved + by a lookup of their FuncFixtureInfo. + """ + + def __init__(self, session: Session) -> None: + self.session = session + self.config: Config = session.config + # Maps a fixture name (argname) to all of the FixtureDefs in the test + # suite/plugins defined with this name. Populated by parsefactories(). + # TODO: The order of the FixtureDefs list of each arg is significant, + # explain. + self._arg2fixturedefs: Final[dict[str, list[FixtureDef[Any]]]] = {} + self._holderobjseen: Final[set[object]] = set() + # A mapping from a nodeid to a list of autouse fixtures it defines. + self._nodeid_autousenames: Final[dict[str, list[str]]] = { + "": self.config.getini("usefixtures"), + } + session.config.pluginmanager.register(self, "funcmanage") + + def getfixtureinfo( + self, + node: nodes.Item, + func: Callable[..., object] | None, + cls: type | None, + ) -> FuncFixtureInfo: + """Calculate the :class:`FuncFixtureInfo` for an item. + + If ``func`` is None, or if the item sets an attribute + ``nofuncargs = True``, then ``func`` is not examined at all. + + :param node: + The item requesting the fixtures. + :param func: + The item's function. + :param cls: + If the function is a method, the method's class. + """ + if func is not None and not getattr(node, "nofuncargs", False): + argnames = getfuncargnames(func, name=node.name, cls=cls) + else: + argnames = () + usefixturesnames = self._getusefixturesnames(node) + autousenames = self._getautousenames(node) + initialnames = deduplicate_names(autousenames, usefixturesnames, argnames) + + direct_parametrize_args = _get_direct_parametrize_args(node) + + names_closure, arg2fixturedefs = self.getfixtureclosure( + parentnode=node, + initialnames=initialnames, + ignore_args=direct_parametrize_args, + ) + + return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs) + + def pytest_plugin_registered(self, plugin: _PluggyPlugin, plugin_name: str) -> None: + # Fixtures defined in conftest plugins are only visible to within the + # conftest's directory. This is unlike fixtures in non-conftest plugins + # which have global visibility. So for conftests, construct the base + # nodeid from the plugin name (which is the conftest path). + if plugin_name and plugin_name.endswith("conftest.py"): + # Note: we explicitly do *not* use `plugin.__file__` here -- The + # difference is that plugin_name has the correct capitalization on + # case-insensitive systems (Windows) and other normalization issues + # (issue #11816). + conftestpath = absolutepath(plugin_name) + try: + nodeid = str(conftestpath.parent.relative_to(self.config.rootpath)) + except ValueError: + nodeid = "" + if nodeid == ".": + nodeid = "" + if os.sep != nodes.SEP: + nodeid = nodeid.replace(os.sep, nodes.SEP) + else: + nodeid = None + + self.parsefactories(plugin, nodeid) + + def _getautousenames(self, node: nodes.Node) -> Iterator[str]: + """Return the names of autouse fixtures applicable to node.""" + for parentnode in node.listchain(): + basenames = self._nodeid_autousenames.get(parentnode.nodeid) + if basenames: + yield from basenames + + def _getusefixturesnames(self, node: nodes.Item) -> Iterator[str]: + """Return the names of usefixtures fixtures applicable to node.""" + for marker_node, mark in node.iter_markers_with_node(name="usefixtures"): + if not mark.args: + marker_node.warn( + PytestWarning( + f"usefixtures() in {node.nodeid} without arguments has no effect" + ) + ) + yield from mark.args + + def getfixtureclosure( + self, + parentnode: nodes.Node, + initialnames: tuple[str, ...], + ignore_args: AbstractSet[str], + ) -> tuple[list[str], dict[str, Sequence[FixtureDef[Any]]]]: + # Collect the closure of all fixtures, starting with the given + # fixturenames as the initial set. As we have to visit all + # factory definitions anyway, we also return an arg2fixturedefs + # mapping so that the caller can reuse it and does not have + # to re-discover fixturedefs again for each fixturename + # (discovering matching fixtures for a given name/node is expensive). + + fixturenames_closure = list(initialnames) + + arg2fixturedefs: dict[str, Sequence[FixtureDef[Any]]] = {} + + # Track the index for each fixture name in the simulated stack. + # Needed for handling override chains correctly, similar to _get_active_fixturedef. + # Using negative indices: -1 is the most specific (last), -2 is second to last, etc. + current_indices: dict[str, int] = {} + + def process_argname(argname: str) -> None: + # Optimization: already processed this argname. + if current_indices.get(argname) == -1: + return + + if argname not in fixturenames_closure: + fixturenames_closure.append(argname) + + if argname in ignore_args: + return + + fixturedefs = arg2fixturedefs.get(argname) + if not fixturedefs: + fixturedefs = self.getfixturedefs(argname, parentnode) + if not fixturedefs: + # Fixture not defined or not visible (will error during runtest). + return + arg2fixturedefs[argname] = fixturedefs + + index = current_indices.get(argname, -1) + if -index > len(fixturedefs): + # Exhausted the override chain (will error during runtest). + return + fixturedef = fixturedefs[index] + + current_indices[argname] = index - 1 + for dep in fixturedef.argnames: + process_argname(dep) + current_indices[argname] = index + + for name in initialnames: + process_argname(name) + + def sort_by_scope(arg_name: str) -> Scope: + try: + fixturedefs = arg2fixturedefs[arg_name] + except KeyError: + return Scope.Function + else: + return fixturedefs[-1]._scope + + fixturenames_closure.sort(key=sort_by_scope, reverse=True) + return fixturenames_closure, arg2fixturedefs + + def pytest_generate_tests(self, metafunc: Metafunc) -> None: + """Generate new tests based on parametrized fixtures used by the given metafunc""" + + def get_parametrize_mark_argnames(mark: Mark) -> Sequence[str]: + args, _ = ParameterSet._parse_parametrize_args(*mark.args, **mark.kwargs) + return args + + for argname in metafunc.fixturenames: + # Get the FixtureDefs for the argname. + fixture_defs = metafunc._arg2fixturedefs.get(argname) + if not fixture_defs: + # Will raise FixtureLookupError at setup time if not parametrized somewhere + # else (e.g @pytest.mark.parametrize) + continue + + # If the test itself parametrizes using this argname, give it + # precedence. + if any( + argname in get_parametrize_mark_argnames(mark) + for mark in metafunc.definition.iter_markers("parametrize") + ): + continue + + # In the common case we only look at the fixture def with the + # closest scope (last in the list). But if the fixture overrides + # another fixture, while requesting the super fixture, keep going + # in case the super fixture is parametrized (#1953). + for fixturedef in reversed(fixture_defs): + # Fixture is parametrized, apply it and stop. + if fixturedef.params is not None: + metafunc.parametrize( + argname, + fixturedef.params, + indirect=True, + scope=fixturedef.scope, + ids=fixturedef.ids, + ) + break + + # Not requesting the overridden super fixture, stop. + if argname not in fixturedef.argnames: + break + + # Try next super fixture, if any. + + def pytest_collection_modifyitems(self, items: list[nodes.Item]) -> None: + # Separate parametrized setups. + items[:] = reorder_items(items) + + def _register_fixture( + self, + *, + name: str, + func: _FixtureFunc[object], + nodeid: str | None, + scope: Scope | _ScopeName | Callable[[str, Config], _ScopeName] = "function", + params: Sequence[object] | None = None, + ids: tuple[object | None, ...] | Callable[[Any], object | None] | None = None, + autouse: bool = False, + ) -> None: + """Register a fixture + + :param name: + The fixture's name. + :param func: + The fixture's implementation function. + :param nodeid: + The visibility of the fixture. The fixture will be available to the + node with this nodeid and its children in the collection tree. + None means that the fixture is visible to the entire collection tree, + e.g. a fixture defined for general use in a plugin. + :param scope: + The fixture's scope. + :param params: + The fixture's parametrization params. + :param ids: + The fixture's IDs. + :param autouse: + Whether this is an autouse fixture. + """ + fixture_def = FixtureDef( + config=self.config, + baseid=nodeid, + argname=name, + func=func, + scope=scope, + params=params, + ids=ids, + _ispytest=True, + _autouse=autouse, + ) + + faclist = self._arg2fixturedefs.setdefault(name, []) + if fixture_def.has_location: + faclist.append(fixture_def) + else: + # fixturedefs with no location are at the front + # so this inserts the current fixturedef after the + # existing fixturedefs from external plugins but + # before the fixturedefs provided in conftests. + i = len([f for f in faclist if not f.has_location]) + faclist.insert(i, fixture_def) + if autouse: + self._nodeid_autousenames.setdefault(nodeid or "", []).append(name) + + @overload + def parsefactories( + self, + node_or_obj: nodes.Node, + ) -> None: + raise NotImplementedError() + + @overload + def parsefactories( + self, + node_or_obj: object, + nodeid: str | None, + ) -> None: + raise NotImplementedError() + + def parsefactories( + self, + node_or_obj: nodes.Node | object, + nodeid: str | NotSetType | None = NOTSET, + ) -> None: + """Collect fixtures from a collection node or object. + + Found fixtures are parsed into `FixtureDef`s and saved. + + If `node_or_object` is a collection node (with an underlying Python + object), the node's object is traversed and the node's nodeid is used to + determine the fixtures' visibility. `nodeid` must not be specified in + this case. + + If `node_or_object` is an object (e.g. a plugin), the object is + traversed and the given `nodeid` is used to determine the fixtures' + visibility. `nodeid` must be specified in this case; None and "" mean + total visibility. + """ + if nodeid is not NOTSET: + holderobj = node_or_obj + else: + assert isinstance(node_or_obj, nodes.Node) + holderobj = cast(object, node_or_obj.obj) # type: ignore[attr-defined] + assert isinstance(node_or_obj.nodeid, str) + nodeid = node_or_obj.nodeid + if holderobj in self._holderobjseen: + return + + # Avoid accessing `@property` (and other descriptors) when iterating fixtures. + if not safe_isclass(holderobj) and not isinstance(holderobj, types.ModuleType): + holderobj_tp: object = type(holderobj) + else: + holderobj_tp = holderobj + + self._holderobjseen.add(holderobj) + for name in dir(holderobj): + # The attribute can be an arbitrary descriptor, so the attribute + # access below can raise. safe_getattr() ignores such exceptions. + obj_ub = safe_getattr(holderobj_tp, name, None) + if type(obj_ub) is FixtureFunctionDefinition: + marker = obj_ub._fixture_function_marker + if marker.name: + fixture_name = marker.name + else: + fixture_name = name + + # OK we know it is a fixture -- now safe to look up on the _instance_. + try: + obj = getattr(holderobj, name) + # if the fixture is named in the decorator we cannot find it in the module + except AttributeError: + obj = obj_ub + + func = obj._get_wrapped_function() + + self._register_fixture( + name=fixture_name, + nodeid=nodeid, + func=func, + scope=marker.scope, + params=marker.params, + ids=marker.ids, + autouse=marker.autouse, + ) + + def getfixturedefs( + self, argname: str, node: nodes.Node + ) -> Sequence[FixtureDef[Any]] | None: + """Get FixtureDefs for a fixture name which are applicable + to a given node. + + Returns None if there are no fixtures at all defined with the given + name. (This is different from the case in which there are fixtures + with the given name, but none applicable to the node. In this case, + an empty result is returned). + + :param argname: Name of the fixture to search for. + :param node: The requesting Node. + """ + try: + fixturedefs = self._arg2fixturedefs[argname] + except KeyError: + return None + return tuple(self._matchfactories(fixturedefs, node)) + + def _matchfactories( + self, fixturedefs: Iterable[FixtureDef[Any]], node: nodes.Node + ) -> Iterator[FixtureDef[Any]]: + parentnodeids = {n.nodeid for n in node.iter_parents()} + for fixturedef in fixturedefs: + if fixturedef.baseid in parentnodeids: + yield fixturedef + + +def show_fixtures_per_test(config: Config) -> int | ExitCode: + from _pytest.main import wrap_session + + return wrap_session(config, _show_fixtures_per_test) + + +_PYTEST_DIR = Path(_pytest.__file__).parent + + +def _pretty_fixture_path(invocation_dir: Path, func) -> str: + loc = Path(getlocation(func, invocation_dir)) + prefix = Path("...", "_pytest") + try: + return str(prefix / loc.relative_to(_PYTEST_DIR)) + except ValueError: + return bestrelpath(invocation_dir, loc) + + +def _show_fixtures_per_test(config: Config, session: Session) -> None: + import _pytest.config + + session.perform_collect() + invocation_dir = config.invocation_params.dir + tw = _pytest.config.create_terminal_writer(config) + verbose = config.get_verbosity() + + def get_best_relpath(func) -> str: + loc = getlocation(func, invocation_dir) + return bestrelpath(invocation_dir, Path(loc)) + + def write_fixture(fixture_def: FixtureDef[object]) -> None: + argname = fixture_def.argname + if verbose <= 0 and argname.startswith("_"): + return + prettypath = _pretty_fixture_path(invocation_dir, fixture_def.func) + tw.write(f"{argname}", green=True) + tw.write(f" -- {prettypath}", yellow=True) + tw.write("\n") + fixture_doc = inspect.getdoc(fixture_def.func) + if fixture_doc: + write_docstring( + tw, + fixture_doc.split("\n\n", maxsplit=1)[0] + if verbose <= 0 + else fixture_doc, + ) + else: + tw.line(" no docstring available", red=True) + + def write_item(item: nodes.Item) -> None: + # Not all items have _fixtureinfo attribute. + info: FuncFixtureInfo | None = getattr(item, "_fixtureinfo", None) + if info is None or not info.name2fixturedefs: + # This test item does not use any fixtures. + return + tw.line() + tw.sep("-", f"fixtures used by {item.name}") + # TODO: Fix this type ignore. + tw.sep("-", f"({get_best_relpath(item.function)})") # type: ignore[attr-defined] + # dict key not used in loop but needed for sorting. + for _, fixturedefs in sorted(info.name2fixturedefs.items()): + assert fixturedefs is not None + if not fixturedefs: + continue + # Last item is expected to be the one used by the test item. + write_fixture(fixturedefs[-1]) + + for session_item in session.items: + write_item(session_item) + + +def showfixtures(config: Config) -> int | ExitCode: + from _pytest.main import wrap_session + + return wrap_session(config, _showfixtures_main) + + +def _showfixtures_main(config: Config, session: Session) -> None: + import _pytest.config + + session.perform_collect() + invocation_dir = config.invocation_params.dir + tw = _pytest.config.create_terminal_writer(config) + verbose = config.get_verbosity() + + fm = session._fixturemanager + + available = [] + seen: set[tuple[str, str]] = set() + + for argname, fixturedefs in fm._arg2fixturedefs.items(): + assert fixturedefs is not None + if not fixturedefs: + continue + for fixturedef in fixturedefs: + loc = getlocation(fixturedef.func, invocation_dir) + if (fixturedef.argname, loc) in seen: + continue + seen.add((fixturedef.argname, loc)) + available.append( + ( + len(fixturedef.baseid), + fixturedef.func.__module__, + _pretty_fixture_path(invocation_dir, fixturedef.func), + fixturedef.argname, + fixturedef, + ) + ) + + available.sort() + currentmodule = None + for baseid, module, prettypath, argname, fixturedef in available: + if currentmodule != module: + if not module.startswith("_pytest."): + tw.line() + tw.sep("-", f"fixtures defined from {module}") + currentmodule = module + if verbose <= 0 and argname.startswith("_"): + continue + tw.write(f"{argname}", green=True) + if fixturedef.scope != "function": + tw.write(f" [{fixturedef.scope} scope]", cyan=True) + tw.write(f" -- {prettypath}", yellow=True) + tw.write("\n") + doc = inspect.getdoc(fixturedef.func) + if doc: + write_docstring( + tw, doc.split("\n\n", maxsplit=1)[0] if verbose <= 0 else doc + ) + else: + tw.line(" no docstring available", red=True) + tw.line() + + +def write_docstring(tw: TerminalWriter, doc: str, indent: str = " ") -> None: + for line in doc.split("\n"): + tw.line(indent + line) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/freeze_support.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/freeze_support.py new file mode 100644 index 0000000..959ff07 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/freeze_support.py @@ -0,0 +1,45 @@ +"""Provides a function to report all internal modules for using freezing +tools.""" + +from __future__ import annotations + +from collections.abc import Iterator +import types + + +def freeze_includes() -> list[str]: + """Return a list of module names used by pytest that should be + included by cx_freeze.""" + import _pytest + + result = list(_iter_all_modules(_pytest)) + return result + + +def _iter_all_modules( + package: str | types.ModuleType, + prefix: str = "", +) -> Iterator[str]: + """Iterate over the names of all modules that can be found in the given + package, recursively. + + >>> import _pytest + >>> list(_iter_all_modules(_pytest)) + ['_pytest._argcomplete', '_pytest._code.code', ...] + """ + import os + import pkgutil + + if isinstance(package, str): + path = package + else: + # Type ignored because typeshed doesn't define ModuleType.__path__ + # (only defined on packages). + package_path = package.__path__ + path, prefix = package_path[0], package.__name__ + "." + for _, name, is_package in pkgutil.iter_modules([path]): + if is_package: + for m in _iter_all_modules(os.path.join(path, name), prefix=name + "."): + yield prefix + m + else: + yield prefix + name diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/helpconfig.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/helpconfig.py new file mode 100644 index 0000000..6a22c9f --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/helpconfig.py @@ -0,0 +1,293 @@ +# mypy: allow-untyped-defs +"""Version info, help messages, tracing configuration.""" + +from __future__ import annotations + +import argparse +from collections.abc import Generator +from collections.abc import Sequence +import os +import sys +from typing import Any + +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import PrintHelp +from _pytest.config.argparsing import Parser +from _pytest.terminal import TerminalReporter +import pytest + + +class HelpAction(argparse.Action): + """An argparse Action that will raise a PrintHelp exception in order to skip + the rest of the argument parsing when --help is passed. + + This prevents argparse from raising UsageError when `--help` is used along + with missing required arguments when any are defined, for example by + ``pytest_addoption``. This is similar to the way that the builtin argparse + --help option is implemented by raising SystemExit. + + To opt in to this behavior, the parse caller must set + `namespace._raise_print_help = True`. Otherwise it just sets the option. + """ + + def __init__( + self, option_strings: Sequence[str], dest: str, *, help: str | None = None + ) -> None: + super().__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + const=True, + default=False, + help=help, + ) + + def __call__( + self, + parser: argparse.ArgumentParser, + namespace: argparse.Namespace, + values: str | Sequence[Any] | None, + option_string: str | None = None, + ) -> None: + setattr(namespace, self.dest, self.const) + + if getattr(namespace, "_raise_print_help", False): + raise PrintHelp + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--version", + "-V", + action="count", + default=0, + dest="version", + help="Display pytest version and information about plugins. " + "When given twice, also display information about plugins.", + ) + group._addoption( # private to use reserved lower-case short option + "-h", + "--help", + action=HelpAction, + dest="help", + help="Show help message and configuration info", + ) + group._addoption( # private to use reserved lower-case short option + "-p", + action="append", + dest="plugins", + default=[], + metavar="name", + help="Early-load given plugin module name or entry point (multi-allowed). " + "To avoid loading of plugins, use the `no:` prefix, e.g. " + "`no:doctest`. See also --disable-plugin-autoload.", + ) + group.addoption( + "--disable-plugin-autoload", + action="store_true", + default=False, + help="Disable plugin auto-loading through entry point packaging metadata. " + "Only plugins explicitly specified in -p or env var PYTEST_PLUGINS will be loaded.", + ) + group.addoption( + "--traceconfig", + "--trace-config", + action="store_true", + default=False, + help="Trace considerations of conftest.py files", + ) + group.addoption( + "--debug", + action="store", + nargs="?", + const="pytestdebug.log", + dest="debug", + metavar="DEBUG_FILE_NAME", + help="Store internal tracing debug information in this log file. " + "This file is opened with 'w' and truncated as a result, care advised. " + "Default: pytestdebug.log.", + ) + group._addoption( # private to use reserved lower-case short option + "-o", + "--override-ini", + dest="override_ini", + action="append", + help='Override configuration option with "option=value" style, ' + "e.g. `-o strict_xfail=True -o cache_dir=cache`.", + ) + + +@pytest.hookimpl(wrapper=True) +def pytest_cmdline_parse() -> Generator[None, Config, Config]: + config = yield + + if config.option.debug: + # --debug | --debug was provided. + path = config.option.debug + debugfile = open(path, "w", encoding="utf-8") + debugfile.write( + "versions pytest-{}, " + "python-{}\ninvocation_dir={}\ncwd={}\nargs={}\n\n".format( + pytest.__version__, + ".".join(map(str, sys.version_info)), + config.invocation_params.dir, + os.getcwd(), + config.invocation_params.args, + ) + ) + config.trace.root.setwriter(debugfile.write) + undo_tracing = config.pluginmanager.enable_tracing() + sys.stderr.write(f"writing pytest debug information to {path}\n") + + def unset_tracing() -> None: + debugfile.close() + sys.stderr.write(f"wrote pytest debug information to {debugfile.name}\n") + config.trace.root.setwriter(None) + undo_tracing() + + config.add_cleanup(unset_tracing) + + return config + + +def show_version_verbose(config: Config) -> None: + """Show verbose pytest version installation, including plugins.""" + sys.stdout.write( + f"This is pytest version {pytest.__version__}, imported from {pytest.__file__}\n" + ) + plugininfo = getpluginversioninfo(config) + if plugininfo: + for line in plugininfo: + sys.stdout.write(line + "\n") + + +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + # Note: a single `--version` argument is handled directly by `Config.main()` to avoid starting up the entire + # pytest infrastructure just to display the version (#13574). + if config.option.version > 1: + show_version_verbose(config) + return ExitCode.OK + elif config.option.help: + config._do_configure() + showhelp(config) + config._ensure_unconfigure() + return ExitCode.OK + return None + + +def showhelp(config: Config) -> None: + import textwrap + + reporter: TerminalReporter | None = config.pluginmanager.get_plugin( + "terminalreporter" + ) + assert reporter is not None + tw = reporter._tw + tw.write(config._parser.optparser.format_help()) + tw.line() + tw.line( + "[pytest] configuration options in the first " + "pytest.toml|pytest.ini|tox.ini|setup.cfg|pyproject.toml file found:" + ) + tw.line() + + columns = tw.fullwidth # costly call + indent_len = 24 # based on argparse's max_help_position=24 + indent = " " * indent_len + for name in config._parser._inidict: + help, type, _default = config._parser._inidict[name] + if help is None: + raise TypeError(f"help argument cannot be None for {name}") + spec = f"{name} ({type}):" + tw.write(f" {spec}") + spec_len = len(spec) + if spec_len > (indent_len - 3): + # Display help starting at a new line. + tw.line() + helplines = textwrap.wrap( + help, + columns, + initial_indent=indent, + subsequent_indent=indent, + break_on_hyphens=False, + ) + + for line in helplines: + tw.line(line) + else: + # Display help starting after the spec, following lines indented. + tw.write(" " * (indent_len - spec_len - 2)) + wrapped = textwrap.wrap(help, columns - indent_len, break_on_hyphens=False) + + if wrapped: + tw.line(wrapped[0]) + for line in wrapped[1:]: + tw.line(indent + line) + + tw.line() + tw.line("Environment variables:") + vars = [ + ( + "CI", + "When set to a non-empty value, pytest knows it is running in a " + "CI process and does not truncate summary info", + ), + ("BUILD_NUMBER", "Equivalent to CI"), + ("PYTEST_ADDOPTS", "Extra command line options"), + ("PYTEST_PLUGINS", "Comma-separated plugins to load during startup"), + ("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "Set to disable plugin auto-loading"), + ("PYTEST_DEBUG", "Set to enable debug tracing of pytest's internals"), + ("PYTEST_DEBUG_TEMPROOT", "Override the system temporary directory"), + ("PYTEST_THEME", "The Pygments style to use for code output"), + ("PYTEST_THEME_MODE", "Set the PYTEST_THEME to be either 'dark' or 'light'"), + ] + for name, help in vars: + tw.line(f" {name:<24} {help}") + tw.line() + tw.line() + + tw.line("to see available markers type: pytest --markers") + tw.line("to see available fixtures type: pytest --fixtures") + tw.line( + "(shown according to specified file_or_dir or current dir " + "if not specified; fixtures with leading '_' are only shown " + "with the '-v' option" + ) + + for warningreport in reporter.stats.get("warnings", []): + tw.line("warning : " + warningreport.message, red=True) + + +def getpluginversioninfo(config: Config) -> list[str]: + lines = [] + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + lines.append("registered third-party plugins:") + for plugin, dist in plugininfo: + loc = getattr(plugin, "__file__", repr(plugin)) + content = f"{dist.project_name}-{dist.version} at {loc}" + lines.append(" " + content) + return lines + + +def pytest_report_header(config: Config) -> list[str]: + lines = [] + if config.option.debug or config.option.traceconfig: + lines.append(f"using: pytest-{pytest.__version__}") + + verinfo = getpluginversioninfo(config) + if verinfo: + lines.extend(verinfo) + + if config.option.traceconfig: + lines.append("active plugins:") + items = config.pluginmanager.list_name_plugin() + for name, plugin in items: + if hasattr(plugin, "__file__"): + r = plugin.__file__ + else: + r = repr(plugin) + lines.append(f" {name:<20}: {r}") + return lines diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/hookspec.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/hookspec.py new file mode 100644 index 0000000..c5bcc36 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/hookspec.py @@ -0,0 +1,1342 @@ +# mypy: allow-untyped-defs +# ruff: noqa: T100 +"""Hook specifications for pytest plugins which are invoked by pytest itself +and by builtin plugins.""" + +from __future__ import annotations + +from collections.abc import Mapping +from collections.abc import Sequence +from pathlib import Path +from typing import Any +from typing import TYPE_CHECKING + +from pluggy import HookspecMarker + +from .deprecated import HOOK_LEGACY_PATH_ARG + + +if TYPE_CHECKING: + import pdb + from typing import Literal + import warnings + + from _pytest._code.code import ExceptionInfo + from _pytest._code.code import ExceptionRepr + from _pytest.compat import LEGACY_PATH + from _pytest.config import _PluggyPlugin + from _pytest.config import Config + from _pytest.config import ExitCode + from _pytest.config import PytestPluginManager + from _pytest.config.argparsing import Parser + from _pytest.fixtures import FixtureDef + from _pytest.fixtures import SubRequest + from _pytest.main import Session + from _pytest.nodes import Collector + from _pytest.nodes import Item + from _pytest.outcomes import Exit + from _pytest.python import Class + from _pytest.python import Function + from _pytest.python import Metafunc + from _pytest.python import Module + from _pytest.reports import CollectReport + from _pytest.reports import TestReport + from _pytest.runner import CallInfo + from _pytest.terminal import TerminalReporter + from _pytest.terminal import TestShortLogReport + + +hookspec = HookspecMarker("pytest") + +# ------------------------------------------------------------------------- +# Initialization hooks called for every plugin +# ------------------------------------------------------------------------- + + +@hookspec(historic=True) +def pytest_addhooks(pluginmanager: PytestPluginManager) -> None: + """Called at plugin registration time to allow adding new hooks via a call to + :func:`pluginmanager.add_hookspecs(module_or_class, prefix) `. + + :param pluginmanager: The pytest plugin manager. + + .. note:: + This hook is incompatible with hook wrappers. + + Use in conftest plugins + ======================= + + If a conftest plugin implements this hook, it will be called immediately + when the conftest is registered. + """ + + +@hookspec(historic=True) +def pytest_plugin_registered( + plugin: _PluggyPlugin, + plugin_name: str, + manager: PytestPluginManager, +) -> None: + """A new pytest plugin got registered. + + :param plugin: The plugin module or instance. + :param plugin_name: The name by which the plugin is registered. + :param manager: The pytest plugin manager. + + .. note:: + This hook is incompatible with hook wrappers. + + Use in conftest plugins + ======================= + + If a conftest plugin implements this hook, it will be called immediately + when the conftest is registered, once for each plugin registered thus far + (including itself!), and for all plugins thereafter when they are + registered. + """ + + +@hookspec(historic=True) +def pytest_addoption(parser: Parser, pluginmanager: PytestPluginManager) -> None: + """Register argparse-style options and config-style config values, + called once at the beginning of a test run. + + :param parser: + To add command line options, call + :py:func:`parser.addoption(...) `. + To add config-file values call :py:func:`parser.addini(...) + `. + + :param pluginmanager: + The pytest plugin manager, which can be used to install :py:func:`~pytest.hookspec`'s + or :py:func:`~pytest.hookimpl`'s and allow one plugin to call another plugin's hooks + to change how command line options are added. + + Options can later be accessed through the + :py:class:`config ` object, respectively: + + - :py:func:`config.getoption(name) ` to + retrieve the value of a command line option. + + - :py:func:`config.getini(name) ` to retrieve + a value read from a configuration file. + + The config object is passed around on many internal objects via the ``.config`` + attribute or can be retrieved as the ``pytestconfig`` fixture. + + .. note:: + This hook is incompatible with hook wrappers. + + Use in conftest plugins + ======================= + + If a conftest plugin implements this hook, it will be called immediately + when the conftest is registered. + + This hook is only called for :ref:`initial conftests `. + """ + + +@hookspec(historic=True) +def pytest_configure(config: Config) -> None: + """Allow plugins and conftest files to perform initial configuration. + + .. note:: + This hook is incompatible with hook wrappers. + + :param config: The pytest config object. + + Use in conftest plugins + ======================= + + This hook is called for every :ref:`initial conftest ` file + after command line options have been parsed. After that, the hook is called + for other conftest files as they are registered. + """ + + +# ------------------------------------------------------------------------- +# Bootstrapping hooks called for plugins registered early enough: +# internal and 3rd party plugins. +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_cmdline_parse( + pluginmanager: PytestPluginManager, args: list[str] +) -> Config | None: + """Return an initialized :class:`~pytest.Config`, parsing the specified args. + + Stops at first non-None result, see :ref:`firstresult`. + + .. note:: + This hook is only called for plugin classes passed to the + ``plugins`` arg when using `pytest.main`_ to perform an in-process + test run. + + :param pluginmanager: The pytest plugin manager. + :param args: List of arguments passed on the command line. + :returns: A pytest config object. + + Use in conftest plugins + ======================= + + This hook is not called for conftest files. + """ + + +def pytest_load_initial_conftests( + early_config: Config, parser: Parser, args: list[str] +) -> None: + """Called to implement the loading of :ref:`initial conftest files + ` ahead of command line option parsing. + + :param early_config: The pytest config object. + :param args: Arguments passed on the command line. + :param parser: To add command line options. + + Use in conftest plugins + ======================= + + This hook is not called for conftest files. + """ + + +@hookspec(firstresult=True) +def pytest_cmdline_main(config: Config) -> ExitCode | int | None: + """Called for performing the main command line action. + + The default implementation will invoke the configure hooks and + :hook:`pytest_runtestloop`. + + Stops at first non-None result, see :ref:`firstresult`. + + :param config: The pytest config object. + :returns: The exit code. + + Use in conftest plugins + ======================= + + This hook is only called for :ref:`initial conftests `. + """ + + +# ------------------------------------------------------------------------- +# collection hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_collection(session: Session) -> object | None: + """Perform the collection phase for the given session. + + Stops at first non-None result, see :ref:`firstresult`. + The return value is not used, but only stops further processing. + + The default collection phase is this (see individual hooks for full details): + + 1. Starting from ``session`` as the initial collector: + + 1. ``pytest_collectstart(collector)`` + 2. ``report = pytest_make_collect_report(collector)`` + 3. ``pytest_exception_interact(collector, call, report)`` if an interactive exception occurred + 4. For each collected node: + + 1. If an item, ``pytest_itemcollected(item)`` + 2. If a collector, recurse into it. + + 5. ``pytest_collectreport(report)`` + + 2. ``pytest_collection_modifyitems(session, config, items)`` + + 1. ``pytest_deselected(items)`` for any deselected items (may be called multiple times) + + 3. ``pytest_collection_finish(session)`` + 4. Set ``session.items`` to the list of collected items + 5. Set ``session.testscollected`` to the number of collected items + + You can implement this hook to only perform some action before collection, + for example the terminal plugin uses it to start displaying the collection + counter (and returns `None`). + + :param session: The pytest session object. + + Use in conftest plugins + ======================= + + This hook is only called for :ref:`initial conftests `. + """ + + +def pytest_collection_modifyitems( + session: Session, config: Config, items: list[Item] +) -> None: + """Called after collection has been performed. May filter or re-order + the items in-place. + + When items are deselected (filtered out from ``items``), + the hook :hook:`pytest_deselected` must be called explicitly + with the deselected items to properly notify other plugins, + e.g. with ``config.hook.pytest_deselected(items=deselected_items)``. + + :param session: The pytest session object. + :param config: The pytest config object. + :param items: List of item objects. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +def pytest_collection_finish(session: Session) -> None: + """Called after collection has been performed and modified. + + :param session: The pytest session object. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +@hookspec( + firstresult=True, + warn_on_impl_args={ + "path": HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg="path", pathlib_path_arg="collection_path" + ), + }, +) +def pytest_ignore_collect( + collection_path: Path, path: LEGACY_PATH, config: Config +) -> bool | None: + """Return ``True`` to ignore this path for collection. + + Return ``None`` to let other plugins ignore the path for collection. + + Returning ``False`` will forcefully *not* ignore this path for collection, + without giving a chance for other plugins to ignore this path. + + This hook is consulted for all files and directories prior to calling + more specific hooks. + + Stops at first non-None result, see :ref:`firstresult`. + + :param collection_path: The path to analyze. + :type collection_path: pathlib.Path + :param path: The path to analyze (deprecated). + :param config: The pytest config object. + + .. versionchanged:: 7.0.0 + The ``collection_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. The ``path`` parameter + has been deprecated. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collection path, only + conftest files in parent directories of the collection path are consulted + (if the path is a directory, its own conftest file is *not* consulted - a + directory cannot ignore itself!). + """ + + +@hookspec(firstresult=True) +def pytest_collect_directory(path: Path, parent: Collector) -> Collector | None: + """Create a :class:`~pytest.Collector` for the given directory, or None if + not relevant. + + .. versionadded:: 8.0 + + For best results, the returned collector should be a subclass of + :class:`~pytest.Directory`, but this is not required. + + The new node needs to have the specified ``parent`` as a parent. + + Stops at first non-None result, see :ref:`firstresult`. + + :param path: The path to analyze. + :type path: pathlib.Path + + See :ref:`custom directory collectors` for a simple example of use of this + hook. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collection path, only + conftest files in parent directories of the collection path are consulted + (if the path is a directory, its own conftest file is *not* consulted - a + directory cannot collect itself!). + """ + + +@hookspec( + warn_on_impl_args={ + "path": HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg="path", pathlib_path_arg="file_path" + ), + }, +) +def pytest_collect_file( + file_path: Path, path: LEGACY_PATH, parent: Collector +) -> Collector | None: + """Create a :class:`~pytest.Collector` for the given path, or None if not relevant. + + For best results, the returned collector should be a subclass of + :class:`~pytest.File`, but this is not required. + + The new node needs to have the specified ``parent`` as a parent. + + :param file_path: The path to analyze. + :type file_path: pathlib.Path + :param path: The path to collect (deprecated). + + .. versionchanged:: 7.0.0 + The ``file_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. The ``path`` parameter + has been deprecated. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given file path, only + conftest files in parent directories of the file path are consulted. + """ + + +# logging hooks for collection + + +def pytest_collectstart(collector: Collector) -> None: + """Collector starts collecting. + + :param collector: + The collector. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collector, only + conftest files in the collector's directory and its parent directories are + consulted. + """ + + +def pytest_itemcollected(item: Item) -> None: + """We just collected a test item. + + :param item: + The item. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_collectreport(report: CollectReport) -> None: + """Collector finished collecting. + + :param report: + The collect report. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collector, only + conftest files in the collector's directory and its parent directories are + consulted. + """ + + +def pytest_deselected(items: Sequence[Item]) -> None: + """Called for deselected test items, e.g. by keyword. + + Note that this hook has two integration aspects for plugins: + + - it can be *implemented* to be notified of deselected items + - it must be *called* from :hook:`pytest_collection_modifyitems` + implementations when items are deselected (to properly notify other plugins). + + May be called multiple times. + + :param items: + The items. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +@hookspec(firstresult=True) +def pytest_make_collect_report(collector: Collector) -> CollectReport | None: + """Perform :func:`collector.collect() ` and return + a :class:`~pytest.CollectReport`. + + Stops at first non-None result, see :ref:`firstresult`. + + :param collector: + The collector. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collector, only + conftest files in the collector's directory and its parent directories are + consulted. + """ + + +# ------------------------------------------------------------------------- +# Python test function related hooks +# ------------------------------------------------------------------------- + + +@hookspec( + firstresult=True, + warn_on_impl_args={ + "path": HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg="path", pathlib_path_arg="module_path" + ), + }, +) +def pytest_pycollect_makemodule( + module_path: Path, path: LEGACY_PATH, parent +) -> Module | None: + """Return a :class:`pytest.Module` collector or None for the given path. + + This hook will be called for each matching test module path. + The :hook:`pytest_collect_file` hook needs to be used if you want to + create test modules for files that do not match as a test module. + + Stops at first non-None result, see :ref:`firstresult`. + + :param module_path: The path of the module to collect. + :type module_path: pathlib.Path + :param path: The path of the module to collect (deprecated). + + .. versionchanged:: 7.0.0 + The ``module_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. + + The ``path`` parameter has been deprecated in favor of ``fspath``. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given parent collector, + only conftest files in the collector's directory and its parent directories + are consulted. + """ + + +@hookspec(firstresult=True) +def pytest_pycollect_makeitem( + collector: Module | Class, name: str, obj: object +) -> None | Item | Collector | list[Item | Collector]: + """Return a custom item/collector for a Python object in a module, or None. + + Stops at first non-None result, see :ref:`firstresult`. + + :param collector: + The module/class collector. + :param name: + The name of the object in the module/class. + :param obj: + The object. + :returns: + The created items/collectors. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collector, only + conftest files in the collector's directory and its parent directories + are consulted. + """ + + +@hookspec(firstresult=True) +def pytest_pyfunc_call(pyfuncitem: Function) -> object | None: + """Call underlying test function. + + Stops at first non-None result, see :ref:`firstresult`. + + :param pyfuncitem: + The function item. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only + conftest files in the item's directory and its parent directories + are consulted. + """ + + +def pytest_generate_tests(metafunc: Metafunc) -> None: + """Generate (multiple) parametrized calls to a test function. + + :param metafunc: + The :class:`~pytest.Metafunc` helper for the test function. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given function definition, + only conftest files in the functions's directory and its parent directories + are consulted. + """ + + +@hookspec(firstresult=True) +def pytest_make_parametrize_id(config: Config, val: object, argname: str) -> str | None: + """Return a user-friendly string representation of the given ``val`` + that will be used by @pytest.mark.parametrize calls, or None if the hook + doesn't know about ``val``. + + The parameter name is available as ``argname``, if required. + + Stops at first non-None result, see :ref:`firstresult`. + + :param config: The pytest config object. + :param val: The parametrized value. + :param argname: The automatic parameter name produced by pytest. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +# ------------------------------------------------------------------------- +# runtest related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_runtestloop(session: Session) -> object | None: + """Perform the main runtest loop (after collection finished). + + The default hook implementation performs the runtest protocol for all items + collected in the session (``session.items``), unless the collection failed + or the ``collectonly`` pytest option is set. + + If at any point :py:func:`pytest.exit` is called, the loop is + terminated immediately. + + If at any point ``session.shouldfail`` or ``session.shouldstop`` are set, the + loop is terminated after the runtest protocol for the current item is finished. + + :param session: The pytest session object. + + Stops at first non-None result, see :ref:`firstresult`. + The return value is not used, but only stops further processing. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +@hookspec(firstresult=True) +def pytest_runtest_protocol(item: Item, nextitem: Item | None) -> object | None: + """Perform the runtest protocol for a single test item. + + The default runtest protocol is this (see individual hooks for full details): + + - ``pytest_runtest_logstart(nodeid, location)`` + + - Setup phase: + - ``call = pytest_runtest_setup(item)`` (wrapped in ``CallInfo(when="setup")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - Call phase, if the setup passed and the ``setuponly`` pytest option is not set: + - ``call = pytest_runtest_call(item)`` (wrapped in ``CallInfo(when="call")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - Teardown phase: + - ``call = pytest_runtest_teardown(item, nextitem)`` (wrapped in ``CallInfo(when="teardown")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - ``pytest_runtest_logfinish(nodeid, location)`` + + :param item: Test item for which the runtest protocol is performed. + :param nextitem: The scheduled-to-be-next test item (or None if this is the end my friend). + + Stops at first non-None result, see :ref:`firstresult`. + The return value is not used, but only stops further processing. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +def pytest_runtest_logstart(nodeid: str, location: tuple[str, int | None, str]) -> None: + """Called at the start of running the runtest protocol for a single item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param nodeid: Full node ID of the item. + :param location: A tuple of ``(filename, lineno, testname)`` + where ``filename`` is a file path relative to ``config.rootpath`` + and ``lineno`` is 0-based. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_runtest_logfinish( + nodeid: str, location: tuple[str, int | None, str] +) -> None: + """Called at the end of running the runtest protocol for a single item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param nodeid: Full node ID of the item. + :param location: A tuple of ``(filename, lineno, testname)`` + where ``filename`` is a file path relative to ``config.rootpath`` + and ``lineno`` is 0-based. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_runtest_setup(item: Item) -> None: + """Called to perform the setup phase for a test item. + + The default implementation runs ``setup()`` on ``item`` and all of its + parents (which haven't been setup yet). This includes obtaining the + values of fixtures required by the item (which haven't been obtained + yet). + + :param item: + The item. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_runtest_call(item: Item) -> None: + """Called to run the test for test item (the call phase). + + The default implementation calls ``item.runtest()``. + + :param item: + The item. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_runtest_teardown(item: Item, nextitem: Item | None) -> None: + """Called to perform the teardown phase for a test item. + + The default implementation runs the finalizers and calls ``teardown()`` + on ``item`` and all of its parents (which need to be torn down). This + includes running the teardown phase of fixtures required by the item (if + they go out of scope). + + :param item: + The item. + :param nextitem: + The scheduled-to-be-next test item (None if no further test item is + scheduled). This argument is used to perform exact teardowns, i.e. + calling just enough finalizers so that nextitem only needs to call + setup functions. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +@hookspec(firstresult=True) +def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> TestReport | None: + """Called to create a :class:`~pytest.TestReport` for each of + the setup, call and teardown runtest phases of a test item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param item: The item. + :param call: The :class:`~pytest.CallInfo` for the phase. + + Stops at first non-None result, see :ref:`firstresult`. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_runtest_logreport(report: TestReport) -> None: + """Process the :class:`~pytest.TestReport` produced for each + of the setup, call and teardown runtest phases of an item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +@hookspec(firstresult=True) +def pytest_report_to_serializable( + config: Config, + report: CollectReport | TestReport, +) -> dict[str, Any] | None: + """Serialize the given report object into a data structure suitable for + sending over the wire, e.g. converted to JSON. + + :param config: The pytest config object. + :param report: The report. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. The exact details may depend + on the plugin which calls the hook. + """ + + +@hookspec(firstresult=True) +def pytest_report_from_serializable( + config: Config, + data: dict[str, Any], +) -> CollectReport | TestReport | None: + """Restore a report object previously serialized with + :hook:`pytest_report_to_serializable`. + + :param config: The pytest config object. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. The exact details may depend + on the plugin which calls the hook. + """ + + +# ------------------------------------------------------------------------- +# Fixture related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_fixture_setup( + fixturedef: FixtureDef[Any], request: SubRequest +) -> object | None: + """Perform fixture setup execution. + + :param fixturedef: + The fixture definition object. + :param request: + The fixture request object. + :returns: + The return value of the call to the fixture function. + + Stops at first non-None result, see :ref:`firstresult`. + + .. note:: + If the fixture function returns None, other implementations of + this hook function will continue to be called, according to the + behavior of the :ref:`firstresult` option. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given fixture, only + conftest files in the fixture scope's directory and its parent directories + are consulted. + """ + + +def pytest_fixture_post_finalizer( + fixturedef: FixtureDef[Any], request: SubRequest +) -> None: + """Called after fixture teardown, but before the cache is cleared, so + the fixture result ``fixturedef.cached_result`` is still available (not + ``None``). + + :param fixturedef: + The fixture definition object. + :param request: + The fixture request object. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given fixture, only + conftest files in the fixture scope's directory and its parent directories + are consulted. + """ + + +# ------------------------------------------------------------------------- +# test session related hooks +# ------------------------------------------------------------------------- + + +def pytest_sessionstart(session: Session) -> None: + """Called after the ``Session`` object has been created and before performing collection + and entering the run test loop. + + :param session: The pytest session object. + + Use in conftest plugins + ======================= + + This hook is only called for :ref:`initial conftests `. + """ + + +def pytest_sessionfinish( + session: Session, + exitstatus: int | ExitCode, +) -> None: + """Called after whole test run finished, right before returning the exit status to the system. + + :param session: The pytest session object. + :param exitstatus: The status which pytest will return to the system. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +def pytest_unconfigure(config: Config) -> None: + """Called before test process is exited. + + :param config: The pytest config object. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +# ------------------------------------------------------------------------- +# hooks for customizing the assert methods +# ------------------------------------------------------------------------- + + +def pytest_assertrepr_compare( + config: Config, op: str, left: object, right: object +) -> list[str] | None: + """Return explanation for comparisons in failing assert expressions. + + Return None for no custom explanation, otherwise return a list + of strings. The strings will be joined by newlines but any newlines + *in* a string will be escaped. Note that all but the first line will + be indented slightly, the intention is for the first line to be a summary. + + :param config: The pytest config object. + :param op: The operator, e.g. `"=="`, `"!="`, `"not in"`. + :param left: The left operand. + :param right: The right operand. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_assertion_pass(item: Item, lineno: int, orig: str, expl: str) -> None: + """Called whenever an assertion passes. + + .. versionadded:: 5.0 + + Use this hook to do some processing after a passing assertion. + The original assertion information is available in the `orig` string + and the pytest introspected assertion information is available in the + `expl` string. + + This hook must be explicitly enabled by the :confval:`enable_assertion_pass_hook` + configuration option: + + .. tab:: toml + + .. code-block:: toml + + [pytest] + enable_assertion_pass_hook = true + + .. tab:: ini + + .. code-block:: ini + + [pytest] + enable_assertion_pass_hook = true + + You need to **clean the .pyc** files in your project directory and interpreter libraries + when enabling this option, as assertions will require to be re-written. + + :param item: pytest item object of current test. + :param lineno: Line number of the assert statement. + :param orig: String with the original assertion. + :param expl: String with the assert explanation. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +# ------------------------------------------------------------------------- +# Hooks for influencing reporting (invoked from _pytest_terminal). +# ------------------------------------------------------------------------- + + +@hookspec( + warn_on_impl_args={ + "startdir": HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg="startdir", pathlib_path_arg="start_path" + ), + }, +) +def pytest_report_header( # type:ignore[empty-body] + config: Config, start_path: Path, startdir: LEGACY_PATH +) -> str | list[str]: + """Return a string or list of strings to be displayed as header info for terminal reporting. + + :param config: The pytest config object. + :param start_path: The starting dir. + :type start_path: pathlib.Path + :param startdir: The starting dir (deprecated). + + .. note:: + + Lines returned by a plugin are displayed before those of plugins which + ran before it. + If you want to have your line(s) displayed first, use + :ref:`trylast=True `. + + .. versionchanged:: 7.0.0 + The ``start_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``startdir`` parameter. The ``startdir`` parameter + has been deprecated. + + Use in conftest plugins + ======================= + + This hook is only called for :ref:`initial conftests `. + """ + + +@hookspec( + warn_on_impl_args={ + "startdir": HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg="startdir", pathlib_path_arg="start_path" + ), + }, +) +def pytest_report_collectionfinish( # type:ignore[empty-body] + config: Config, + start_path: Path, + startdir: LEGACY_PATH, + items: Sequence[Item], +) -> str | list[str]: + """Return a string or list of strings to be displayed after collection + has finished successfully. + + These strings will be displayed after the standard "collected X items" message. + + .. versionadded:: 3.2 + + :param config: The pytest config object. + :param start_path: The starting dir. + :type start_path: pathlib.Path + :param startdir: The starting dir (deprecated). + :param items: List of pytest items that are going to be executed; this list should not be modified. + + .. note:: + + Lines returned by a plugin are displayed before those of plugins which + ran before it. + If you want to have your line(s) displayed first, use + :ref:`trylast=True `. + + .. versionchanged:: 7.0.0 + The ``start_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``startdir`` parameter. The ``startdir`` parameter + has been deprecated. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +@hookspec(firstresult=True) +def pytest_report_teststatus( # type:ignore[empty-body] + report: CollectReport | TestReport, config: Config +) -> TestShortLogReport | tuple[str, str, str | tuple[str, Mapping[str, bool]]]: + """Return result-category, shortletter and verbose word for status + reporting. + + The result-category is a category in which to count the result, for + example "passed", "skipped", "error" or the empty string. + + The shortletter is shown as testing progresses, for example ".", "s", + "E" or the empty string. + + The verbose word is shown as testing progresses in verbose mode, for + example "PASSED", "SKIPPED", "ERROR" or the empty string. + + pytest may style these implicitly according to the report outcome. + To provide explicit styling, return a tuple for the verbose word, + for example ``"rerun", "R", ("RERUN", {"yellow": True})``. + + :param report: The report object whose status is to be returned. + :param config: The pytest config object. + :returns: The test status. + + Stops at first non-None result, see :ref:`firstresult`. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +def pytest_terminal_summary( + terminalreporter: TerminalReporter, + exitstatus: ExitCode, + config: Config, +) -> None: + """Add a section to terminal summary reporting. + + :param terminalreporter: The internal terminal reporter object. + :param exitstatus: The exit status that will be reported back to the OS. + :param config: The pytest config object. + + .. versionadded:: 4.2 + The ``config`` parameter. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +@hookspec(historic=True) +def pytest_warning_recorded( + warning_message: warnings.WarningMessage, + when: Literal["config", "collect", "runtest"], + nodeid: str, + location: tuple[str, int, str] | None, +) -> None: + """Process a warning captured by the internal pytest warnings plugin. + + :param warning_message: + The captured warning. This is the same object produced by :class:`warnings.catch_warnings`, + and contains the same attributes as the parameters of :py:func:`warnings.showwarning`. + + :param when: + Indicates when the warning was captured. Possible values: + + * ``"config"``: during pytest configuration/initialization stage. + * ``"collect"``: during test collection. + * ``"runtest"``: during test execution. + + :param nodeid: + Full id of the item. Empty string for warnings that are not specific to + a particular node. + + :param location: + When available, holds information about the execution context of the captured + warning (filename, linenumber, function). ``function`` evaluates to + when the execution context is at the module level. + + .. versionadded:: 6.0 + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. If the warning is specific to a + particular node, only conftest files in parent directories of the node are + consulted. + """ + + +# ------------------------------------------------------------------------- +# Hooks for influencing skipping +# ------------------------------------------------------------------------- + + +def pytest_markeval_namespace( # type:ignore[empty-body] + config: Config, +) -> dict[str, Any]: + """Called when constructing the globals dictionary used for + evaluating string conditions in xfail/skipif markers. + + This is useful when the condition for a marker requires + objects that are expensive or impossible to obtain during + collection time, which is required by normal boolean + conditions. + + .. versionadded:: 6.2 + + :param config: The pytest config object. + :returns: A dictionary of additional globals to add. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in parent directories of the item are consulted. + """ + + +# ------------------------------------------------------------------------- +# error handling and internal debugging hooks +# ------------------------------------------------------------------------- + + +def pytest_internalerror( + excrepr: ExceptionRepr, + excinfo: ExceptionInfo[BaseException], +) -> bool | None: + """Called for internal errors. + + Return True to suppress the fallback handling of printing an + INTERNALERROR message directly to sys.stderr. + + :param excrepr: The exception repr object. + :param excinfo: The exception info. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +def pytest_keyboard_interrupt( + excinfo: ExceptionInfo[KeyboardInterrupt | Exit], +) -> None: + """Called for keyboard interrupt. + + :param excinfo: The exception info. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +def pytest_exception_interact( + node: Item | Collector, + call: CallInfo[Any], + report: CollectReport | TestReport, +) -> None: + """Called when an exception was raised which can potentially be + interactively handled. + + May be called during collection (see :hook:`pytest_make_collect_report`), + in which case ``report`` is a :class:`~pytest.CollectReport`. + + May be called during runtest of an item (see :hook:`pytest_runtest_protocol`), + in which case ``report`` is a :class:`~pytest.TestReport`. + + This hook is not called if the exception that was raised is an internal + exception like ``skip.Exception``. + + :param node: + The item or collector. + :param call: + The call information. Contains the exception. + :param report: + The collection or test report. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given node, only conftest + files in parent directories of the node are consulted. + """ + + +def pytest_enter_pdb(config: Config, pdb: pdb.Pdb) -> None: + """Called upon pdb.set_trace(). + + Can be used by plugins to take special action just before the python + debugger enters interactive mode. + + :param config: The pytest config object. + :param pdb: The Pdb instance. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +def pytest_leave_pdb(config: Config, pdb: pdb.Pdb) -> None: + """Called when leaving pdb (e.g. with continue after pdb.set_trace()). + + Can be used by plugins to take special action just after the python + debugger leaves interactive mode. + + :param config: The pytest config object. + :param pdb: The Pdb instance. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/junitxml.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/junitxml.py new file mode 100644 index 0000000..ae8d2b9 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/junitxml.py @@ -0,0 +1,695 @@ +# mypy: allow-untyped-defs +"""Report test results in JUnit-XML format, for use with Jenkins and build +integration servers. + +Based on initial code from Ross Lawley. + +Output conforms to +https://github.com/jenkinsci/xunit-plugin/blob/master/src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd +""" + +from __future__ import annotations + +from collections.abc import Callable +import functools +import os +import platform +import re +import xml.etree.ElementTree as ET + +from _pytest import nodes +from _pytest import timing +from _pytest._code.code import ExceptionRepr +from _pytest._code.code import ReprFileLocation +from _pytest.config import Config +from _pytest.config import filename_arg +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureRequest +from _pytest.reports import TestReport +from _pytest.stash import StashKey +from _pytest.terminal import TerminalReporter +import pytest + + +xml_key = StashKey["LogXML"]() + + +def bin_xml_escape(arg: object) -> str: + r"""Visually escape invalid XML characters. + + For example, transforms + 'hello\aworld\b' + into + 'hello#x07world#x08' + Note that the #xABs are *not* XML escapes - missing the ampersand «. + The idea is to escape visually for the user rather than for XML itself. + """ + + def repl(matchobj: re.Match[str]) -> str: + i = ord(matchobj.group()) + if i <= 0xFF: + return f"#x{i:02X}" + else: + return f"#x{i:04X}" + + # The spec range of valid chars is: + # Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF] + # For an unknown(?) reason, we disallow #x7F (DEL) as well. + illegal_xml_re = ( + "[^\u0009\u000a\u000d\u0020-\u007e\u0080-\ud7ff\ue000-\ufffd\u10000-\u10ffff]" + ) + return re.sub(illegal_xml_re, repl, str(arg)) + + +def merge_family(left, right) -> None: + result = {} + for kl, vl in left.items(): + for kr, vr in right.items(): + if not isinstance(vl, list): + raise TypeError(type(vl)) + result[kl] = vl + vr + left.update(result) + + +families = { # pylint: disable=dict-init-mutate + "_base": {"testcase": ["classname", "name"]}, + "_base_legacy": {"testcase": ["file", "line", "url"]}, +} +# xUnit 1.x inherits legacy attributes. +families["xunit1"] = families["_base"].copy() +merge_family(families["xunit1"], families["_base_legacy"]) + +# xUnit 2.x uses strict base attributes. +families["xunit2"] = families["_base"] + + +class _NodeReporter: + def __init__(self, nodeid: str | TestReport, xml: LogXML) -> None: + self.id = nodeid + self.xml = xml + self.add_stats = self.xml.add_stats + self.family = self.xml.family + self.duration = 0.0 + self.properties: list[tuple[str, str]] = [] + self.nodes: list[ET.Element] = [] + self.attrs: dict[str, str] = {} + + def append(self, node: ET.Element) -> None: + self.xml.add_stats(node.tag) + self.nodes.append(node) + + def add_property(self, name: str, value: object) -> None: + self.properties.append((str(name), bin_xml_escape(value))) + + def add_attribute(self, name: str, value: object) -> None: + self.attrs[str(name)] = bin_xml_escape(value) + + def make_properties_node(self) -> ET.Element | None: + """Return a Junit node containing custom properties, if any.""" + if self.properties: + properties = ET.Element("properties") + for name, value in self.properties: + properties.append(ET.Element("property", name=name, value=value)) + return properties + return None + + def record_testreport(self, testreport: TestReport) -> None: + names = mangle_test_address(testreport.nodeid) + existing_attrs = self.attrs + classnames = names[:-1] + if self.xml.prefix: + classnames.insert(0, self.xml.prefix) + attrs: dict[str, str] = { + "classname": ".".join(classnames), + "name": bin_xml_escape(names[-1]), + "file": testreport.location[0], + } + if testreport.location[1] is not None: + attrs["line"] = str(testreport.location[1]) + if hasattr(testreport, "url"): + attrs["url"] = testreport.url + self.attrs = attrs + self.attrs.update(existing_attrs) # Restore any user-defined attributes. + + # Preserve legacy testcase behavior. + if self.family == "xunit1": + return + + # Filter out attributes not permitted by this test family. + # Including custom attributes because they are not valid here. + temp_attrs = {} + for key in self.attrs: + if key in families[self.family]["testcase"]: + temp_attrs[key] = self.attrs[key] + self.attrs = temp_attrs + + def to_xml(self) -> ET.Element: + testcase = ET.Element("testcase", self.attrs, time=f"{self.duration:.3f}") + properties = self.make_properties_node() + if properties is not None: + testcase.append(properties) + testcase.extend(self.nodes) + return testcase + + def _add_simple(self, tag: str, message: str, data: str | None = None) -> None: + node = ET.Element(tag, message=message) + node.text = bin_xml_escape(data) + self.append(node) + + def write_captured_output(self, report: TestReport) -> None: + if not self.xml.log_passing_tests and report.passed: + return + + content_out = report.capstdout + content_log = report.caplog + content_err = report.capstderr + if self.xml.logging == "no": + return + content_all = "" + if self.xml.logging in ["log", "all"]: + content_all = self._prepare_content(content_log, " Captured Log ") + if self.xml.logging in ["system-out", "out-err", "all"]: + content_all += self._prepare_content(content_out, " Captured Out ") + self._write_content(report, content_all, "system-out") + content_all = "" + if self.xml.logging in ["system-err", "out-err", "all"]: + content_all += self._prepare_content(content_err, " Captured Err ") + self._write_content(report, content_all, "system-err") + content_all = "" + if content_all: + self._write_content(report, content_all, "system-out") + + def _prepare_content(self, content: str, header: str) -> str: + return "\n".join([header.center(80, "-"), content, ""]) + + def _write_content(self, report: TestReport, content: str, jheader: str) -> None: + tag = ET.Element(jheader) + tag.text = bin_xml_escape(content) + self.append(tag) + + def append_pass(self, report: TestReport) -> None: + self.add_stats("passed") + + def append_failure(self, report: TestReport) -> None: + # msg = str(report.longrepr.reprtraceback.extraline) + if hasattr(report, "wasxfail"): + self._add_simple("skipped", "xfail-marked test passes unexpectedly") + else: + assert report.longrepr is not None + reprcrash: ReprFileLocation | None = getattr( + report.longrepr, "reprcrash", None + ) + if reprcrash is not None: + message = reprcrash.message + else: + message = str(report.longrepr) + message = bin_xml_escape(message) + self._add_simple("failure", message, str(report.longrepr)) + + def append_collect_error(self, report: TestReport) -> None: + # msg = str(report.longrepr.reprtraceback.extraline) + assert report.longrepr is not None + self._add_simple("error", "collection failure", str(report.longrepr)) + + def append_collect_skipped(self, report: TestReport) -> None: + self._add_simple("skipped", "collection skipped", str(report.longrepr)) + + def append_error(self, report: TestReport) -> None: + assert report.longrepr is not None + reprcrash: ReprFileLocation | None = getattr(report.longrepr, "reprcrash", None) + if reprcrash is not None: + reason = reprcrash.message + else: + reason = str(report.longrepr) + + if report.when == "teardown": + msg = f'failed on teardown with "{reason}"' + else: + msg = f'failed on setup with "{reason}"' + self._add_simple("error", bin_xml_escape(msg), str(report.longrepr)) + + def append_skipped(self, report: TestReport) -> None: + if hasattr(report, "wasxfail"): + xfailreason = report.wasxfail + if xfailreason.startswith("reason: "): + xfailreason = xfailreason[8:] + xfailreason = bin_xml_escape(xfailreason) + skipped = ET.Element("skipped", type="pytest.xfail", message=xfailreason) + self.append(skipped) + else: + assert isinstance(report.longrepr, tuple) + filename, lineno, skipreason = report.longrepr + if skipreason.startswith("Skipped: "): + skipreason = skipreason[9:] + details = f"{filename}:{lineno}: {skipreason}" + + skipped = ET.Element( + "skipped", type="pytest.skip", message=bin_xml_escape(skipreason) + ) + skipped.text = bin_xml_escape(details) + self.append(skipped) + self.write_captured_output(report) + + def finalize(self) -> None: + data = self.to_xml() + self.__dict__.clear() + # Type ignored because mypy doesn't like overriding a method. + # Also the return value doesn't match... + self.to_xml = lambda: data # type: ignore[method-assign] + + +def _warn_incompatibility_with_xunit2( + request: FixtureRequest, fixture_name: str +) -> None: + """Emit a PytestWarning about the given fixture being incompatible with newer xunit revisions.""" + from _pytest.warning_types import PytestWarning + + xml = request.config.stash.get(xml_key, None) + if xml is not None and xml.family not in ("xunit1", "legacy"): + request.node.warn( + PytestWarning( + f"{fixture_name} is incompatible with junit_family '{xml.family}' (use 'legacy' or 'xunit1')" + ) + ) + + +@pytest.fixture +def record_property(request: FixtureRequest) -> Callable[[str, object], None]: + """Add extra properties to the calling test. + + User properties become part of the test report and are available to the + configured reporters, like JUnit XML. + + The fixture is callable with ``name, value``. The value is automatically + XML-encoded. + + Example:: + + def test_function(record_property): + record_property("example_key", 1) + """ + _warn_incompatibility_with_xunit2(request, "record_property") + + def append_property(name: str, value: object) -> None: + request.node.user_properties.append((name, value)) + + return append_property + + +@pytest.fixture +def record_xml_attribute(request: FixtureRequest) -> Callable[[str, object], None]: + """Add extra xml attributes to the tag for the calling test. + + The fixture is callable with ``name, value``. The value is + automatically XML-encoded. + """ + from _pytest.warning_types import PytestExperimentalApiWarning + + request.node.warn( + PytestExperimentalApiWarning("record_xml_attribute is an experimental feature") + ) + + _warn_incompatibility_with_xunit2(request, "record_xml_attribute") + + # Declare noop + def add_attr_noop(name: str, value: object) -> None: + pass + + attr_func = add_attr_noop + + xml = request.config.stash.get(xml_key, None) + if xml is not None: + node_reporter = xml.node_reporter(request.node.nodeid) + attr_func = node_reporter.add_attribute + + return attr_func + + +def _check_record_param_type(param: str, v: str) -> None: + """Used by record_testsuite_property to check that the given parameter name is of the proper + type.""" + __tracebackhide__ = True + if not isinstance(v, str): + msg = "{param} parameter needs to be a string, but {g} given" # type: ignore[unreachable] + raise TypeError(msg.format(param=param, g=type(v).__name__)) + + +@pytest.fixture(scope="session") +def record_testsuite_property(request: FixtureRequest) -> Callable[[str, object], None]: + """Record a new ```` tag as child of the root ````. + + This is suitable to writing global information regarding the entire test + suite, and is compatible with ``xunit2`` JUnit family. + + This is a ``session``-scoped fixture which is called with ``(name, value)``. Example: + + .. code-block:: python + + def test_foo(record_testsuite_property): + record_testsuite_property("ARCH", "PPC") + record_testsuite_property("STORAGE_TYPE", "CEPH") + + :param name: + The property name. + :param value: + The property value. Will be converted to a string. + + .. warning:: + + Currently this fixture **does not work** with the + `pytest-xdist `__ plugin. See + :issue:`7767` for details. + """ + __tracebackhide__ = True + + def record_func(name: str, value: object) -> None: + """No-op function in case --junit-xml was not passed in the command-line.""" + __tracebackhide__ = True + _check_record_param_type("name", name) + + xml = request.config.stash.get(xml_key, None) + if xml is not None: + record_func = xml.add_global_property + return record_func + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting") + group.addoption( + "--junitxml", + "--junit-xml", + action="store", + dest="xmlpath", + metavar="path", + type=functools.partial(filename_arg, optname="--junitxml"), + default=None, + help="Create junit-xml style report file at given path", + ) + group.addoption( + "--junitprefix", + "--junit-prefix", + action="store", + metavar="str", + default=None, + help="Prepend prefix to classnames in junit-xml output", + ) + parser.addini( + "junit_suite_name", "Test suite name for JUnit report", default="pytest" + ) + parser.addini( + "junit_logging", + "Write captured log messages to JUnit report: " + "one of no|log|system-out|system-err|out-err|all", + default="no", + ) + parser.addini( + "junit_log_passing_tests", + "Capture log information for passing tests to JUnit report: ", + type="bool", + default=True, + ) + parser.addini( + "junit_duration_report", + "Duration time to report: one of total|call", + default="total", + ) # choices=['total', 'call']) + parser.addini( + "junit_family", + "Emit XML for schema: one of legacy|xunit1|xunit2", + default="xunit2", + ) + + +def pytest_configure(config: Config) -> None: + xmlpath = config.option.xmlpath + # Prevent opening xmllog on worker nodes (xdist). + if xmlpath and not hasattr(config, "workerinput"): + junit_family = config.getini("junit_family") + config.stash[xml_key] = LogXML( + xmlpath, + config.option.junitprefix, + config.getini("junit_suite_name"), + config.getini("junit_logging"), + config.getini("junit_duration_report"), + junit_family, + config.getini("junit_log_passing_tests"), + ) + config.pluginmanager.register(config.stash[xml_key]) + + +def pytest_unconfigure(config: Config) -> None: + xml = config.stash.get(xml_key, None) + if xml: + del config.stash[xml_key] + config.pluginmanager.unregister(xml) + + +def mangle_test_address(address: str) -> list[str]: + path, possible_open_bracket, params = address.partition("[") + names = path.split("::") + # Convert file path to dotted path. + names[0] = names[0].replace(nodes.SEP, ".") + names[0] = re.sub(r"\.py$", "", names[0]) + # Put any params back. + names[-1] += possible_open_bracket + params + return names + + +class LogXML: + def __init__( + self, + logfile, + prefix: str | None, + suite_name: str = "pytest", + logging: str = "no", + report_duration: str = "total", + family="xunit1", + log_passing_tests: bool = True, + ) -> None: + logfile = os.path.expanduser(os.path.expandvars(logfile)) + self.logfile = os.path.normpath(os.path.abspath(logfile)) + self.prefix = prefix + self.suite_name = suite_name + self.logging = logging + self.log_passing_tests = log_passing_tests + self.report_duration = report_duration + self.family = family + self.stats: dict[str, int] = dict.fromkeys( + ["error", "passed", "failure", "skipped"], 0 + ) + self.node_reporters: dict[tuple[str | TestReport, object], _NodeReporter] = {} + self.node_reporters_ordered: list[_NodeReporter] = [] + self.global_properties: list[tuple[str, str]] = [] + + # List of reports that failed on call but teardown is pending. + self.open_reports: list[TestReport] = [] + self.cnt_double_fail_tests = 0 + + # Replaces convenience family with real family. + if self.family == "legacy": + self.family = "xunit1" + + def finalize(self, report: TestReport) -> None: + nodeid = getattr(report, "nodeid", report) + # Local hack to handle xdist report order. + workernode = getattr(report, "node", None) + reporter = self.node_reporters.pop((nodeid, workernode)) + + for propname, propvalue in report.user_properties: + reporter.add_property(propname, str(propvalue)) + + if reporter is not None: + reporter.finalize() + + def node_reporter(self, report: TestReport | str) -> _NodeReporter: + nodeid: str | TestReport = getattr(report, "nodeid", report) + # Local hack to handle xdist report order. + workernode = getattr(report, "node", None) + + key = nodeid, workernode + + if key in self.node_reporters: + # TODO: breaks for --dist=each + return self.node_reporters[key] + + reporter = _NodeReporter(nodeid, self) + + self.node_reporters[key] = reporter + self.node_reporters_ordered.append(reporter) + + return reporter + + def add_stats(self, key: str) -> None: + if key in self.stats: + self.stats[key] += 1 + + def _opentestcase(self, report: TestReport) -> _NodeReporter: + reporter = self.node_reporter(report) + reporter.record_testreport(report) + return reporter + + def pytest_runtest_logreport(self, report: TestReport) -> None: + """Handle a setup/call/teardown report, generating the appropriate + XML tags as necessary. + + Note: due to plugins like xdist, this hook may be called in interlaced + order with reports from other nodes. For example: + + Usual call order: + -> setup node1 + -> call node1 + -> teardown node1 + -> setup node2 + -> call node2 + -> teardown node2 + + Possible call order in xdist: + -> setup node1 + -> call node1 + -> setup node2 + -> call node2 + -> teardown node2 + -> teardown node1 + """ + close_report = None + if report.passed: + if report.when == "call": # ignore setup/teardown + reporter = self._opentestcase(report) + reporter.append_pass(report) + elif report.failed: + if report.when == "teardown": + # The following vars are needed when xdist plugin is used. + report_wid = getattr(report, "worker_id", None) + report_ii = getattr(report, "item_index", None) + close_report = next( + ( + rep + for rep in self.open_reports + if ( + rep.nodeid == report.nodeid + and getattr(rep, "item_index", None) == report_ii + and getattr(rep, "worker_id", None) == report_wid + ) + ), + None, + ) + if close_report: + # We need to open new testcase in case we have failure in + # call and error in teardown in order to follow junit + # schema. + self.finalize(close_report) + self.cnt_double_fail_tests += 1 + reporter = self._opentestcase(report) + if report.when == "call": + reporter.append_failure(report) + self.open_reports.append(report) + if not self.log_passing_tests: + reporter.write_captured_output(report) + else: + reporter.append_error(report) + elif report.skipped: + reporter = self._opentestcase(report) + reporter.append_skipped(report) + self.update_testcase_duration(report) + if report.when == "teardown": + reporter = self._opentestcase(report) + reporter.write_captured_output(report) + + self.finalize(report) + report_wid = getattr(report, "worker_id", None) + report_ii = getattr(report, "item_index", None) + close_report = next( + ( + rep + for rep in self.open_reports + if ( + rep.nodeid == report.nodeid + and getattr(rep, "item_index", None) == report_ii + and getattr(rep, "worker_id", None) == report_wid + ) + ), + None, + ) + if close_report: + self.open_reports.remove(close_report) + + def update_testcase_duration(self, report: TestReport) -> None: + """Accumulate total duration for nodeid from given report and update + the Junit.testcase with the new total if already created.""" + if self.report_duration in {"total", report.when}: + reporter = self.node_reporter(report) + reporter.duration += getattr(report, "duration", 0.0) + + def pytest_collectreport(self, report: TestReport) -> None: + if not report.passed: + reporter = self._opentestcase(report) + if report.failed: + reporter.append_collect_error(report) + else: + reporter.append_collect_skipped(report) + + def pytest_internalerror(self, excrepr: ExceptionRepr) -> None: + reporter = self.node_reporter("internal") + reporter.attrs.update(classname="pytest", name="internal") + reporter._add_simple("error", "internal error", str(excrepr)) + + def pytest_sessionstart(self) -> None: + self.suite_start = timing.Instant() + + def pytest_sessionfinish(self) -> None: + dirname = os.path.dirname(os.path.abspath(self.logfile)) + # exist_ok avoids filesystem race conditions between checking path existence and requesting creation + os.makedirs(dirname, exist_ok=True) + + with open(self.logfile, "w", encoding="utf-8") as logfile: + duration = self.suite_start.elapsed() + + numtests = ( + self.stats["passed"] + + self.stats["failure"] + + self.stats["skipped"] + + self.stats["error"] + - self.cnt_double_fail_tests + ) + logfile.write('') + + suite_node = ET.Element( + "testsuite", + name=self.suite_name, + errors=str(self.stats["error"]), + failures=str(self.stats["failure"]), + skipped=str(self.stats["skipped"]), + tests=str(numtests), + time=f"{duration.seconds:.3f}", + timestamp=self.suite_start.as_utc().astimezone().isoformat(), + hostname=platform.node(), + ) + global_properties = self._get_global_properties_node() + if global_properties is not None: + suite_node.append(global_properties) + for node_reporter in self.node_reporters_ordered: + suite_node.append(node_reporter.to_xml()) + testsuites = ET.Element("testsuites") + testsuites.set("name", "pytest tests") + testsuites.append(suite_node) + logfile.write(ET.tostring(testsuites, encoding="unicode")) + + def pytest_terminal_summary( + self, terminalreporter: TerminalReporter, config: pytest.Config + ) -> None: + if config.get_verbosity() >= 0: + terminalreporter.write_sep("-", f"generated xml file: {self.logfile}") + + def add_global_property(self, name: str, value: object) -> None: + __tracebackhide__ = True + _check_record_param_type("name", name) + self.global_properties.append((name, bin_xml_escape(value))) + + def _get_global_properties_node(self) -> ET.Element | None: + """Return a Junit node containing custom properties, if any.""" + if self.global_properties: + properties = ET.Element("properties") + for name, value in self.global_properties: + properties.append(ET.Element("property", name=name, value=value)) + return properties + return None diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/legacypath.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/legacypath.py new file mode 100644 index 0000000..59e8ef6 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/legacypath.py @@ -0,0 +1,468 @@ +# mypy: allow-untyped-defs +"""Add backward compatibility support for the legacy py path type.""" + +from __future__ import annotations + +import dataclasses +from pathlib import Path +import shlex +import subprocess +from typing import Final +from typing import final +from typing import TYPE_CHECKING + +from iniconfig import SectionWrapper + +from _pytest.cacheprovider import Cache +from _pytest.compat import LEGACY_PATH +from _pytest.compat import legacy_path +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config import PytestPluginManager +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.nodes import Node +from _pytest.pytester import HookRecorder +from _pytest.pytester import Pytester +from _pytest.pytester import RunResult +from _pytest.terminal import TerminalReporter +from _pytest.tmpdir import TempPathFactory + + +if TYPE_CHECKING: + import pexpect + + +@final +class Testdir: + """ + Similar to :class:`Pytester`, but this class works with legacy legacy_path objects instead. + + All methods just forward to an internal :class:`Pytester` instance, converting results + to `legacy_path` objects as necessary. + """ + + __test__ = False + + CLOSE_STDIN: Final = Pytester.CLOSE_STDIN + TimeoutExpired: Final = Pytester.TimeoutExpired + + def __init__(self, pytester: Pytester, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._pytester = pytester + + @property + def tmpdir(self) -> LEGACY_PATH: + """Temporary directory where tests are executed.""" + return legacy_path(self._pytester.path) + + @property + def test_tmproot(self) -> LEGACY_PATH: + return legacy_path(self._pytester._test_tmproot) + + @property + def request(self): + return self._pytester._request + + @property + def plugins(self): + return self._pytester.plugins + + @plugins.setter + def plugins(self, plugins): + self._pytester.plugins = plugins + + @property + def monkeypatch(self) -> MonkeyPatch: + return self._pytester._monkeypatch + + def make_hook_recorder(self, pluginmanager) -> HookRecorder: + """See :meth:`Pytester.make_hook_recorder`.""" + return self._pytester.make_hook_recorder(pluginmanager) + + def chdir(self) -> None: + """See :meth:`Pytester.chdir`.""" + return self._pytester.chdir() + + def finalize(self) -> None: + return self._pytester._finalize() + + def makefile(self, ext, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.makefile`.""" + if ext and not ext.startswith("."): + # pytester.makefile is going to throw a ValueError in a way that + # testdir.makefile did not, because + # pathlib.Path is stricter suffixes than py.path + # This ext arguments is likely user error, but since testdir has + # allowed this, we will prepend "." as a workaround to avoid breaking + # testdir usage that worked before + ext = "." + ext + return legacy_path(self._pytester.makefile(ext, *args, **kwargs)) + + def makeconftest(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makeconftest`.""" + return legacy_path(self._pytester.makeconftest(source)) + + def makeini(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makeini`.""" + return legacy_path(self._pytester.makeini(source)) + + def getinicfg(self, source: str) -> SectionWrapper: + """See :meth:`Pytester.getinicfg`.""" + return self._pytester.getinicfg(source) + + def makepyprojecttoml(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makepyprojecttoml`.""" + return legacy_path(self._pytester.makepyprojecttoml(source)) + + def makepyfile(self, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.makepyfile`.""" + return legacy_path(self._pytester.makepyfile(*args, **kwargs)) + + def maketxtfile(self, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.maketxtfile`.""" + return legacy_path(self._pytester.maketxtfile(*args, **kwargs)) + + def syspathinsert(self, path=None) -> None: + """See :meth:`Pytester.syspathinsert`.""" + return self._pytester.syspathinsert(path) + + def mkdir(self, name) -> LEGACY_PATH: + """See :meth:`Pytester.mkdir`.""" + return legacy_path(self._pytester.mkdir(name)) + + def mkpydir(self, name) -> LEGACY_PATH: + """See :meth:`Pytester.mkpydir`.""" + return legacy_path(self._pytester.mkpydir(name)) + + def copy_example(self, name=None) -> LEGACY_PATH: + """See :meth:`Pytester.copy_example`.""" + return legacy_path(self._pytester.copy_example(name)) + + def getnode(self, config: Config, arg) -> Item | Collector | None: + """See :meth:`Pytester.getnode`.""" + return self._pytester.getnode(config, arg) + + def getpathnode(self, path): + """See :meth:`Pytester.getpathnode`.""" + return self._pytester.getpathnode(path) + + def genitems(self, colitems: list[Item | Collector]) -> list[Item]: + """See :meth:`Pytester.genitems`.""" + return self._pytester.genitems(colitems) + + def runitem(self, source): + """See :meth:`Pytester.runitem`.""" + return self._pytester.runitem(source) + + def inline_runsource(self, source, *cmdlineargs): + """See :meth:`Pytester.inline_runsource`.""" + return self._pytester.inline_runsource(source, *cmdlineargs) + + def inline_genitems(self, *args): + """See :meth:`Pytester.inline_genitems`.""" + return self._pytester.inline_genitems(*args) + + def inline_run(self, *args, plugins=(), no_reraise_ctrlc: bool = False): + """See :meth:`Pytester.inline_run`.""" + return self._pytester.inline_run( + *args, plugins=plugins, no_reraise_ctrlc=no_reraise_ctrlc + ) + + def runpytest_inprocess(self, *args, **kwargs) -> RunResult: + """See :meth:`Pytester.runpytest_inprocess`.""" + return self._pytester.runpytest_inprocess(*args, **kwargs) + + def runpytest(self, *args, **kwargs) -> RunResult: + """See :meth:`Pytester.runpytest`.""" + return self._pytester.runpytest(*args, **kwargs) + + def parseconfig(self, *args) -> Config: + """See :meth:`Pytester.parseconfig`.""" + return self._pytester.parseconfig(*args) + + def parseconfigure(self, *args) -> Config: + """See :meth:`Pytester.parseconfigure`.""" + return self._pytester.parseconfigure(*args) + + def getitem(self, source, funcname="test_func"): + """See :meth:`Pytester.getitem`.""" + return self._pytester.getitem(source, funcname) + + def getitems(self, source): + """See :meth:`Pytester.getitems`.""" + return self._pytester.getitems(source) + + def getmodulecol(self, source, configargs=(), withinit=False): + """See :meth:`Pytester.getmodulecol`.""" + return self._pytester.getmodulecol( + source, configargs=configargs, withinit=withinit + ) + + def collect_by_name(self, modcol: Collector, name: str) -> Item | Collector | None: + """See :meth:`Pytester.collect_by_name`.""" + return self._pytester.collect_by_name(modcol, name) + + def popen( + self, + cmdargs, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=CLOSE_STDIN, + **kw, + ): + """See :meth:`Pytester.popen`.""" + return self._pytester.popen(cmdargs, stdout, stderr, stdin, **kw) + + def run(self, *cmdargs, timeout=None, stdin=CLOSE_STDIN) -> RunResult: + """See :meth:`Pytester.run`.""" + return self._pytester.run(*cmdargs, timeout=timeout, stdin=stdin) + + def runpython(self, script) -> RunResult: + """See :meth:`Pytester.runpython`.""" + return self._pytester.runpython(script) + + def runpython_c(self, command): + """See :meth:`Pytester.runpython_c`.""" + return self._pytester.runpython_c(command) + + def runpytest_subprocess(self, *args, timeout=None) -> RunResult: + """See :meth:`Pytester.runpytest_subprocess`.""" + return self._pytester.runpytest_subprocess(*args, timeout=timeout) + + def spawn_pytest(self, string: str, expect_timeout: float = 10.0) -> pexpect.spawn: + """See :meth:`Pytester.spawn_pytest`.""" + return self._pytester.spawn_pytest(string, expect_timeout=expect_timeout) + + def spawn(self, cmd: str, expect_timeout: float = 10.0) -> pexpect.spawn: + """See :meth:`Pytester.spawn`.""" + return self._pytester.spawn(cmd, expect_timeout=expect_timeout) + + def __repr__(self) -> str: + return f"" + + def __str__(self) -> str: + return str(self.tmpdir) + + +class LegacyTestdirPlugin: + @staticmethod + @fixture + def testdir(pytester: Pytester) -> Testdir: + """ + Identical to :fixture:`pytester`, and provides an instance whose methods return + legacy ``LEGACY_PATH`` objects instead when applicable. + + New code should avoid using :fixture:`testdir` in favor of :fixture:`pytester`. + """ + return Testdir(pytester, _ispytest=True) + + +@final +@dataclasses.dataclass +class TempdirFactory: + """Backward compatibility wrapper that implements ``py.path.local`` + for :class:`TempPathFactory`. + + .. note:: + These days, it is preferred to use ``tmp_path_factory``. + + :ref:`About the tmpdir and tmpdir_factory fixtures`. + + """ + + _tmppath_factory: TempPathFactory + + def __init__( + self, tmppath_factory: TempPathFactory, *, _ispytest: bool = False + ) -> None: + check_ispytest(_ispytest) + self._tmppath_factory = tmppath_factory + + def mktemp(self, basename: str, numbered: bool = True) -> LEGACY_PATH: + """Same as :meth:`TempPathFactory.mktemp`, but returns a ``py.path.local`` object.""" + return legacy_path(self._tmppath_factory.mktemp(basename, numbered).resolve()) + + def getbasetemp(self) -> LEGACY_PATH: + """Same as :meth:`TempPathFactory.getbasetemp`, but returns a ``py.path.local`` object.""" + return legacy_path(self._tmppath_factory.getbasetemp().resolve()) + + +class LegacyTmpdirPlugin: + @staticmethod + @fixture(scope="session") + def tmpdir_factory(request: FixtureRequest) -> TempdirFactory: + """Return a :class:`pytest.TempdirFactory` instance for the test session.""" + # Set dynamically by pytest_configure(). + return request.config._tmpdirhandler # type: ignore + + @staticmethod + @fixture + def tmpdir(tmp_path: Path) -> LEGACY_PATH: + """Return a temporary directory (as `legacy_path`_ object) + which is unique to each test function invocation. + The temporary directory is created as a subdirectory + of the base temporary directory, with configurable retention, + as discussed in :ref:`temporary directory location and retention`. + + .. note:: + These days, it is preferred to use ``tmp_path``. + + :ref:`About the tmpdir and tmpdir_factory fixtures`. + + .. _legacy_path: https://py.readthedocs.io/en/latest/path.html + """ + return legacy_path(tmp_path) + + +def Cache_makedir(self: Cache, name: str) -> LEGACY_PATH: + """Return a directory path object with the given name. + + Same as :func:`mkdir`, but returns a legacy py path instance. + """ + return legacy_path(self.mkdir(name)) + + +def FixtureRequest_fspath(self: FixtureRequest) -> LEGACY_PATH: + """(deprecated) The file system path of the test module which collected this test.""" + return legacy_path(self.path) + + +def TerminalReporter_startdir(self: TerminalReporter) -> LEGACY_PATH: + """The directory from which pytest was invoked. + + Prefer to use ``startpath`` which is a :class:`pathlib.Path`. + + :type: LEGACY_PATH + """ + return legacy_path(self.startpath) + + +def Config_invocation_dir(self: Config) -> LEGACY_PATH: + """The directory from which pytest was invoked. + + Prefer to use :attr:`invocation_params.dir `, + which is a :class:`pathlib.Path`. + + :type: LEGACY_PATH + """ + return legacy_path(str(self.invocation_params.dir)) + + +def Config_rootdir(self: Config) -> LEGACY_PATH: + """The path to the :ref:`rootdir `. + + Prefer to use :attr:`rootpath`, which is a :class:`pathlib.Path`. + + :type: LEGACY_PATH + """ + return legacy_path(str(self.rootpath)) + + +def Config_inifile(self: Config) -> LEGACY_PATH | None: + """The path to the :ref:`configfile `. + + Prefer to use :attr:`inipath`, which is a :class:`pathlib.Path`. + + :type: Optional[LEGACY_PATH] + """ + return legacy_path(str(self.inipath)) if self.inipath else None + + +def Session_startdir(self: Session) -> LEGACY_PATH: + """The path from which pytest was invoked. + + Prefer to use ``startpath`` which is a :class:`pathlib.Path`. + + :type: LEGACY_PATH + """ + return legacy_path(self.startpath) + + +def Config__getini_unknown_type(self, name: str, type: str, value: str | list[str]): + if type == "pathlist": + # TODO: This assert is probably not valid in all cases. + assert self.inipath is not None + dp = self.inipath.parent + input_values = shlex.split(value) if isinstance(value, str) else value + return [legacy_path(str(dp / x)) for x in input_values] + else: + raise ValueError(f"unknown configuration type: {type}", value) + + +def Node_fspath(self: Node) -> LEGACY_PATH: + """(deprecated) returns a legacy_path copy of self.path""" + return legacy_path(self.path) + + +def Node_fspath_set(self: Node, value: LEGACY_PATH) -> None: + self.path = Path(value) + + +@hookimpl(tryfirst=True) +def pytest_load_initial_conftests(early_config: Config) -> None: + """Monkeypatch legacy path attributes in several classes, as early as possible.""" + mp = MonkeyPatch() + early_config.add_cleanup(mp.undo) + + # Add Cache.makedir(). + mp.setattr(Cache, "makedir", Cache_makedir, raising=False) + + # Add FixtureRequest.fspath property. + mp.setattr(FixtureRequest, "fspath", property(FixtureRequest_fspath), raising=False) + + # Add TerminalReporter.startdir property. + mp.setattr( + TerminalReporter, "startdir", property(TerminalReporter_startdir), raising=False + ) + + # Add Config.{invocation_dir,rootdir,inifile} properties. + mp.setattr(Config, "invocation_dir", property(Config_invocation_dir), raising=False) + mp.setattr(Config, "rootdir", property(Config_rootdir), raising=False) + mp.setattr(Config, "inifile", property(Config_inifile), raising=False) + + # Add Session.startdir property. + mp.setattr(Session, "startdir", property(Session_startdir), raising=False) + + # Add pathlist configuration type. + mp.setattr(Config, "_getini_unknown_type", Config__getini_unknown_type) + + # Add Node.fspath property. + mp.setattr(Node, "fspath", property(Node_fspath, Node_fspath_set), raising=False) + + +@hookimpl +def pytest_configure(config: Config) -> None: + """Installs the LegacyTmpdirPlugin if the ``tmpdir`` plugin is also installed.""" + if config.pluginmanager.has_plugin("tmpdir"): + mp = MonkeyPatch() + config.add_cleanup(mp.undo) + # Create TmpdirFactory and attach it to the config object. + # + # This is to comply with existing plugins which expect the handler to be + # available at pytest_configure time, but ideally should be moved entirely + # to the tmpdir_factory session fixture. + try: + tmp_path_factory = config._tmp_path_factory # type: ignore[attr-defined] + except AttributeError: + # tmpdir plugin is blocked. + pass + else: + _tmpdirhandler = TempdirFactory(tmp_path_factory, _ispytest=True) + mp.setattr(config, "_tmpdirhandler", _tmpdirhandler, raising=False) + + config.pluginmanager.register(LegacyTmpdirPlugin, "legacypath-tmpdir") + + +@hookimpl +def pytest_plugin_registered(plugin: object, manager: PytestPluginManager) -> None: + # pytester is not loaded by default and is commonly loaded from a conftest, + # so checking for it in `pytest_configure` is not enough. + is_pytester = plugin is manager.get_plugin("pytester") + if is_pytester and not manager.is_registered(LegacyTestdirPlugin): + manager.register(LegacyTestdirPlugin, "legacypath-pytester") diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/logging.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/logging.py new file mode 100644 index 0000000..e4fed57 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/logging.py @@ -0,0 +1,960 @@ +# mypy: allow-untyped-defs +"""Access and control log capturing.""" + +from __future__ import annotations + +from collections.abc import Generator +from collections.abc import Mapping +from collections.abc import Set as AbstractSet +from contextlib import contextmanager +from contextlib import nullcontext +from datetime import datetime +from datetime import timedelta +from datetime import timezone +import io +from io import StringIO +import logging +from logging import LogRecord +import os +from pathlib import Path +import re +from types import TracebackType +from typing import final +from typing import Generic +from typing import Literal +from typing import TYPE_CHECKING +from typing import TypeVar + +from _pytest import nodes +from _pytest._io import TerminalWriter +from _pytest.capture import CaptureManager +from _pytest.config import _strtobool +from _pytest.config import Config +from _pytest.config import create_terminal_writer +from _pytest.config import hookimpl +from _pytest.config import UsageError +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.stash import StashKey +from _pytest.terminal import TerminalReporter + + +if TYPE_CHECKING: + logging_StreamHandler = logging.StreamHandler[StringIO] +else: + logging_StreamHandler = logging.StreamHandler + +DEFAULT_LOG_FORMAT = "%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s" +DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S" +_ANSI_ESCAPE_SEQ = re.compile(r"\x1b\[[\d;]+m") +caplog_handler_key = StashKey["LogCaptureHandler"]() +caplog_records_key = StashKey[dict[str, list[logging.LogRecord]]]() + + +def _remove_ansi_escape_sequences(text: str) -> str: + return _ANSI_ESCAPE_SEQ.sub("", text) + + +class DatetimeFormatter(logging.Formatter): + """A logging formatter which formats record with + :func:`datetime.datetime.strftime` formatter instead of + :func:`time.strftime` in case of microseconds in format string. + """ + + def formatTime(self, record: LogRecord, datefmt: str | None = None) -> str: + if datefmt and "%f" in datefmt: + ct = self.converter(record.created) + tz = timezone(timedelta(seconds=ct.tm_gmtoff), ct.tm_zone) + # Construct `datetime.datetime` object from `struct_time` + # and msecs information from `record` + # Using int() instead of round() to avoid it exceeding 1_000_000 and causing a ValueError (#11861). + dt = datetime(*ct[0:6], microsecond=int(record.msecs * 1000), tzinfo=tz) + return dt.strftime(datefmt) + # Use `logging.Formatter` for non-microsecond formats + return super().formatTime(record, datefmt) + + +class ColoredLevelFormatter(DatetimeFormatter): + """A logging formatter which colorizes the %(levelname)..s part of the + log format passed to __init__.""" + + LOGLEVEL_COLOROPTS: Mapping[int, AbstractSet[str]] = { + logging.CRITICAL: {"red"}, + logging.ERROR: {"red", "bold"}, + logging.WARNING: {"yellow"}, + logging.WARN: {"yellow"}, + logging.INFO: {"green"}, + logging.DEBUG: {"purple"}, + logging.NOTSET: set(), + } + LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-.]?\d*(?:\.\d+)?s)") + + def __init__(self, terminalwriter: TerminalWriter, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self._terminalwriter = terminalwriter + self._original_fmt = self._style._fmt + self._level_to_fmt_mapping: dict[int, str] = {} + + for level, color_opts in self.LOGLEVEL_COLOROPTS.items(): + self.add_color_level(level, *color_opts) + + def add_color_level(self, level: int, *color_opts: str) -> None: + """Add or update color opts for a log level. + + :param level: + Log level to apply a style to, e.g. ``logging.INFO``. + :param color_opts: + ANSI escape sequence color options. Capitalized colors indicates + background color, i.e. ``'green', 'Yellow', 'bold'`` will give bold + green text on yellow background. + + .. warning:: + This is an experimental API. + """ + assert self._fmt is not None + levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt) + if not levelname_fmt_match: + return + levelname_fmt = levelname_fmt_match.group() + + formatted_levelname = levelname_fmt % {"levelname": logging.getLevelName(level)} + + # add ANSI escape sequences around the formatted levelname + color_kwargs = {name: True for name in color_opts} + colorized_formatted_levelname = self._terminalwriter.markup( + formatted_levelname, **color_kwargs + ) + self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub( + colorized_formatted_levelname, self._fmt + ) + + def format(self, record: logging.LogRecord) -> str: + fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt) + self._style._fmt = fmt + return super().format(record) + + +class PercentStyleMultiline(logging.PercentStyle): + """A logging style with special support for multiline messages. + + If the message of a record consists of multiple lines, this style + formats the message as if each line were logged separately. + """ + + def __init__(self, fmt: str, auto_indent: int | str | bool | None) -> None: + super().__init__(fmt) + self._auto_indent = self._get_auto_indent(auto_indent) + + @staticmethod + def _get_auto_indent(auto_indent_option: int | str | bool | None) -> int: + """Determine the current auto indentation setting. + + Specify auto indent behavior (on/off/fixed) by passing in + extra={"auto_indent": [value]} to the call to logging.log() or + using a --log-auto-indent [value] command line or the + log_auto_indent [value] config option. + + Default behavior is auto-indent off. + + Using the string "True" or "on" or the boolean True as the value + turns auto indent on, using the string "False" or "off" or the + boolean False or the int 0 turns it off, and specifying a + positive integer fixes the indentation position to the value + specified. + + Any other values for the option are invalid, and will silently be + converted to the default. + + :param None|bool|int|str auto_indent_option: + User specified option for indentation from command line, config + or extra kwarg. Accepts int, bool or str. str option accepts the + same range of values as boolean config options, as well as + positive integers represented in str form. + + :returns: + Indentation value, which can be + -1 (automatically determine indentation) or + 0 (auto-indent turned off) or + >0 (explicitly set indentation position). + """ + if auto_indent_option is None: + return 0 + elif isinstance(auto_indent_option, bool): + if auto_indent_option: + return -1 + else: + return 0 + elif isinstance(auto_indent_option, int): + return int(auto_indent_option) + elif isinstance(auto_indent_option, str): + try: + return int(auto_indent_option) + except ValueError: + pass + try: + if _strtobool(auto_indent_option): + return -1 + except ValueError: + return 0 + + return 0 + + def format(self, record: logging.LogRecord) -> str: + if "\n" in record.message: + if hasattr(record, "auto_indent"): + # Passed in from the "extra={}" kwarg on the call to logging.log(). + auto_indent = self._get_auto_indent(record.auto_indent) + else: + auto_indent = self._auto_indent + + if auto_indent: + lines = record.message.splitlines() + formatted = self._fmt % {**record.__dict__, "message": lines[0]} + + if auto_indent < 0: + indentation = _remove_ansi_escape_sequences(formatted).find( + lines[0] + ) + else: + # Optimizes logging by allowing a fixed indentation. + indentation = auto_indent + lines[0] = formatted + return ("\n" + " " * indentation).join(lines) + return self._fmt % record.__dict__ + + +def get_option_ini(config: Config, *names: str): + for name in names: + ret = config.getoption(name) # 'default' arg won't work as expected + if ret is None: + ret = config.getini(name) + if ret: + return ret + + +def pytest_addoption(parser: Parser) -> None: + """Add options to control log capturing.""" + group = parser.getgroup("logging") + + def add_option_ini(option, dest, default=None, type=None, **kwargs): + parser.addini( + dest, default=default, type=type, help="Default value for " + option + ) + group.addoption(option, dest=dest, **kwargs) + + add_option_ini( + "--log-level", + dest="log_level", + default=None, + metavar="LEVEL", + help=( + "Level of messages to catch/display." + " Not set by default, so it depends on the root/parent log handler's" + ' effective level, where it is "WARNING" by default.' + ), + ) + add_option_ini( + "--log-format", + dest="log_format", + default=DEFAULT_LOG_FORMAT, + help="Log format used by the logging module", + ) + add_option_ini( + "--log-date-format", + dest="log_date_format", + default=DEFAULT_LOG_DATE_FORMAT, + help="Log date format used by the logging module", + ) + parser.addini( + "log_cli", + default=False, + type="bool", + help='Enable log display during test run (also known as "live logging")', + ) + add_option_ini( + "--log-cli-level", dest="log_cli_level", default=None, help="CLI logging level" + ) + add_option_ini( + "--log-cli-format", + dest="log_cli_format", + default=None, + help="Log format used by the logging module", + ) + add_option_ini( + "--log-cli-date-format", + dest="log_cli_date_format", + default=None, + help="Log date format used by the logging module", + ) + add_option_ini( + "--log-file", + dest="log_file", + default=None, + help="Path to a file when logging will be written to", + ) + add_option_ini( + "--log-file-mode", + dest="log_file_mode", + default="w", + choices=["w", "a"], + help="Log file open mode", + ) + add_option_ini( + "--log-file-level", + dest="log_file_level", + default=None, + help="Log file logging level", + ) + add_option_ini( + "--log-file-format", + dest="log_file_format", + default=None, + help="Log format used by the logging module", + ) + add_option_ini( + "--log-file-date-format", + dest="log_file_date_format", + default=None, + help="Log date format used by the logging module", + ) + add_option_ini( + "--log-auto-indent", + dest="log_auto_indent", + default=None, + help="Auto-indent multiline messages passed to the logging module. Accepts true|on, false|off or an integer.", + ) + group.addoption( + "--log-disable", + action="append", + default=[], + dest="logger_disable", + help="Disable a logger by name. Can be passed multiple times.", + ) + + +_HandlerType = TypeVar("_HandlerType", bound=logging.Handler) + + +# Not using @contextmanager for performance reasons. +class catching_logs(Generic[_HandlerType]): + """Context manager that prepares the whole logging machinery properly.""" + + __slots__ = ("handler", "level", "orig_level") + + def __init__(self, handler: _HandlerType, level: int | None = None) -> None: + self.handler = handler + self.level = level + + def __enter__(self) -> _HandlerType: + root_logger = logging.getLogger() + if self.level is not None: + self.handler.setLevel(self.level) + root_logger.addHandler(self.handler) + if self.level is not None: + self.orig_level = root_logger.level + root_logger.setLevel(min(self.orig_level, self.level)) + return self.handler + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + root_logger = logging.getLogger() + if self.level is not None: + root_logger.setLevel(self.orig_level) + root_logger.removeHandler(self.handler) + + +class LogCaptureHandler(logging_StreamHandler): + """A logging handler that stores log records and the log text.""" + + def __init__(self) -> None: + """Create a new log handler.""" + super().__init__(StringIO()) + self.records: list[logging.LogRecord] = [] + + def emit(self, record: logging.LogRecord) -> None: + """Keep the log records in a list in addition to the log text.""" + self.records.append(record) + super().emit(record) + + def reset(self) -> None: + self.records = [] + self.stream = StringIO() + + def clear(self) -> None: + self.records.clear() + self.stream = StringIO() + + def handleError(self, record: logging.LogRecord) -> None: + if logging.raiseExceptions: + # Fail the test if the log message is bad (emit failed). + # The default behavior of logging is to print "Logging error" + # to stderr with the call stack and some extra details. + # pytest wants to make such mistakes visible during testing. + raise # noqa: PLE0704 + + +@final +class LogCaptureFixture: + """Provides access and control of log capturing.""" + + def __init__(self, item: nodes.Node, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._item = item + self._initial_handler_level: int | None = None + # Dict of log name -> log level. + self._initial_logger_levels: dict[str | None, int] = {} + self._initial_disabled_logging_level: int | None = None + + def _finalize(self) -> None: + """Finalize the fixture. + + This restores the log levels and the disabled logging levels changed by :meth:`set_level`. + """ + # Restore log levels. + if self._initial_handler_level is not None: + self.handler.setLevel(self._initial_handler_level) + for logger_name, level in self._initial_logger_levels.items(): + logger = logging.getLogger(logger_name) + logger.setLevel(level) + # Disable logging at the original disabled logging level. + if self._initial_disabled_logging_level is not None: + logging.disable(self._initial_disabled_logging_level) + self._initial_disabled_logging_level = None + + @property + def handler(self) -> LogCaptureHandler: + """Get the logging handler used by the fixture.""" + return self._item.stash[caplog_handler_key] + + def get_records( + self, when: Literal["setup", "call", "teardown"] + ) -> list[logging.LogRecord]: + """Get the logging records for one of the possible test phases. + + :param when: + Which test phase to obtain the records from. + Valid values are: "setup", "call" and "teardown". + + :returns: The list of captured records at the given stage. + + .. versionadded:: 3.4 + """ + return self._item.stash[caplog_records_key].get(when, []) + + @property + def text(self) -> str: + """The formatted log text.""" + return _remove_ansi_escape_sequences(self.handler.stream.getvalue()) + + @property + def records(self) -> list[logging.LogRecord]: + """The list of log records.""" + return self.handler.records + + @property + def record_tuples(self) -> list[tuple[str, int, str]]: + """A list of a stripped down version of log records intended + for use in assertion comparison. + + The format of the tuple is: + + (logger_name, log_level, message) + """ + return [(r.name, r.levelno, r.getMessage()) for r in self.records] + + @property + def messages(self) -> list[str]: + """A list of format-interpolated log messages. + + Unlike 'records', which contains the format string and parameters for + interpolation, log messages in this list are all interpolated. + + Unlike 'text', which contains the output from the handler, log + messages in this list are unadorned with levels, timestamps, etc, + making exact comparisons more reliable. + + Note that traceback or stack info (from :func:`logging.exception` or + the `exc_info` or `stack_info` arguments to the logging functions) is + not included, as this is added by the formatter in the handler. + + .. versionadded:: 3.7 + """ + return [r.getMessage() for r in self.records] + + def clear(self) -> None: + """Reset the list of log records and the captured log text.""" + self.handler.clear() + + def _force_enable_logging( + self, level: int | str, logger_obj: logging.Logger + ) -> int: + """Enable the desired logging level if the global level was disabled via ``logging.disabled``. + + Only enables logging levels greater than or equal to the requested ``level``. + + Does nothing if the desired ``level`` wasn't disabled. + + :param level: + The logger level caplog should capture. + All logging is enabled if a non-standard logging level string is supplied. + Valid level strings are in :data:`logging._nameToLevel`. + :param logger_obj: The logger object to check. + + :return: The original disabled logging level. + """ + original_disable_level: int = logger_obj.manager.disable + + if isinstance(level, str): + # Try to translate the level string to an int for `logging.disable()` + level = logging.getLevelName(level) + + if not isinstance(level, int): + # The level provided was not valid, so just un-disable all logging. + logging.disable(logging.NOTSET) + elif not logger_obj.isEnabledFor(level): + # Each level is `10` away from other levels. + # https://docs.python.org/3/library/logging.html#logging-levels + disable_level = max(level - 10, logging.NOTSET) + logging.disable(disable_level) + + return original_disable_level + + def set_level(self, level: int | str, logger: str | None = None) -> None: + """Set the threshold level of a logger for the duration of a test. + + Logging messages which are less severe than this level will not be captured. + + .. versionchanged:: 3.4 + The levels of the loggers changed by this function will be + restored to their initial values at the end of the test. + + Will enable the requested logging level if it was disabled via :func:`logging.disable`. + + :param level: The level. + :param logger: The logger to update. If not given, the root logger. + """ + logger_obj = logging.getLogger(logger) + # Save the original log-level to restore it during teardown. + self._initial_logger_levels.setdefault(logger, logger_obj.level) + logger_obj.setLevel(level) + if self._initial_handler_level is None: + self._initial_handler_level = self.handler.level + self.handler.setLevel(level) + initial_disabled_logging_level = self._force_enable_logging(level, logger_obj) + if self._initial_disabled_logging_level is None: + self._initial_disabled_logging_level = initial_disabled_logging_level + + @contextmanager + def at_level(self, level: int | str, logger: str | None = None) -> Generator[None]: + """Context manager that sets the level for capturing of logs. After + the end of the 'with' statement the level is restored to its original + value. + + Will enable the requested logging level if it was disabled via :func:`logging.disable`. + + :param level: The level. + :param logger: The logger to update. If not given, the root logger. + """ + logger_obj = logging.getLogger(logger) + orig_level = logger_obj.level + logger_obj.setLevel(level) + handler_orig_level = self.handler.level + self.handler.setLevel(level) + original_disable_level = self._force_enable_logging(level, logger_obj) + try: + yield + finally: + logger_obj.setLevel(orig_level) + self.handler.setLevel(handler_orig_level) + logging.disable(original_disable_level) + + @contextmanager + def filtering(self, filter_: logging.Filter) -> Generator[None]: + """Context manager that temporarily adds the given filter to the caplog's + :meth:`handler` for the 'with' statement block, and removes that filter at the + end of the block. + + :param filter_: A custom :class:`logging.Filter` object. + + .. versionadded:: 7.5 + """ + self.handler.addFilter(filter_) + try: + yield + finally: + self.handler.removeFilter(filter_) + + +@fixture +def caplog(request: FixtureRequest) -> Generator[LogCaptureFixture]: + """Access and control log capturing. + + Captured logs are available through the following properties/methods:: + + * caplog.messages -> list of format-interpolated log messages + * caplog.text -> string containing formatted log output + * caplog.records -> list of logging.LogRecord instances + * caplog.record_tuples -> list of (logger_name, level, message) tuples + * caplog.clear() -> clear captured records and formatted log output string + """ + result = LogCaptureFixture(request.node, _ispytest=True) + yield result + result._finalize() + + +def get_log_level_for_setting(config: Config, *setting_names: str) -> int | None: + for setting_name in setting_names: + log_level = config.getoption(setting_name) + if log_level is None: + log_level = config.getini(setting_name) + if log_level: + break + else: + return None + + if isinstance(log_level, str): + log_level = log_level.upper() + try: + return int(getattr(logging, log_level, log_level)) + except ValueError as e: + # Python logging does not recognise this as a logging level + raise UsageError( + f"'{log_level}' is not recognized as a logging level name for " + f"'{setting_name}'. Please consider passing the " + "logging level num instead." + ) from e + + +# run after terminalreporter/capturemanager are configured +@hookimpl(trylast=True) +def pytest_configure(config: Config) -> None: + config.pluginmanager.register(LoggingPlugin(config), "logging-plugin") + + +class LoggingPlugin: + """Attaches to the logging module and captures log messages for each test.""" + + def __init__(self, config: Config) -> None: + """Create a new plugin to capture log messages. + + The formatter can be safely shared across all handlers so + create a single one for the entire test session here. + """ + self._config = config + + # Report logging. + self.formatter = self._create_formatter( + get_option_ini(config, "log_format"), + get_option_ini(config, "log_date_format"), + get_option_ini(config, "log_auto_indent"), + ) + self.log_level = get_log_level_for_setting(config, "log_level") + self.caplog_handler = LogCaptureHandler() + self.caplog_handler.setFormatter(self.formatter) + self.report_handler = LogCaptureHandler() + self.report_handler.setFormatter(self.formatter) + + # File logging. + self.log_file_level = get_log_level_for_setting( + config, "log_file_level", "log_level" + ) + log_file = get_option_ini(config, "log_file") or os.devnull + if log_file != os.devnull: + directory = os.path.dirname(os.path.abspath(log_file)) + if not os.path.isdir(directory): + os.makedirs(directory) + + self.log_file_mode = get_option_ini(config, "log_file_mode") or "w" + self.log_file_handler = _FileHandler( + log_file, mode=self.log_file_mode, encoding="UTF-8" + ) + log_file_format = get_option_ini(config, "log_file_format", "log_format") + log_file_date_format = get_option_ini( + config, "log_file_date_format", "log_date_format" + ) + + log_file_formatter = DatetimeFormatter( + log_file_format, datefmt=log_file_date_format + ) + self.log_file_handler.setFormatter(log_file_formatter) + + # CLI/live logging. + self.log_cli_level = get_log_level_for_setting( + config, "log_cli_level", "log_level" + ) + if self._log_cli_enabled(): + terminal_reporter = config.pluginmanager.get_plugin("terminalreporter") + # Guaranteed by `_log_cli_enabled()`. + assert terminal_reporter is not None + capture_manager = config.pluginmanager.get_plugin("capturemanager") + # if capturemanager plugin is disabled, live logging still works. + self.log_cli_handler: ( + _LiveLoggingStreamHandler | _LiveLoggingNullHandler + ) = _LiveLoggingStreamHandler(terminal_reporter, capture_manager) + else: + self.log_cli_handler = _LiveLoggingNullHandler() + log_cli_formatter = self._create_formatter( + get_option_ini(config, "log_cli_format", "log_format"), + get_option_ini(config, "log_cli_date_format", "log_date_format"), + get_option_ini(config, "log_auto_indent"), + ) + self.log_cli_handler.setFormatter(log_cli_formatter) + self._disable_loggers(loggers_to_disable=config.option.logger_disable) + + def _disable_loggers(self, loggers_to_disable: list[str]) -> None: + if not loggers_to_disable: + return + + for name in loggers_to_disable: + logger = logging.getLogger(name) + logger.disabled = True + + def _create_formatter(self, log_format, log_date_format, auto_indent): + # Color option doesn't exist if terminal plugin is disabled. + color = getattr(self._config.option, "color", "no") + if color != "no" and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search( + log_format + ): + formatter: logging.Formatter = ColoredLevelFormatter( + create_terminal_writer(self._config), log_format, log_date_format + ) + else: + formatter = DatetimeFormatter(log_format, log_date_format) + + formatter._style = PercentStyleMultiline( + formatter._style._fmt, auto_indent=auto_indent + ) + + return formatter + + def set_log_path(self, fname: str) -> None: + """Set the filename parameter for Logging.FileHandler(). + + Creates parent directory if it does not exist. + + .. warning:: + This is an experimental API. + """ + fpath = Path(fname) + + if not fpath.is_absolute(): + fpath = self._config.rootpath / fpath + + if not fpath.parent.exists(): + fpath.parent.mkdir(exist_ok=True, parents=True) + + # https://github.com/python/mypy/issues/11193 + stream: io.TextIOWrapper = fpath.open(mode=self.log_file_mode, encoding="UTF-8") # type: ignore[assignment] + old_stream = self.log_file_handler.setStream(stream) + if old_stream: + old_stream.close() + + def _log_cli_enabled(self) -> bool: + """Return whether live logging is enabled.""" + enabled = self._config.getoption( + "--log-cli-level" + ) is not None or self._config.getini("log_cli") + if not enabled: + return False + + terminal_reporter = self._config.pluginmanager.get_plugin("terminalreporter") + if terminal_reporter is None: + # terminal reporter is disabled e.g. by pytest-xdist. + return False + + return True + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_sessionstart(self) -> Generator[None]: + self.log_cli_handler.set_when("sessionstart") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + return (yield) + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_collection(self) -> Generator[None]: + self.log_cli_handler.set_when("collection") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + return (yield) + + @hookimpl(wrapper=True) + def pytest_runtestloop(self, session: Session) -> Generator[None, object, object]: + if session.config.option.collectonly: + return (yield) + + if self._log_cli_enabled() and self._config.get_verbosity() < 1: + # The verbose flag is needed to avoid messy test progress output. + self._config.option.verbose = 1 + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + return (yield) # Run all the tests. + + @hookimpl + def pytest_runtest_logstart(self) -> None: + self.log_cli_handler.reset() + self.log_cli_handler.set_when("start") + + @hookimpl + def pytest_runtest_logreport(self) -> None: + self.log_cli_handler.set_when("logreport") + + @contextmanager + def _runtest_for(self, item: nodes.Item, when: str) -> Generator[None]: + """Implement the internals of the pytest_runtest_xxx() hooks.""" + with ( + catching_logs( + self.caplog_handler, + level=self.log_level, + ) as caplog_handler, + catching_logs( + self.report_handler, + level=self.log_level, + ) as report_handler, + ): + caplog_handler.reset() + report_handler.reset() + item.stash[caplog_records_key][when] = caplog_handler.records + item.stash[caplog_handler_key] = caplog_handler + + try: + yield + finally: + log = report_handler.stream.getvalue().strip() + item.add_report_section(when, "log", log) + + @hookimpl(wrapper=True) + def pytest_runtest_setup(self, item: nodes.Item) -> Generator[None]: + self.log_cli_handler.set_when("setup") + + empty: dict[str, list[logging.LogRecord]] = {} + item.stash[caplog_records_key] = empty + with self._runtest_for(item, "setup"): + yield + + @hookimpl(wrapper=True) + def pytest_runtest_call(self, item: nodes.Item) -> Generator[None]: + self.log_cli_handler.set_when("call") + + with self._runtest_for(item, "call"): + yield + + @hookimpl(wrapper=True) + def pytest_runtest_teardown(self, item: nodes.Item) -> Generator[None]: + self.log_cli_handler.set_when("teardown") + + try: + with self._runtest_for(item, "teardown"): + yield + finally: + del item.stash[caplog_records_key] + del item.stash[caplog_handler_key] + + @hookimpl + def pytest_runtest_logfinish(self) -> None: + self.log_cli_handler.set_when("finish") + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_sessionfinish(self) -> Generator[None]: + self.log_cli_handler.set_when("sessionfinish") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + return (yield) + + @hookimpl + def pytest_unconfigure(self) -> None: + # Close the FileHandler explicitly. + # (logging.shutdown might have lost the weakref?!) + self.log_file_handler.close() + + +class _FileHandler(logging.FileHandler): + """A logging FileHandler with pytest tweaks.""" + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass + + +class _LiveLoggingStreamHandler(logging_StreamHandler): + """A logging StreamHandler used by the live logging feature: it will + write a newline before the first log message in each test. + + During live logging we must also explicitly disable stdout/stderr + capturing otherwise it will get captured and won't appear in the + terminal. + """ + + # Officially stream needs to be a IO[str], but TerminalReporter + # isn't. So force it. + stream: TerminalReporter = None # type: ignore + + def __init__( + self, + terminal_reporter: TerminalReporter, + capture_manager: CaptureManager | None, + ) -> None: + super().__init__(stream=terminal_reporter) # type: ignore[arg-type] + self.capture_manager = capture_manager + self.reset() + self.set_when(None) + self._test_outcome_written = False + + def reset(self) -> None: + """Reset the handler; should be called before the start of each test.""" + self._first_record_emitted = False + + def set_when(self, when: str | None) -> None: + """Prepare for the given test phase (setup/call/teardown).""" + self._when = when + self._section_name_shown = False + if when == "start": + self._test_outcome_written = False + + def emit(self, record: logging.LogRecord) -> None: + ctx_manager = ( + self.capture_manager.global_and_fixture_disabled() + if self.capture_manager + else nullcontext() + ) + with ctx_manager: + if not self._first_record_emitted: + self.stream.write("\n") + self._first_record_emitted = True + elif self._when in ("teardown", "finish"): + if not self._test_outcome_written: + self._test_outcome_written = True + self.stream.write("\n") + if not self._section_name_shown and self._when: + self.stream.section("live log " + self._when, sep="-", bold=True) + self._section_name_shown = True + super().emit(record) + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass + + +class _LiveLoggingNullHandler(logging.NullHandler): + """A logging handler used when live logging is disabled.""" + + def reset(self) -> None: + pass + + def set_when(self, when: str) -> None: + pass + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/main.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/main.py new file mode 100644 index 0000000..9bc930d --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/main.py @@ -0,0 +1,1203 @@ +"""Core implementation of the testing process: init, session, runtest loop.""" + +from __future__ import annotations + +import argparse +from collections.abc import Callable +from collections.abc import Iterable +from collections.abc import Iterator +from collections.abc import Sequence +from collections.abc import Set as AbstractSet +import dataclasses +import fnmatch +import functools +import importlib +import importlib.util +import os +from pathlib import Path +import sys +from typing import final +from typing import Literal +from typing import overload +from typing import TYPE_CHECKING +import warnings + +import pluggy + +from _pytest import nodes +import _pytest._code +from _pytest.config import Config +from _pytest.config import directory_arg +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import PytestPluginManager +from _pytest.config import UsageError +from _pytest.config.argparsing import OverrideIniAction +from _pytest.config.argparsing import Parser +from _pytest.config.compat import PathAwareHookProxy +from _pytest.outcomes import exit +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import fnmatch_ex +from _pytest.pathlib import safe_exists +from _pytest.pathlib import samefile_nofollow +from _pytest.pathlib import scandir +from _pytest.reports import CollectReport +from _pytest.reports import TestReport +from _pytest.runner import collect_one_node +from _pytest.runner import SetupState +from _pytest.warning_types import PytestWarning + + +if TYPE_CHECKING: + from typing_extensions import Self + + from _pytest.fixtures import FixtureManager + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group._addoption( # private to use reserved lower-case short option + "-x", + "--exitfirst", + action="store_const", + dest="maxfail", + const=1, + help="Exit instantly on first error or failed test", + ) + group.addoption( + "--maxfail", + metavar="num", + action="store", + type=int, + dest="maxfail", + default=0, + help="Exit after first num failures or errors", + ) + group.addoption( + "--strict-config", + action=OverrideIniAction, + ini_option="strict_config", + ini_value="true", + help="Enables the strict_config option", + ) + group.addoption( + "--strict-markers", + action=OverrideIniAction, + ini_option="strict_markers", + ini_value="true", + help="Enables the strict_markers option", + ) + group.addoption( + "--strict", + action=OverrideIniAction, + ini_option="strict", + ini_value="true", + help="Enables the strict option", + ) + parser.addini( + "strict_config", + "Any warnings encountered while parsing the `pytest` section of the " + "configuration file raise errors", + type="bool", + # None => fallback to `strict`. + default=None, + ) + parser.addini( + "strict_markers", + "Markers not registered in the `markers` section of the configuration " + "file raise errors", + type="bool", + # None => fallback to `strict`. + default=None, + ) + parser.addini( + "strict", + "Enables all strictness options, currently: " + "strict_config, strict_markers, strict_xfail, strict_parametrization_ids", + type="bool", + default=False, + ) + + group = parser.getgroup("pytest-warnings") + group.addoption( + "-W", + "--pythonwarnings", + action="append", + help="Set which warnings to report, see -W option of Python itself", + ) + parser.addini( + "filterwarnings", + type="linelist", + help="Each line specifies a pattern for " + "warnings.filterwarnings. " + "Processed after -W/--pythonwarnings.", + ) + + group = parser.getgroup("collect", "collection") + group.addoption( + "--collectonly", + "--collect-only", + "--co", + action="store_true", + help="Only collect tests, don't execute them", + ) + group.addoption( + "--pyargs", + action="store_true", + help="Try to interpret all arguments as Python packages", + ) + group.addoption( + "--ignore", + action="append", + metavar="path", + help="Ignore path during collection (multi-allowed)", + ) + group.addoption( + "--ignore-glob", + action="append", + metavar="path", + help="Ignore path pattern during collection (multi-allowed)", + ) + group.addoption( + "--deselect", + action="append", + metavar="nodeid_prefix", + help="Deselect item (via node id prefix) during collection (multi-allowed)", + ) + group.addoption( + "--confcutdir", + dest="confcutdir", + default=None, + metavar="dir", + type=functools.partial(directory_arg, optname="--confcutdir"), + help="Only load conftest.py's relative to specified dir", + ) + group.addoption( + "--noconftest", + action="store_true", + dest="noconftest", + default=False, + help="Don't load any conftest.py files", + ) + group.addoption( + "--keepduplicates", + "--keep-duplicates", + action="store_true", + dest="keepduplicates", + default=False, + help="Keep duplicate tests", + ) + group.addoption( + "--collect-in-virtualenv", + action="store_true", + dest="collect_in_virtualenv", + default=False, + help="Don't ignore tests in a local virtualenv directory", + ) + group.addoption( + "--continue-on-collection-errors", + action="store_true", + default=False, + dest="continue_on_collection_errors", + help="Force test execution even if collection errors occur", + ) + group.addoption( + "--import-mode", + default="prepend", + choices=["prepend", "append", "importlib"], + dest="importmode", + help="Prepend/append to sys.path when importing test modules and conftest " + "files. Default: prepend.", + ) + parser.addini( + "norecursedirs", + "Directory patterns to avoid for recursion", + type="args", + default=[ + "*.egg", + ".*", + "_darcs", + "build", + "CVS", + "dist", + "node_modules", + "venv", + "{arch}", + ], + ) + parser.addini( + "testpaths", + "Directories to search for tests when no files or directories are given on the " + "command line", + type="args", + default=[], + ) + parser.addini( + "collect_imported_tests", + "Whether to collect tests in imported modules outside `testpaths`", + type="bool", + default=True, + ) + parser.addini( + "consider_namespace_packages", + type="bool", + default=False, + help="Consider namespace packages when resolving module names during import", + ) + + group = parser.getgroup("debugconfig", "test session debugging and configuration") + group._addoption( # private to use reserved lower-case short option + "-c", + "--config-file", + metavar="FILE", + type=str, + dest="inifilename", + help="Load configuration from `FILE` instead of trying to locate one of the " + "implicit configuration files.", + ) + group.addoption( + "--rootdir", + action="store", + dest="rootdir", + help="Define root directory for tests. Can be relative path: 'root_dir', './root_dir', " + "'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: " + "'$HOME/root_dir'.", + ) + group.addoption( + "--basetemp", + dest="basetemp", + default=None, + type=validate_basetemp, + metavar="dir", + help=( + "Base temporary directory for this test run. " + "(Warning: this directory is removed if it exists.)" + ), + ) + + +def validate_basetemp(path: str) -> str: + # GH 7119 + msg = "basetemp must not be empty, the current working directory or any parent directory of it" + + # empty path + if not path: + raise argparse.ArgumentTypeError(msg) + + def is_ancestor(base: Path, query: Path) -> bool: + """Return whether query is an ancestor of base.""" + if base == query: + return True + return query in base.parents + + # check if path is an ancestor of cwd + if is_ancestor(Path.cwd(), Path(path).absolute()): + raise argparse.ArgumentTypeError(msg) + + # check symlinks for ancestors + if is_ancestor(Path.cwd().resolve(), Path(path).resolve()): + raise argparse.ArgumentTypeError(msg) + + return path + + +def wrap_session( + config: Config, doit: Callable[[Config, Session], int | ExitCode | None] +) -> int | ExitCode: + """Skeleton command line program.""" + session = Session.from_config(config) + session.exitstatus = ExitCode.OK + initstate = 0 + try: + try: + config._do_configure() + initstate = 1 + config.hook.pytest_sessionstart(session=session) + initstate = 2 + session.exitstatus = doit(config, session) or 0 + except UsageError: + session.exitstatus = ExitCode.USAGE_ERROR + raise + except Failed: + session.exitstatus = ExitCode.TESTS_FAILED + except (KeyboardInterrupt, exit.Exception): + excinfo = _pytest._code.ExceptionInfo.from_current() + exitstatus: int | ExitCode = ExitCode.INTERRUPTED + if isinstance(excinfo.value, exit.Exception): + if excinfo.value.returncode is not None: + exitstatus = excinfo.value.returncode + if initstate < 2: + sys.stderr.write(f"{excinfo.typename}: {excinfo.value.msg}\n") + config.hook.pytest_keyboard_interrupt(excinfo=excinfo) + session.exitstatus = exitstatus + except BaseException: + session.exitstatus = ExitCode.INTERNAL_ERROR + excinfo = _pytest._code.ExceptionInfo.from_current() + try: + config.notify_exception(excinfo, config.option) + except exit.Exception as exc: + if exc.returncode is not None: + session.exitstatus = exc.returncode + sys.stderr.write(f"{type(exc).__name__}: {exc}\n") + else: + if isinstance(excinfo.value, SystemExit): + sys.stderr.write("mainloop: caught unexpected SystemExit!\n") + + finally: + # Explicitly break reference cycle. + excinfo = None # type: ignore + os.chdir(session.startpath) + if initstate >= 2: + try: + config.hook.pytest_sessionfinish( + session=session, exitstatus=session.exitstatus + ) + except exit.Exception as exc: + if exc.returncode is not None: + session.exitstatus = exc.returncode + sys.stderr.write(f"{type(exc).__name__}: {exc}\n") + config._ensure_unconfigure() + return session.exitstatus + + +def pytest_cmdline_main(config: Config) -> int | ExitCode: + return wrap_session(config, _main) + + +def _main(config: Config, session: Session) -> int | ExitCode | None: + """Default command line protocol for initialization, session, + running tests and reporting.""" + config.hook.pytest_collection(session=session) + config.hook.pytest_runtestloop(session=session) + + if session.testsfailed: + return ExitCode.TESTS_FAILED + elif session.testscollected == 0: + return ExitCode.NO_TESTS_COLLECTED + return None + + +def pytest_collection(session: Session) -> None: + session.perform_collect() + + +def pytest_runtestloop(session: Session) -> bool: + if session.testsfailed and not session.config.option.continue_on_collection_errors: + raise session.Interrupted( + f"{session.testsfailed} error{'s' if session.testsfailed != 1 else ''} during collection" + ) + + if session.config.option.collectonly: + return True + + for i, item in enumerate(session.items): + nextitem = session.items[i + 1] if i + 1 < len(session.items) else None + item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) + if session.shouldfail: + raise session.Failed(session.shouldfail) + if session.shouldstop: + raise session.Interrupted(session.shouldstop) + return True + + +def _in_venv(path: Path) -> bool: + """Attempt to detect if ``path`` is the root of a Virtual Environment by + checking for the existence of the pyvenv.cfg file. + + [https://peps.python.org/pep-0405/] + + For regression protection we also check for conda environments that do not include pyenv.cfg yet -- + https://github.com/conda/conda/issues/13337 is the conda issue tracking adding pyenv.cfg. + + Checking for the `conda-meta/history` file per https://github.com/pytest-dev/pytest/issues/12652#issuecomment-2246336902. + + """ + try: + return ( + path.joinpath("pyvenv.cfg").is_file() + or path.joinpath("conda-meta", "history").is_file() + ) + except OSError: + return False + + +def pytest_ignore_collect(collection_path: Path, config: Config) -> bool | None: + if collection_path.name == "__pycache__": + return True + + ignore_paths = config._getconftest_pathlist( + "collect_ignore", path=collection_path.parent + ) + ignore_paths = ignore_paths or [] + excludeopt = config.getoption("ignore") + if excludeopt: + ignore_paths.extend(absolutepath(x) for x in excludeopt) + + if collection_path in ignore_paths: + return True + + ignore_globs = config._getconftest_pathlist( + "collect_ignore_glob", path=collection_path.parent + ) + ignore_globs = ignore_globs or [] + excludeglobopt = config.getoption("ignore_glob") + if excludeglobopt: + ignore_globs.extend(absolutepath(x) for x in excludeglobopt) + + if any(fnmatch.fnmatch(str(collection_path), str(glob)) for glob in ignore_globs): + return True + + allow_in_venv = config.getoption("collect_in_virtualenv") + if not allow_in_venv and _in_venv(collection_path): + return True + + if collection_path.is_dir(): + norecursepatterns = config.getini("norecursedirs") + if any(fnmatch_ex(pat, collection_path) for pat in norecursepatterns): + return True + + return None + + +def pytest_collect_directory( + path: Path, parent: nodes.Collector +) -> nodes.Collector | None: + return Dir.from_parent(parent, path=path) + + +def pytest_collection_modifyitems(items: list[nodes.Item], config: Config) -> None: + deselect_prefixes = tuple(config.getoption("deselect") or []) + if not deselect_prefixes: + return + + remaining = [] + deselected = [] + for colitem in items: + if colitem.nodeid.startswith(deselect_prefixes): + deselected.append(colitem) + else: + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +class FSHookProxy: + def __init__( + self, + pm: PytestPluginManager, + remove_mods: AbstractSet[object], + ) -> None: + self.pm = pm + self.remove_mods = remove_mods + + def __getattr__(self, name: str) -> pluggy.HookCaller: + x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods) + self.__dict__[name] = x + return x + + +class Interrupted(KeyboardInterrupt): + """Signals that the test run was interrupted.""" + + __module__ = "builtins" # For py3. + + +class Failed(Exception): + """Signals a stop as failed test run.""" + + +@dataclasses.dataclass +class _bestrelpath_cache(dict[Path, str]): + __slots__ = ("path",) + + path: Path + + def __missing__(self, path: Path) -> str: + r = bestrelpath(self.path, path) + self[path] = r + return r + + +@final +class Dir(nodes.Directory): + """Collector of files in a file system directory. + + .. versionadded:: 8.0 + + .. note:: + + Python directories with an `__init__.py` file are instead collected by + :class:`~pytest.Package` by default. Both are :class:`~pytest.Directory` + collectors. + """ + + @classmethod + def from_parent( # type: ignore[override] + cls, + parent: nodes.Collector, + *, + path: Path, + ) -> Self: + """The public constructor. + + :param parent: The parent collector of this Dir. + :param path: The directory's path. + :type path: pathlib.Path + """ + return super().from_parent(parent=parent, path=path) + + def collect(self) -> Iterable[nodes.Item | nodes.Collector]: + config = self.config + col: nodes.Collector | None + cols: Sequence[nodes.Collector] + ihook = self.ihook + for direntry in scandir(self.path): + if direntry.is_dir(): + path = Path(direntry.path) + if not self.session.isinitpath(path, with_parents=True): + if ihook.pytest_ignore_collect(collection_path=path, config=config): + continue + col = ihook.pytest_collect_directory(path=path, parent=self) + if col is not None: + yield col + + elif direntry.is_file(): + path = Path(direntry.path) + if not self.session.isinitpath(path): + if ihook.pytest_ignore_collect(collection_path=path, config=config): + continue + cols = ihook.pytest_collect_file(file_path=path, parent=self) + yield from cols + + +@final +class Session(nodes.Collector): + """The root of the collection tree. + + ``Session`` collects the initial paths given as arguments to pytest. + """ + + Interrupted = Interrupted + Failed = Failed + # Set on the session by runner.pytest_sessionstart. + _setupstate: SetupState + # Set on the session by fixtures.pytest_sessionstart. + _fixturemanager: FixtureManager + exitstatus: int | ExitCode + + def __init__(self, config: Config) -> None: + super().__init__( + name="", + path=config.rootpath, + fspath=None, + parent=None, + config=config, + session=self, + nodeid="", + ) + self.testsfailed = 0 + self.testscollected = 0 + self._shouldstop: bool | str = False + self._shouldfail: bool | str = False + self.trace = config.trace.root.get("collection") + self._initialpaths: frozenset[Path] = frozenset() + self._initialpaths_with_parents: frozenset[Path] = frozenset() + self._notfound: list[tuple[str, Sequence[nodes.Collector]]] = [] + self._initial_parts: list[CollectionArgument] = [] + self._collection_cache: dict[nodes.Collector, CollectReport] = {} + self.items: list[nodes.Item] = [] + + self._bestrelpathcache: dict[Path, str] = _bestrelpath_cache(config.rootpath) + + self.config.pluginmanager.register(self, name="session") + + @classmethod + def from_config(cls, config: Config) -> Session: + session: Session = cls._create(config=config) + return session + + def __repr__(self) -> str: + return ( + f"<{self.__class__.__name__} {self.name} " + f"exitstatus=%r " + f"testsfailed={self.testsfailed} " + f"testscollected={self.testscollected}>" + ) % getattr(self, "exitstatus", "") + + @property + def shouldstop(self) -> bool | str: + return self._shouldstop + + @shouldstop.setter + def shouldstop(self, value: bool | str) -> None: + # The runner checks shouldfail and assumes that if it is set we are + # definitely stopping, so prevent unsetting it. + if value is False and self._shouldstop: + warnings.warn( + PytestWarning( + "session.shouldstop cannot be unset after it has been set; ignoring." + ), + stacklevel=2, + ) + return + self._shouldstop = value + + @property + def shouldfail(self) -> bool | str: + return self._shouldfail + + @shouldfail.setter + def shouldfail(self, value: bool | str) -> None: + # The runner checks shouldfail and assumes that if it is set we are + # definitely stopping, so prevent unsetting it. + if value is False and self._shouldfail: + warnings.warn( + PytestWarning( + "session.shouldfail cannot be unset after it has been set; ignoring." + ), + stacklevel=2, + ) + return + self._shouldfail = value + + @property + def startpath(self) -> Path: + """The path from which pytest was invoked. + + .. versionadded:: 7.0.0 + """ + return self.config.invocation_params.dir + + def _node_location_to_relpath(self, node_path: Path) -> str: + # bestrelpath is a quite slow function. + return self._bestrelpathcache[node_path] + + @hookimpl(tryfirst=True) + def pytest_collectstart(self) -> None: + if self.shouldfail: + raise self.Failed(self.shouldfail) + if self.shouldstop: + raise self.Interrupted(self.shouldstop) + + @hookimpl(tryfirst=True) + def pytest_runtest_logreport(self, report: TestReport | CollectReport) -> None: + if report.failed and not hasattr(report, "wasxfail"): + self.testsfailed += 1 + maxfail = self.config.getvalue("maxfail") + if maxfail and self.testsfailed >= maxfail: + self.shouldfail = f"stopping after {self.testsfailed} failures" + + pytest_collectreport = pytest_runtest_logreport + + def isinitpath( + self, + path: str | os.PathLike[str], + *, + with_parents: bool = False, + ) -> bool: + """Is path an initial path? + + An initial path is a path explicitly given to pytest on the command + line. + + :param with_parents: + If set, also return True if the path is a parent of an initial path. + + .. versionchanged:: 8.0 + Added the ``with_parents`` parameter. + """ + # Optimization: Path(Path(...)) is much slower than isinstance. + path_ = path if isinstance(path, Path) else Path(path) + if with_parents: + return path_ in self._initialpaths_with_parents + else: + return path_ in self._initialpaths + + def gethookproxy(self, fspath: os.PathLike[str]) -> pluggy.HookRelay: + # Optimization: Path(Path(...)) is much slower than isinstance. + path = fspath if isinstance(fspath, Path) else Path(fspath) + pm = self.config.pluginmanager + # Check if we have the common case of running + # hooks with all conftest.py files. + my_conftestmodules = pm._getconftestmodules(path) + remove_mods = pm._conftest_plugins.difference(my_conftestmodules) + proxy: pluggy.HookRelay + if remove_mods: + # One or more conftests are not in use at this path. + proxy = PathAwareHookProxy(FSHookProxy(pm, remove_mods)) # type: ignore[arg-type,assignment] + else: + # All plugins are active for this fspath. + proxy = self.config.hook + return proxy + + def _collect_path( + self, + path: Path, + path_cache: dict[Path, Sequence[nodes.Collector]], + ) -> Sequence[nodes.Collector]: + """Create a Collector for the given path. + + `path_cache` makes it so the same Collectors are returned for the same + path. + """ + if path in path_cache: + return path_cache[path] + + if path.is_dir(): + ihook = self.gethookproxy(path.parent) + col: nodes.Collector | None = ihook.pytest_collect_directory( + path=path, parent=self + ) + cols: Sequence[nodes.Collector] = (col,) if col is not None else () + + elif path.is_file(): + ihook = self.gethookproxy(path) + cols = ihook.pytest_collect_file(file_path=path, parent=self) + + else: + # Broken symlink or invalid/missing file. + cols = () + + path_cache[path] = cols + return cols + + @overload + def perform_collect( + self, args: Sequence[str] | None = ..., genitems: Literal[True] = ... + ) -> Sequence[nodes.Item]: ... + + @overload + def perform_collect( + self, args: Sequence[str] | None = ..., genitems: bool = ... + ) -> Sequence[nodes.Item | nodes.Collector]: ... + + def perform_collect( + self, args: Sequence[str] | None = None, genitems: bool = True + ) -> Sequence[nodes.Item | nodes.Collector]: + """Perform the collection phase for this session. + + This is called by the default :hook:`pytest_collection` hook + implementation; see the documentation of this hook for more details. + For testing purposes, it may also be called directly on a fresh + ``Session``. + + This function normally recursively expands any collectors collected + from the session to their items, and only items are returned. For + testing purposes, this may be suppressed by passing ``genitems=False``, + in which case the return value contains these collectors unexpanded, + and ``session.items`` is empty. + """ + if args is None: + args = self.config.args + + self.trace("perform_collect", self, args) + self.trace.root.indent += 1 + + hook = self.config.hook + + self._notfound = [] + self._initial_parts = [] + self._collection_cache = {} + self.items = [] + items: Sequence[nodes.Item | nodes.Collector] = self.items + consider_namespace_packages: bool = self.config.getini( + "consider_namespace_packages" + ) + try: + initialpaths: list[Path] = [] + initialpaths_with_parents: list[Path] = [] + + collection_args = [ + resolve_collection_argument( + self.config.invocation_params.dir, + arg, + i, + as_pypath=self.config.option.pyargs, + consider_namespace_packages=consider_namespace_packages, + ) + for i, arg in enumerate(args) + ] + + if not self.config.getoption("keepduplicates"): + # Normalize the collection arguments -- remove duplicates and overlaps. + self._initial_parts = normalize_collection_arguments(collection_args) + else: + self._initial_parts = collection_args + + for collection_argument in self._initial_parts: + initialpaths.append(collection_argument.path) + initialpaths_with_parents.append(collection_argument.path) + initialpaths_with_parents.extend(collection_argument.path.parents) + self._initialpaths = frozenset(initialpaths) + self._initialpaths_with_parents = frozenset(initialpaths_with_parents) + + rep = collect_one_node(self) + self.ihook.pytest_collectreport(report=rep) + self.trace.root.indent -= 1 + if self._notfound: + errors = [] + for arg, collectors in self._notfound: + if collectors: + errors.append( + f"not found: {arg}\n(no match in any of {collectors!r})" + ) + else: + errors.append(f"found no collectors for {arg}") + + raise UsageError(*errors) + + if not genitems: + items = rep.result + else: + if rep.passed: + for node in rep.result: + self.items.extend(self.genitems(node)) + + self.config.pluginmanager.check_pending() + hook.pytest_collection_modifyitems( + session=self, config=self.config, items=items + ) + finally: + self._notfound = [] + self._initial_parts = [] + self._collection_cache = {} + hook.pytest_collection_finish(session=self) + + if genitems: + self.testscollected = len(items) + + return items + + def _collect_one_node( + self, + node: nodes.Collector, + handle_dupes: bool = True, + ) -> tuple[CollectReport, bool]: + if node in self._collection_cache and handle_dupes: + rep = self._collection_cache[node] + return rep, True + else: + rep = collect_one_node(node) + self._collection_cache[node] = rep + return rep, False + + def collect(self) -> Iterator[nodes.Item | nodes.Collector]: + # This is a cache for the root directories of the initial paths. + # We can't use collection_cache for Session because of its special + # role as the bootstrapping collector. + path_cache: dict[Path, Sequence[nodes.Collector]] = {} + + pm = self.config.pluginmanager + + for collection_argument in self._initial_parts: + self.trace("processing argument", collection_argument) + self.trace.root.indent += 1 + + argpath = collection_argument.path + names = collection_argument.parts + parametrization = collection_argument.parametrization + module_name = collection_argument.module_name + + # resolve_collection_argument() ensures this. + if argpath.is_dir(): + assert not names, f"invalid arg {(argpath, names)!r}" + + paths = [argpath] + # Add relevant parents of the path, from the root, e.g. + # /a/b/c.py -> [/, /a, /a/b, /a/b/c.py] + if module_name is None: + # Paths outside of the confcutdir should not be considered. + for path in argpath.parents: + if not pm._is_in_confcutdir(path): + break + paths.insert(0, path) + else: + # For --pyargs arguments, only consider paths matching the module + # name. Paths beyond the package hierarchy are not included. + module_name_parts = module_name.split(".") + for i, path in enumerate(argpath.parents, 2): + if i > len(module_name_parts) or path.stem != module_name_parts[-i]: + break + paths.insert(0, path) + + # Start going over the parts from the root, collecting each level + # and discarding all nodes which don't match the level's part. + any_matched_in_initial_part = False + notfound_collectors = [] + work: list[tuple[nodes.Collector | nodes.Item, list[Path | str]]] = [ + (self, [*paths, *names]) + ] + while work: + matchnode, matchparts = work.pop() + + # Pop'd all of the parts, this is a match. + if not matchparts: + yield matchnode + any_matched_in_initial_part = True + continue + + # Should have been matched by now, discard. + if not isinstance(matchnode, nodes.Collector): + continue + + # Collect this level of matching. + # Collecting Session (self) is done directly to avoid endless + # recursion to this function. + subnodes: Sequence[nodes.Collector | nodes.Item] + if isinstance(matchnode, Session): + assert isinstance(matchparts[0], Path) + subnodes = matchnode._collect_path(matchparts[0], path_cache) + else: + # For backward compat, files given directly multiple + # times on the command line should not be deduplicated. + handle_dupes = not ( + len(matchparts) == 1 + and isinstance(matchparts[0], Path) + and matchparts[0].is_file() + ) + rep, duplicate = self._collect_one_node(matchnode, handle_dupes) + if not duplicate and not rep.passed: + # Report collection failures here to avoid failing to + # run some test specified in the command line because + # the module could not be imported (#134). + matchnode.ihook.pytest_collectreport(report=rep) + if not rep.passed: + continue + subnodes = rep.result + + # Prune this level. + any_matched_in_collector = False + for node in reversed(subnodes): + # Path part e.g. `/a/b/` in `/a/b/test_file.py::TestIt::test_it`. + if isinstance(matchparts[0], Path): + is_match = node.path == matchparts[0] + if sys.platform == "win32" and not is_match: + # In case the file paths do not match, fallback to samefile() to + # account for short-paths on Windows (#11895). But use a version + # which doesn't resolve symlinks, otherwise we might match the + # same file more than once (#12039). + is_match = samefile_nofollow(node.path, matchparts[0]) + + # Name part e.g. `TestIt` in `/a/b/test_file.py::TestIt::test_it`. + else: + if len(matchparts) == 1: + # This the last part, one parametrization goes. + if parametrization is not None: + # A parametrized arg must match exactly. + is_match = node.name == matchparts[0] + parametrization + else: + # A non-parameterized arg matches all parametrizations (if any). + # TODO: Remove the hacky split once the collection structure + # contains parametrization. + is_match = node.name.split("[")[0] == matchparts[0] + else: + is_match = node.name == matchparts[0] + if is_match: + work.append((node, matchparts[1:])) + any_matched_in_collector = True + + if not any_matched_in_collector: + notfound_collectors.append(matchnode) + + if not any_matched_in_initial_part: + report_arg = "::".join((str(argpath), *names)) + self._notfound.append((report_arg, notfound_collectors)) + + self.trace.root.indent -= 1 + + def genitems(self, node: nodes.Item | nodes.Collector) -> Iterator[nodes.Item]: + self.trace("genitems", node) + if isinstance(node, nodes.Item): + node.ihook.pytest_itemcollected(item=node) + yield node + else: + assert isinstance(node, nodes.Collector) + # For backward compat, dedup only applies to files. + handle_dupes = not isinstance(node, nodes.File) + rep, duplicate = self._collect_one_node(node, handle_dupes) + if rep.passed: + for subnode in rep.result: + yield from self.genitems(subnode) + if not duplicate: + node.ihook.pytest_collectreport(report=rep) + + +def search_pypath( + module_name: str, *, consider_namespace_packages: bool = False +) -> str | None: + """Search sys.path for the given a dotted module name, and return its file + system path if found.""" + try: + spec = importlib.util.find_spec(module_name) + # AttributeError: looks like package module, but actually filename + # ImportError: module does not exist + # ValueError: not a module name + except (AttributeError, ImportError, ValueError): + return None + + if spec is None: + return None + + if ( + spec.submodule_search_locations is None + or len(spec.submodule_search_locations) == 0 + ): + # Must be a simple module. + return spec.origin + + if consider_namespace_packages: + # If submodule_search_locations is set, it's a package (regular or namespace). + # Typically there is a single entry, but documentation claims it can be empty too + # (e.g. if the package has no physical location). + return spec.submodule_search_locations[0] + + if spec.origin is None: + # This is only the case for namespace packages + return None + + return os.path.dirname(spec.origin) + + +@dataclasses.dataclass(frozen=True) +class CollectionArgument: + """A resolved collection argument.""" + + path: Path + parts: Sequence[str] + parametrization: str | None + module_name: str | None + original_index: int + + +def resolve_collection_argument( + invocation_path: Path, + arg: str, + arg_index: int, + *, + as_pypath: bool = False, + consider_namespace_packages: bool = False, +) -> CollectionArgument: + """Parse path arguments optionally containing selection parts and return (fspath, names). + + Command-line arguments can point to files and/or directories, and optionally contain + parts for specific tests selection, for example: + + "pkg/tests/test_foo.py::TestClass::test_foo" + + This function ensures the path exists, and returns a resolved `CollectionArgument`: + + CollectionArgument( + path=Path("/full/path/to/pkg/tests/test_foo.py"), + parts=["TestClass", "test_foo"], + module_name=None, + ) + + When as_pypath is True, expects that the command-line argument actually contains + module paths instead of file-system paths: + + "pkg.tests.test_foo::TestClass::test_foo[a,b]" + + In which case we search sys.path for a matching module, and then return the *path* to the + found module, which may look like this: + + CollectionArgument( + path=Path("/home/u/myvenv/lib/site-packages/pkg/tests/test_foo.py"), + parts=["TestClass", "test_foo"], + parametrization="[a,b]", + module_name="pkg.tests.test_foo", + ) + + If the path doesn't exist, raise UsageError. + If the path is a directory and selection parts are present, raise UsageError. + """ + base, squacket, rest = arg.partition("[") + strpath, *parts = base.split("::") + if squacket and not parts: + raise UsageError(f"path cannot contain [] parametrization: {arg}") + parametrization = f"{squacket}{rest}" if squacket else None + module_name = None + if as_pypath: + pyarg_strpath = search_pypath( + strpath, consider_namespace_packages=consider_namespace_packages + ) + if pyarg_strpath is not None: + module_name = strpath + strpath = pyarg_strpath + fspath = invocation_path / strpath + fspath = absolutepath(fspath) + if not safe_exists(fspath): + msg = ( + "module or package not found: {arg} (missing __init__.py?)" + if as_pypath + else "file or directory not found: {arg}" + ) + raise UsageError(msg.format(arg=arg)) + if parts and fspath.is_dir(): + msg = ( + "package argument cannot contain :: selection parts: {arg}" + if as_pypath + else "directory argument cannot contain :: selection parts: {arg}" + ) + raise UsageError(msg.format(arg=arg)) + return CollectionArgument( + path=fspath, + parts=parts, + parametrization=parametrization, + module_name=module_name, + original_index=arg_index, + ) + + +def is_collection_argument_subsumed_by( + arg: CollectionArgument, by: CollectionArgument +) -> bool: + """Check if `arg` is subsumed (contained) by `by`.""" + # First check path subsumption. + if by.path != arg.path: + # `by` subsumes `arg` if `by` is a parent directory of `arg` and has no + # parts (collects everything in that directory). + if not by.parts: + return arg.path.is_relative_to(by.path) + return False + # Paths are equal, check parts. + # For example: ("TestClass",) is a prefix of ("TestClass", "test_method"). + if len(by.parts) > len(arg.parts) or arg.parts[: len(by.parts)] != by.parts: + return False + # Paths and parts are equal, check parametrization. + # A `by` without parametrization (None) matches everything, e.g. + # `pytest x.py::test_it` matches `x.py::test_it[0]`. Otherwise must be + # exactly equal. + if by.parametrization is not None and by.parametrization != arg.parametrization: + return False + return True + + +def normalize_collection_arguments( + collection_args: Sequence[CollectionArgument], +) -> list[CollectionArgument]: + """Normalize collection arguments to eliminate overlapping paths and parts. + + Detects when collection arguments overlap in either paths or parts and only + keeps the shorter prefix, or the earliest argument if duplicate, preserving + order. The result is prefix-free. + """ + # A quadratic algorithm is not acceptable since large inputs are possible. + # So this uses an O(n*log(n)) algorithm which takes advantage of the + # property that after sorting, a collection argument will immediately + # precede collection arguments it subsumes. An O(n) algorithm is not worth + # it. + collection_args_sorted = sorted( + collection_args, + key=lambda arg: (arg.path, arg.parts, arg.parametrization or ""), + ) + normalized: list[CollectionArgument] = [] + last_kept = None + for arg in collection_args_sorted: + if last_kept is None or not is_collection_argument_subsumed_by(arg, last_kept): + normalized.append(arg) + last_kept = arg + normalized.sort(key=lambda arg: arg.original_index) + return normalized diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/mark/__init__.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/mark/__init__.py new file mode 100644 index 0000000..841d781 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/mark/__init__.py @@ -0,0 +1,301 @@ +"""Generic mechanism for marking and selecting python functions.""" + +from __future__ import annotations + +import collections +from collections.abc import Collection +from collections.abc import Iterable +from collections.abc import Set as AbstractSet +import dataclasses +from typing import TYPE_CHECKING + +from .expression import Expression +from .structures import _HiddenParam +from .structures import EMPTY_PARAMETERSET_OPTION +from .structures import get_empty_parameterset_mark +from .structures import HIDDEN_PARAM +from .structures import Mark +from .structures import MARK_GEN +from .structures import MarkDecorator +from .structures import MarkGenerator +from .structures import ParameterSet +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import UsageError +from _pytest.config.argparsing import NOT_SET +from _pytest.config.argparsing import Parser +from _pytest.stash import StashKey + + +if TYPE_CHECKING: + from _pytest.nodes import Item + + +__all__ = [ + "HIDDEN_PARAM", + "MARK_GEN", + "Mark", + "MarkDecorator", + "MarkGenerator", + "ParameterSet", + "get_empty_parameterset_mark", +] + + +old_mark_config_key = StashKey[Config | None]() + + +def param( + *values: object, + marks: MarkDecorator | Collection[MarkDecorator | Mark] = (), + id: str | _HiddenParam | None = None, +) -> ParameterSet: + """Specify a parameter in `pytest.mark.parametrize`_ calls or + :ref:`parametrized fixtures `. + + .. code-block:: python + + @pytest.mark.parametrize( + "test_input,expected", + [ + ("3+5", 8), + pytest.param("6*9", 42, marks=pytest.mark.xfail), + ], + ) + def test_eval(test_input, expected): + assert eval(test_input) == expected + + :param values: Variable args of the values of the parameter set, in order. + + :param marks: + A single mark or a list of marks to be applied to this parameter set. + + :ref:`pytest.mark.usefixtures ` cannot be added via this parameter. + + :type id: str | Literal[pytest.HIDDEN_PARAM] | None + :param id: + The id to attribute to this parameter set. + + .. versionadded:: 8.4 + :ref:`hidden-param` means to hide the parameter set + from the test name. Can only be used at most 1 time, as + test names need to be unique. + """ + return ParameterSet.param(*values, marks=marks, id=id) + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group._addoption( # private to use reserved lower-case short option + "-k", + action="store", + dest="keyword", + default="", + metavar="EXPRESSION", + help="Only run tests which match the given substring expression. " + "An expression is a Python evaluable expression " + "where all names are substring-matched against test names " + "and their parent classes. Example: -k 'test_method or test_" + "other' matches all test functions and classes whose name " + "contains 'test_method' or 'test_other', while -k 'not test_method' " + "matches those that don't contain 'test_method' in their names. " + "-k 'not test_method and not test_other' will eliminate the matches. " + "Additionally keywords are matched to classes and functions " + "containing extra names in their 'extra_keyword_matches' set, " + "as well as functions which have names assigned directly to them. " + "The matching is case-insensitive.", + ) + + group._addoption( # private to use reserved lower-case short option + "-m", + action="store", + dest="markexpr", + default="", + metavar="MARKEXPR", + help="Only run tests matching given mark expression. " + "For example: -m 'mark1 and not mark2'.", + ) + + group.addoption( + "--markers", + action="store_true", + help="show markers (builtin, plugin and per-project ones).", + ) + + parser.addini("markers", "Register new markers for test functions", "linelist") + parser.addini(EMPTY_PARAMETERSET_OPTION, "Default marker for empty parametersets") + + +@hookimpl(tryfirst=True) +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + import _pytest.config + + if config.option.markers: + config._do_configure() + tw = _pytest.config.create_terminal_writer(config) + for line in config.getini("markers"): + parts = line.split(":", 1) + name = parts[0] + rest = parts[1] if len(parts) == 2 else "" + tw.write(f"@pytest.mark.{name}:", bold=True) + tw.line(rest) + tw.line() + config._ensure_unconfigure() + return 0 + + return None + + +@dataclasses.dataclass +class KeywordMatcher: + """A matcher for keywords. + + Given a list of names, matches any substring of one of these names. The + string inclusion check is case-insensitive. + + Will match on the name of colitem, including the names of its parents. + Only matches names of items which are either a :class:`Class` or a + :class:`Function`. + + Additionally, matches on names in the 'extra_keyword_matches' set of + any item, as well as names directly assigned to test functions. + """ + + __slots__ = ("_names",) + + _names: AbstractSet[str] + + @classmethod + def from_item(cls, item: Item) -> KeywordMatcher: + mapped_names = set() + + # Add the names of the current item and any parent items, + # except the Session and root Directory's which are not + # interesting for matching. + import pytest + + for node in item.listchain(): + if isinstance(node, pytest.Session): + continue + if isinstance(node, pytest.Directory) and isinstance( + node.parent, pytest.Session + ): + continue + mapped_names.add(node.name) + + # Add the names added as extra keywords to current or parent items. + mapped_names.update(item.listextrakeywords()) + + # Add the names attached to the current function through direct assignment. + function_obj = getattr(item, "function", None) + if function_obj: + mapped_names.update(function_obj.__dict__) + + # Add the markers to the keywords as we no longer handle them correctly. + mapped_names.update(mark.name for mark in item.iter_markers()) + + return cls(mapped_names) + + def __call__(self, subname: str, /, **kwargs: str | int | bool | None) -> bool: + if kwargs: + raise UsageError("Keyword expressions do not support call parameters.") + subname = subname.lower() + return any(subname in name.lower() for name in self._names) + + +def deselect_by_keyword(items: list[Item], config: Config) -> None: + keywordexpr = config.option.keyword.lstrip() + if not keywordexpr: + return + + expr = _parse_expression(keywordexpr, "Wrong expression passed to '-k'") + + remaining = [] + deselected = [] + for colitem in items: + if not expr.evaluate(KeywordMatcher.from_item(colitem)): + deselected.append(colitem) + else: + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +@dataclasses.dataclass +class MarkMatcher: + """A matcher for markers which are present. + + Tries to match on any marker names, attached to the given colitem. + """ + + __slots__ = ("own_mark_name_mapping",) + + own_mark_name_mapping: dict[str, list[Mark]] + + @classmethod + def from_markers(cls, markers: Iterable[Mark]) -> MarkMatcher: + mark_name_mapping = collections.defaultdict(list) + for mark in markers: + mark_name_mapping[mark.name].append(mark) + return cls(mark_name_mapping) + + def __call__(self, name: str, /, **kwargs: str | int | bool | None) -> bool: + if not (matches := self.own_mark_name_mapping.get(name, [])): + return False + + for mark in matches: # pylint: disable=consider-using-any-or-all + if all(mark.kwargs.get(k, NOT_SET) == v for k, v in kwargs.items()): + return True + return False + + +def deselect_by_mark(items: list[Item], config: Config) -> None: + matchexpr = config.option.markexpr + if not matchexpr: + return + + expr = _parse_expression(matchexpr, "Wrong expression passed to '-m'") + remaining: list[Item] = [] + deselected: list[Item] = [] + for item in items: + if expr.evaluate(MarkMatcher.from_markers(item.iter_markers())): + remaining.append(item) + else: + deselected.append(item) + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +def _parse_expression(expr: str, exc_message: str) -> Expression: + try: + return Expression.compile(expr) + except SyntaxError as e: + raise UsageError( + f"{exc_message}: {e.text}: at column {e.offset}: {e.msg}" + ) from None + + +def pytest_collection_modifyitems(items: list[Item], config: Config) -> None: + deselect_by_keyword(items, config) + deselect_by_mark(items, config) + + +def pytest_configure(config: Config) -> None: + config.stash[old_mark_config_key] = MARK_GEN._config + MARK_GEN._config = config + + empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION) + + if empty_parameterset not in ("skip", "xfail", "fail_at_collect", None, ""): + raise UsageError( + f"{EMPTY_PARAMETERSET_OPTION!s} must be one of skip, xfail or fail_at_collect" + f" but it is {empty_parameterset!r}" + ) + + +def pytest_unconfigure(config: Config) -> None: + MARK_GEN._config = config.stash.get(old_mark_config_key, None) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/mark/expression.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/mark/expression.py new file mode 100644 index 0000000..3bdbd03 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/mark/expression.py @@ -0,0 +1,353 @@ +r"""Evaluate match expressions, as used by `-k` and `-m`. + +The grammar is: + +expression: expr? EOF +expr: and_expr ('or' and_expr)* +and_expr: not_expr ('and' not_expr)* +not_expr: 'not' not_expr | '(' expr ')' | ident kwargs? + +ident: (\w|:|\+|-|\.|\[|\]|\\|/)+ +kwargs: ('(' name '=' value ( ', ' name '=' value )* ')') +name: a valid ident, but not a reserved keyword +value: (unescaped) string literal | (-)?[0-9]+ | 'False' | 'True' | 'None' + +The semantics are: + +- Empty expression evaluates to False. +- ident evaluates to True or False according to a provided matcher function. +- ident with parentheses and keyword arguments evaluates to True or False according to a provided matcher function. +- or/and/not evaluate according to the usual boolean semantics. +""" + +from __future__ import annotations + +import ast +from collections.abc import Iterator +from collections.abc import Mapping +from collections.abc import Sequence +import dataclasses +import enum +import keyword +import re +import types +from typing import Final +from typing import final +from typing import Literal +from typing import NoReturn +from typing import overload +from typing import Protocol + + +__all__ = [ + "Expression", + "ExpressionMatcher", +] + + +FILE_NAME: Final = "" + + +class TokenType(enum.Enum): + LPAREN = "left parenthesis" + RPAREN = "right parenthesis" + OR = "or" + AND = "and" + NOT = "not" + IDENT = "identifier" + EOF = "end of input" + EQUAL = "=" + STRING = "string literal" + COMMA = "," + + +@dataclasses.dataclass(frozen=True) +class Token: + __slots__ = ("pos", "type", "value") + type: TokenType + value: str + pos: int + + +class Scanner: + __slots__ = ("current", "input", "tokens") + + def __init__(self, input: str) -> None: + self.input = input + self.tokens = self.lex(input) + self.current = next(self.tokens) + + def lex(self, input: str) -> Iterator[Token]: + pos = 0 + while pos < len(input): + if input[pos] in (" ", "\t"): + pos += 1 + elif input[pos] == "(": + yield Token(TokenType.LPAREN, "(", pos) + pos += 1 + elif input[pos] == ")": + yield Token(TokenType.RPAREN, ")", pos) + pos += 1 + elif input[pos] == "=": + yield Token(TokenType.EQUAL, "=", pos) + pos += 1 + elif input[pos] == ",": + yield Token(TokenType.COMMA, ",", pos) + pos += 1 + elif (quote_char := input[pos]) in ("'", '"'): + end_quote_pos = input.find(quote_char, pos + 1) + if end_quote_pos == -1: + raise SyntaxError( + f'closing quote "{quote_char}" is missing', + (FILE_NAME, 1, pos + 1, input), + ) + value = input[pos : end_quote_pos + 1] + if (backslash_pos := input.find("\\")) != -1: + raise SyntaxError( + r'escaping with "\" not supported in marker expression', + (FILE_NAME, 1, backslash_pos + 1, input), + ) + yield Token(TokenType.STRING, value, pos) + pos += len(value) + else: + match = re.match(r"(:?\w|:|\+|-|\.|\[|\]|\\|/)+", input[pos:]) + if match: + value = match.group(0) + if value == "or": + yield Token(TokenType.OR, value, pos) + elif value == "and": + yield Token(TokenType.AND, value, pos) + elif value == "not": + yield Token(TokenType.NOT, value, pos) + else: + yield Token(TokenType.IDENT, value, pos) + pos += len(value) + else: + raise SyntaxError( + f'unexpected character "{input[pos]}"', + (FILE_NAME, 1, pos + 1, input), + ) + yield Token(TokenType.EOF, "", pos) + + @overload + def accept(self, type: TokenType, *, reject: Literal[True]) -> Token: ... + + @overload + def accept( + self, type: TokenType, *, reject: Literal[False] = False + ) -> Token | None: ... + + def accept(self, type: TokenType, *, reject: bool = False) -> Token | None: + if self.current.type is type: + token = self.current + if token.type is not TokenType.EOF: + self.current = next(self.tokens) + return token + if reject: + self.reject((type,)) + return None + + def reject(self, expected: Sequence[TokenType]) -> NoReturn: + raise SyntaxError( + "expected {}; got {}".format( + " OR ".join(type.value for type in expected), + self.current.type.value, + ), + (FILE_NAME, 1, self.current.pos + 1, self.input), + ) + + +# True, False and None are legal match expression identifiers, +# but illegal as Python identifiers. To fix this, this prefix +# is added to identifiers in the conversion to Python AST. +IDENT_PREFIX = "$" + + +def expression(s: Scanner) -> ast.Expression: + if s.accept(TokenType.EOF): + ret: ast.expr = ast.Constant(False) + else: + ret = expr(s) + s.accept(TokenType.EOF, reject=True) + return ast.fix_missing_locations(ast.Expression(ret)) + + +def expr(s: Scanner) -> ast.expr: + ret = and_expr(s) + while s.accept(TokenType.OR): + rhs = and_expr(s) + ret = ast.BoolOp(ast.Or(), [ret, rhs]) + return ret + + +def and_expr(s: Scanner) -> ast.expr: + ret = not_expr(s) + while s.accept(TokenType.AND): + rhs = not_expr(s) + ret = ast.BoolOp(ast.And(), [ret, rhs]) + return ret + + +def not_expr(s: Scanner) -> ast.expr: + if s.accept(TokenType.NOT): + return ast.UnaryOp(ast.Not(), not_expr(s)) + if s.accept(TokenType.LPAREN): + ret = expr(s) + s.accept(TokenType.RPAREN, reject=True) + return ret + ident = s.accept(TokenType.IDENT) + if ident: + name = ast.Name(IDENT_PREFIX + ident.value, ast.Load()) + if s.accept(TokenType.LPAREN): + ret = ast.Call(func=name, args=[], keywords=all_kwargs(s)) + s.accept(TokenType.RPAREN, reject=True) + else: + ret = name + return ret + + s.reject((TokenType.NOT, TokenType.LPAREN, TokenType.IDENT)) + + +BUILTIN_MATCHERS = {"True": True, "False": False, "None": None} + + +def single_kwarg(s: Scanner) -> ast.keyword: + keyword_name = s.accept(TokenType.IDENT, reject=True) + if not keyword_name.value.isidentifier(): + raise SyntaxError( + f"not a valid python identifier {keyword_name.value}", + (FILE_NAME, 1, keyword_name.pos + 1, s.input), + ) + if keyword.iskeyword(keyword_name.value): + raise SyntaxError( + f"unexpected reserved python keyword `{keyword_name.value}`", + (FILE_NAME, 1, keyword_name.pos + 1, s.input), + ) + s.accept(TokenType.EQUAL, reject=True) + + if value_token := s.accept(TokenType.STRING): + value: str | int | bool | None = value_token.value[1:-1] # strip quotes + else: + value_token = s.accept(TokenType.IDENT, reject=True) + if (number := value_token.value).isdigit() or ( + number.startswith("-") and number[1:].isdigit() + ): + value = int(number) + elif value_token.value in BUILTIN_MATCHERS: + value = BUILTIN_MATCHERS[value_token.value] + else: + raise SyntaxError( + f'unexpected character/s "{value_token.value}"', + (FILE_NAME, 1, value_token.pos + 1, s.input), + ) + + ret = ast.keyword(keyword_name.value, ast.Constant(value)) + return ret + + +def all_kwargs(s: Scanner) -> list[ast.keyword]: + ret = [single_kwarg(s)] + while s.accept(TokenType.COMMA): + ret.append(single_kwarg(s)) + return ret + + +class ExpressionMatcher(Protocol): + """A callable which, given an identifier and optional kwargs, should return + whether it matches in an :class:`Expression` evaluation. + + Should be prepared to handle arbitrary strings as input. + + If no kwargs are provided, the expression of the form `foo`. + If kwargs are provided, the expression is of the form `foo(1, b=True, "s")`. + + If the expression is not supported (e.g. don't want to accept the kwargs + syntax variant), should raise :class:`~pytest.UsageError`. + + Example:: + + def matcher(name: str, /, **kwargs: str | int | bool | None) -> bool: + # Match `cat`. + if name == "cat" and not kwargs: + return True + # Match `dog(barks=True)`. + if name == "dog" and kwargs == {"barks": False}: + return True + return False + """ + + def __call__(self, name: str, /, **kwargs: str | int | bool | None) -> bool: ... + + +@dataclasses.dataclass +class MatcherNameAdapter: + matcher: ExpressionMatcher + name: str + + def __bool__(self) -> bool: + return self.matcher(self.name) + + def __call__(self, **kwargs: str | int | bool | None) -> bool: + return self.matcher(self.name, **kwargs) + + +class MatcherAdapter(Mapping[str, MatcherNameAdapter]): + """Adapts a matcher function to a locals mapping as required by eval().""" + + def __init__(self, matcher: ExpressionMatcher) -> None: + self.matcher = matcher + + def __getitem__(self, key: str) -> MatcherNameAdapter: + return MatcherNameAdapter(matcher=self.matcher, name=key[len(IDENT_PREFIX) :]) + + def __iter__(self) -> Iterator[str]: + raise NotImplementedError() + + def __len__(self) -> int: + raise NotImplementedError() + + +@final +class Expression: + """A compiled match expression as used by -k and -m. + + The expression can be evaluated against different matchers. + """ + + __slots__ = ("_code", "input") + + def __init__(self, input: str, code: types.CodeType) -> None: + #: The original input line, as a string. + self.input: Final = input + self._code: Final = code + + @classmethod + def compile(cls, input: str) -> Expression: + """Compile a match expression. + + :param input: The input expression - one line. + + :raises SyntaxError: If the expression is malformed. + """ + astexpr = expression(Scanner(input)) + code = compile( + astexpr, + filename="", + mode="eval", + ) + return Expression(input, code) + + def evaluate(self, matcher: ExpressionMatcher) -> bool: + """Evaluate the match expression. + + :param matcher: + A callback which determines whether an identifier matches or not. + See the :class:`ExpressionMatcher` protocol for details and example. + + :returns: Whether the expression matches or not. + + :raises UsageError: + If the matcher doesn't support the expression. Cannot happen if the + matcher supports all expressions. + """ + return bool(eval(self._code, {"__builtins__": {}}, MatcherAdapter(matcher))) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/mark/structures.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/mark/structures.py new file mode 100644 index 0000000..16bb6d8 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/mark/structures.py @@ -0,0 +1,664 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import collections.abc +from collections.abc import Callable +from collections.abc import Collection +from collections.abc import Iterable +from collections.abc import Iterator +from collections.abc import Mapping +from collections.abc import MutableMapping +from collections.abc import Sequence +import dataclasses +import enum +import inspect +from typing import Any +from typing import final +from typing import NamedTuple +from typing import overload +from typing import TYPE_CHECKING +from typing import TypeVar +import warnings + +from .._code import getfslineno +from ..compat import NOTSET +from ..compat import NotSetType +from _pytest.config import Config +from _pytest.deprecated import check_ispytest +from _pytest.deprecated import MARKED_FIXTURE +from _pytest.outcomes import fail +from _pytest.raises import AbstractRaises +from _pytest.scope import _ScopeName +from _pytest.warning_types import PytestUnknownMarkWarning + + +if TYPE_CHECKING: + from ..nodes import Node + + +EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark" + + +# Singleton type for HIDDEN_PARAM, as described in: +# https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions +class _HiddenParam(enum.Enum): + token = 0 + + +#: Can be used as a parameter set id to hide it from the test name. +HIDDEN_PARAM = _HiddenParam.token + + +def istestfunc(func) -> bool: + return callable(func) and getattr(func, "__name__", "") != "" + + +def get_empty_parameterset_mark( + config: Config, argnames: Sequence[str], func +) -> MarkDecorator: + from ..nodes import Collector + + argslisting = ", ".join(argnames) + + _fs, lineno = getfslineno(func) + reason = f"got empty parameter set for ({argslisting})" + requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION) + if requested_mark in ("", None, "skip"): + mark = MARK_GEN.skip(reason=reason) + elif requested_mark == "xfail": + mark = MARK_GEN.xfail(reason=reason, run=False) + elif requested_mark == "fail_at_collect": + raise Collector.CollectError( + f"Empty parameter set in '{func.__name__}' at line {lineno + 1}" + ) + else: + raise LookupError(requested_mark) + return mark + + +class ParameterSet(NamedTuple): + """A set of values for a set of parameters along with associated marks and + an optional ID for the set. + + Examples:: + + pytest.param(1, 2, 3) + # ParameterSet(values=(1, 2, 3), marks=(), id=None) + + pytest.param("hello", id="greeting") + # ParameterSet(values=("hello",), marks=(), id="greeting") + + # Parameter set with marks + pytest.param(42, marks=pytest.mark.xfail) + # ParameterSet(values=(42,), marks=(MarkDecorator(...),), id=None) + + # From parametrize mark (parameter names + list of parameter sets) + pytest.mark.parametrize( + ("a", "b", "expected"), + [ + (1, 2, 3), + pytest.param(40, 2, 42, id="everything"), + ], + ) + # ParameterSet(values=(1, 2, 3), marks=(), id=None) + # ParameterSet(values=(40, 2, 42), marks=(), id="everything") + """ + + values: Sequence[object | NotSetType] + marks: Collection[MarkDecorator | Mark] + id: str | _HiddenParam | None + + @classmethod + def param( + cls, + *values: object, + marks: MarkDecorator | Collection[MarkDecorator | Mark] = (), + id: str | _HiddenParam | None = None, + ) -> ParameterSet: + if isinstance(marks, MarkDecorator): + marks = (marks,) + else: + assert isinstance(marks, collections.abc.Collection) + if any(i.name == "usefixtures" for i in marks): + raise ValueError( + "pytest.param cannot add pytest.mark.usefixtures; see " + "https://docs.pytest.org/en/stable/reference/reference.html#pytest-param" + ) + + if id is not None: + if not isinstance(id, str) and id is not HIDDEN_PARAM: + raise TypeError( + "Expected id to be a string or a `pytest.HIDDEN_PARAM` sentinel, " + f"got {type(id)}: {id!r}", + ) + return cls(values, marks, id) + + @classmethod + def extract_from( + cls, + parameterset: ParameterSet | Sequence[object] | object, + force_tuple: bool = False, + ) -> ParameterSet: + """Extract from an object or objects. + + :param parameterset: + A legacy style parameterset that may or may not be a tuple, + and may or may not be wrapped into a mess of mark objects. + + :param force_tuple: + Enforce tuple wrapping so single argument tuple values + don't get decomposed and break tests. + """ + if isinstance(parameterset, cls): + return parameterset + if force_tuple: + return cls.param(parameterset) + else: + # TODO: Refactor to fix this type-ignore. Currently the following + # passes type-checking but crashes: + # + # @pytest.mark.parametrize(('x', 'y'), [1, 2]) + # def test_foo(x, y): pass + return cls(parameterset, marks=[], id=None) # type: ignore[arg-type] + + @staticmethod + def _parse_parametrize_args( + argnames: str | Sequence[str], + argvalues: Iterable[ParameterSet | Sequence[object] | object], + *args, + **kwargs, + ) -> tuple[Sequence[str], bool]: + if isinstance(argnames, str): + argnames = [x.strip() for x in argnames.split(",") if x.strip()] + force_tuple = len(argnames) == 1 + else: + force_tuple = False + return argnames, force_tuple + + @staticmethod + def _parse_parametrize_parameters( + argvalues: Iterable[ParameterSet | Sequence[object] | object], + force_tuple: bool, + ) -> list[ParameterSet]: + return [ + ParameterSet.extract_from(x, force_tuple=force_tuple) for x in argvalues + ] + + @classmethod + def _for_parametrize( + cls, + argnames: str | Sequence[str], + argvalues: Iterable[ParameterSet | Sequence[object] | object], + func, + config: Config, + nodeid: str, + ) -> tuple[Sequence[str], list[ParameterSet]]: + argnames, force_tuple = cls._parse_parametrize_args(argnames, argvalues) + parameters = cls._parse_parametrize_parameters(argvalues, force_tuple) + del argvalues + + if parameters: + # Check all parameter sets have the correct number of values. + for param in parameters: + if len(param.values) != len(argnames): + msg = ( + '{nodeid}: in "parametrize" the number of names ({names_len}):\n' + " {names}\n" + "must be equal to the number of values ({values_len}):\n" + " {values}" + ) + fail( + msg.format( + nodeid=nodeid, + values=param.values, + names=argnames, + names_len=len(argnames), + values_len=len(param.values), + ), + pytrace=False, + ) + else: + # Empty parameter set (likely computed at runtime): create a single + # parameter set with NOTSET values, with the "empty parameter set" mark applied to it. + mark = get_empty_parameterset_mark(config, argnames, func) + parameters.append( + ParameterSet( + values=(NOTSET,) * len(argnames), marks=[mark], id="NOTSET" + ) + ) + return argnames, parameters + + +@final +@dataclasses.dataclass(frozen=True) +class Mark: + """A pytest mark.""" + + #: Name of the mark. + name: str + #: Positional arguments of the mark decorator. + args: tuple[Any, ...] + #: Keyword arguments of the mark decorator. + kwargs: Mapping[str, Any] + + #: Source Mark for ids with parametrize Marks. + _param_ids_from: Mark | None = dataclasses.field(default=None, repr=False) + #: Resolved/generated ids with parametrize Marks. + _param_ids_generated: Sequence[str] | None = dataclasses.field( + default=None, repr=False + ) + + def __init__( + self, + name: str, + args: tuple[Any, ...], + kwargs: Mapping[str, Any], + param_ids_from: Mark | None = None, + param_ids_generated: Sequence[str] | None = None, + *, + _ispytest: bool = False, + ) -> None: + """:meta private:""" + check_ispytest(_ispytest) + # Weirdness to bypass frozen=True. + object.__setattr__(self, "name", name) + object.__setattr__(self, "args", args) + object.__setattr__(self, "kwargs", kwargs) + object.__setattr__(self, "_param_ids_from", param_ids_from) + object.__setattr__(self, "_param_ids_generated", param_ids_generated) + + def _has_param_ids(self) -> bool: + return "ids" in self.kwargs or len(self.args) >= 4 + + def combined_with(self, other: Mark) -> Mark: + """Return a new Mark which is a combination of this + Mark and another Mark. + + Combines by appending args and merging kwargs. + + :param Mark other: The mark to combine with. + :rtype: Mark + """ + assert self.name == other.name + + # Remember source of ids with parametrize Marks. + param_ids_from: Mark | None = None + if self.name == "parametrize": + if other._has_param_ids(): + param_ids_from = other + elif self._has_param_ids(): + param_ids_from = self + + return Mark( + self.name, + self.args + other.args, + dict(self.kwargs, **other.kwargs), + param_ids_from=param_ids_from, + _ispytest=True, + ) + + +# A generic parameter designating an object to which a Mark may +# be applied -- a test function (callable) or class. +# Note: a lambda is not allowed, but this can't be represented. +Markable = TypeVar("Markable", bound=Callable[..., object] | type) + + +@dataclasses.dataclass +class MarkDecorator: + """A decorator for applying a mark on test functions and classes. + + ``MarkDecorators`` are created with ``pytest.mark``:: + + mark1 = pytest.mark.NAME # Simple MarkDecorator + mark2 = pytest.mark.NAME(name1=value) # Parametrized MarkDecorator + + and can then be applied as decorators to test functions:: + + @mark2 + def test_function(): + pass + + When a ``MarkDecorator`` is called, it does the following: + + 1. If called with a single class as its only positional argument and no + additional keyword arguments, it attaches the mark to the class so it + gets applied automatically to all test cases found in that class. + + 2. If called with a single function as its only positional argument and + no additional keyword arguments, it attaches the mark to the function, + containing all the arguments already stored internally in the + ``MarkDecorator``. + + 3. When called in any other case, it returns a new ``MarkDecorator`` + instance with the original ``MarkDecorator``'s content updated with + the arguments passed to this call. + + Note: The rules above prevent a ``MarkDecorator`` from storing only a + single function or class reference as its positional argument with no + additional keyword or positional arguments. You can work around this by + using `with_args()`. + """ + + mark: Mark + + def __init__(self, mark: Mark, *, _ispytest: bool = False) -> None: + """:meta private:""" + check_ispytest(_ispytest) + self.mark = mark + + @property + def name(self) -> str: + """Alias for mark.name.""" + return self.mark.name + + @property + def args(self) -> tuple[Any, ...]: + """Alias for mark.args.""" + return self.mark.args + + @property + def kwargs(self) -> Mapping[str, Any]: + """Alias for mark.kwargs.""" + return self.mark.kwargs + + @property + def markname(self) -> str: + """:meta private:""" + return self.name # for backward-compat (2.4.1 had this attr) + + def with_args(self, *args: object, **kwargs: object) -> MarkDecorator: + """Return a MarkDecorator with extra arguments added. + + Unlike calling the MarkDecorator, with_args() can be used even + if the sole argument is a callable/class. + """ + mark = Mark(self.name, args, kwargs, _ispytest=True) + return MarkDecorator(self.mark.combined_with(mark), _ispytest=True) + + # Type ignored because the overloads overlap with an incompatible + # return type. Not much we can do about that. Thankfully mypy picks + # the first match so it works out even if we break the rules. + @overload + def __call__(self, arg: Markable) -> Markable: # type: ignore[overload-overlap] + pass + + @overload + def __call__(self, *args: object, **kwargs: object) -> MarkDecorator: + pass + + def __call__(self, *args: object, **kwargs: object): + """Call the MarkDecorator.""" + if args and not kwargs: + func = args[0] + is_class = inspect.isclass(func) + # For staticmethods/classmethods, the marks are eventually fetched from the + # function object, not the descriptor, so unwrap. + unwrapped_func = func + if isinstance(func, staticmethod | classmethod): + unwrapped_func = func.__func__ + if len(args) == 1 and (istestfunc(unwrapped_func) or is_class): + store_mark(unwrapped_func, self.mark, stacklevel=3) + return func + return self.with_args(*args, **kwargs) + + +def get_unpacked_marks( + obj: object | type, + *, + consider_mro: bool = True, +) -> list[Mark]: + """Obtain the unpacked marks that are stored on an object. + + If obj is a class and consider_mro is true, return marks applied to + this class and all of its super-classes in MRO order. If consider_mro + is false, only return marks applied directly to this class. + """ + if isinstance(obj, type): + if not consider_mro: + mark_lists = [obj.__dict__.get("pytestmark", [])] + else: + mark_lists = [ + x.__dict__.get("pytestmark", []) for x in reversed(obj.__mro__) + ] + mark_list = [] + for item in mark_lists: + if isinstance(item, list): + mark_list.extend(item) + else: + mark_list.append(item) + else: + mark_attribute = getattr(obj, "pytestmark", []) + if isinstance(mark_attribute, list): + mark_list = mark_attribute + else: + mark_list = [mark_attribute] + return list(normalize_mark_list(mark_list)) + + +def normalize_mark_list( + mark_list: Iterable[Mark | MarkDecorator], +) -> Iterable[Mark]: + """ + Normalize an iterable of Mark or MarkDecorator objects into a list of marks + by retrieving the `mark` attribute on MarkDecorator instances. + + :param mark_list: marks to normalize + :returns: A new list of the extracted Mark objects + """ + for mark in mark_list: + mark_obj = getattr(mark, "mark", mark) + if not isinstance(mark_obj, Mark): + raise TypeError(f"got {mark_obj!r} instead of Mark") + yield mark_obj + + +def store_mark(obj, mark: Mark, *, stacklevel: int = 2) -> None: + """Store a Mark on an object. + + This is used to implement the Mark declarations/decorators correctly. + """ + assert isinstance(mark, Mark), mark + + from ..fixtures import getfixturemarker + + if getfixturemarker(obj) is not None: + warnings.warn(MARKED_FIXTURE, stacklevel=stacklevel) + + # Always reassign name to avoid updating pytestmark in a reference that + # was only borrowed. + obj.pytestmark = [*get_unpacked_marks(obj, consider_mro=False), mark] + + +# Typing for builtin pytest marks. This is cheating; it gives builtin marks +# special privilege, and breaks modularity. But practicality beats purity... +if TYPE_CHECKING: + + class _SkipMarkDecorator(MarkDecorator): + @overload # type: ignore[override,no-overload-impl] + def __call__(self, arg: Markable) -> Markable: ... + + @overload + def __call__(self, reason: str = ...) -> MarkDecorator: ... + + class _SkipifMarkDecorator(MarkDecorator): + def __call__( # type: ignore[override] + self, + condition: str | bool = ..., + *conditions: str | bool, + reason: str = ..., + ) -> MarkDecorator: ... + + class _XfailMarkDecorator(MarkDecorator): + @overload # type: ignore[override,no-overload-impl] + def __call__(self, arg: Markable) -> Markable: ... + + @overload + def __call__( + self, + condition: str | bool = False, + *conditions: str | bool, + reason: str = ..., + run: bool = ..., + raises: None + | type[BaseException] + | tuple[type[BaseException], ...] + | AbstractRaises[BaseException] = ..., + strict: bool = ..., + ) -> MarkDecorator: ... + + class _ParametrizeMarkDecorator(MarkDecorator): + def __call__( # type: ignore[override] + self, + argnames: str | Sequence[str], + argvalues: Iterable[ParameterSet | Sequence[object] | object], + *, + indirect: bool | Sequence[str] = ..., + ids: Iterable[None | str | float | int | bool] + | Callable[[Any], object | None] + | None = ..., + scope: _ScopeName | None = ..., + ) -> MarkDecorator: ... + + class _UsefixturesMarkDecorator(MarkDecorator): + def __call__(self, *fixtures: str) -> MarkDecorator: # type: ignore[override] + ... + + class _FilterwarningsMarkDecorator(MarkDecorator): + def __call__(self, *filters: str) -> MarkDecorator: # type: ignore[override] + ... + + +@final +class MarkGenerator: + """Factory for :class:`MarkDecorator` objects - exposed as + a ``pytest.mark`` singleton instance. + + Example:: + + import pytest + + + @pytest.mark.slowtest + def test_function(): + pass + + applies a 'slowtest' :class:`Mark` on ``test_function``. + """ + + # See TYPE_CHECKING above. + if TYPE_CHECKING: + skip: _SkipMarkDecorator + skipif: _SkipifMarkDecorator + xfail: _XfailMarkDecorator + parametrize: _ParametrizeMarkDecorator + usefixtures: _UsefixturesMarkDecorator + filterwarnings: _FilterwarningsMarkDecorator + + def __init__(self, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._config: Config | None = None + self._markers: set[str] = set() + + def __getattr__(self, name: str) -> MarkDecorator: + """Generate a new :class:`MarkDecorator` with the given name.""" + if name[0] == "_": + raise AttributeError("Marker name must NOT start with underscore") + + if self._config is not None: + # We store a set of markers as a performance optimisation - if a mark + # name is in the set we definitely know it, but a mark may be known and + # not in the set. We therefore start by updating the set! + if name not in self._markers: + for line in self._config.getini("markers"): + # example lines: "skipif(condition): skip the given test if..." + # or "hypothesis: tests which use Hypothesis", so to get the + # marker name we split on both `:` and `(`. + marker = line.split(":")[0].split("(")[0].strip() + self._markers.add(marker) + + # If the name is not in the set of known marks after updating, + # then it really is time to issue a warning or an error. + if name not in self._markers: + # Raise a specific error for common misspellings of "parametrize". + if name in ["parameterize", "parametrise", "parameterise"]: + __tracebackhide__ = True + fail(f"Unknown '{name}' mark, did you mean 'parametrize'?") + + strict_markers = self._config.getini("strict_markers") + if strict_markers is None: + strict_markers = self._config.getini("strict") + if strict_markers: + fail( + f"{name!r} not found in `markers` configuration option", + pytrace=False, + ) + + warnings.warn( + f"Unknown pytest.mark.{name} - is this a typo? You can register " + "custom marks to avoid this warning - for details, see " + "https://docs.pytest.org/en/stable/how-to/mark.html", + PytestUnknownMarkWarning, + 2, + ) + + return MarkDecorator(Mark(name, (), {}, _ispytest=True), _ispytest=True) + + +MARK_GEN = MarkGenerator(_ispytest=True) + + +@final +class NodeKeywords(MutableMapping[str, Any]): + __slots__ = ("_markers", "node", "parent") + + def __init__(self, node: Node) -> None: + self.node = node + self.parent = node.parent + self._markers = {node.name: True} + + def __getitem__(self, key: str) -> Any: + try: + return self._markers[key] + except KeyError: + if self.parent is None: + raise + return self.parent.keywords[key] + + def __setitem__(self, key: str, value: Any) -> None: + self._markers[key] = value + + # Note: we could've avoided explicitly implementing some of the methods + # below and use the collections.abc fallback, but that would be slow. + + def __contains__(self, key: object) -> bool: + return key in self._markers or ( + self.parent is not None and key in self.parent.keywords + ) + + def update( # type: ignore[override] + self, + other: Mapping[str, Any] | Iterable[tuple[str, Any]] = (), + **kwds: Any, + ) -> None: + self._markers.update(other) + self._markers.update(kwds) + + def __delitem__(self, key: str) -> None: + raise ValueError("cannot delete key in keywords dict") + + def __iter__(self) -> Iterator[str]: + # Doesn't need to be fast. + yield from self._markers + if self.parent is not None: + for keyword in self.parent.keywords: + # self._marks and self.parent.keywords can have duplicates. + if keyword not in self._markers: + yield keyword + + def __len__(self) -> int: + # Doesn't need to be fast. + return sum(1 for keyword in self) + + def __repr__(self) -> str: + return f"" diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/monkeypatch.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/monkeypatch.py new file mode 100644 index 0000000..07cc3fc --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/monkeypatch.py @@ -0,0 +1,435 @@ +# mypy: allow-untyped-defs +"""Monkeypatching and mocking functionality.""" + +from __future__ import annotations + +from collections.abc import Generator +from collections.abc import Mapping +from collections.abc import MutableMapping +from contextlib import contextmanager +import os +from pathlib import Path +import re +import sys +from typing import Any +from typing import final +from typing import overload +from typing import TypeVar +import warnings + +from _pytest.deprecated import MONKEYPATCH_LEGACY_NAMESPACE_PACKAGES +from _pytest.fixtures import fixture +from _pytest.warning_types import PytestWarning + + +RE_IMPORT_ERROR_NAME = re.compile(r"^No module named (.*)$") + + +K = TypeVar("K") +V = TypeVar("V") + + +@fixture +def monkeypatch() -> Generator[MonkeyPatch]: + """A convenient fixture for monkey-patching. + + The fixture provides these methods to modify objects, dictionaries, or + :data:`os.environ`: + + * :meth:`monkeypatch.setattr(obj, name, value, raising=True) ` + * :meth:`monkeypatch.delattr(obj, name, raising=True) ` + * :meth:`monkeypatch.setitem(mapping, name, value) ` + * :meth:`monkeypatch.delitem(obj, name, raising=True) ` + * :meth:`monkeypatch.setenv(name, value, prepend=None) ` + * :meth:`monkeypatch.delenv(name, raising=True) ` + * :meth:`monkeypatch.syspath_prepend(path) ` + * :meth:`monkeypatch.chdir(path) ` + * :meth:`monkeypatch.context() ` + + All modifications will be undone after the requesting test function or + fixture has finished. The ``raising`` parameter determines if a :class:`KeyError` + or :class:`AttributeError` will be raised if the set/deletion operation does not have the + specified target. + + To undo modifications done by the fixture in a contained scope, + use :meth:`context() `. + """ + mpatch = MonkeyPatch() + yield mpatch + mpatch.undo() + + +def resolve(name: str) -> object: + # Simplified from zope.dottedname. + parts = name.split(".") + + used = parts.pop(0) + found: object = __import__(used) + for part in parts: + used += "." + part + try: + found = getattr(found, part) + except AttributeError: + pass + else: + continue + # We use explicit un-nesting of the handling block in order + # to avoid nested exceptions. + try: + __import__(used) + except ImportError as ex: + expected = str(ex).split()[-1] + if expected == used: + raise + else: + raise ImportError(f"import error in {used}: {ex}") from ex + found = annotated_getattr(found, part, used) + return found + + +def annotated_getattr(obj: object, name: str, ann: str) -> object: + try: + obj = getattr(obj, name) + except AttributeError as e: + raise AttributeError( + f"{type(obj).__name__!r} object at {ann} has no attribute {name!r}" + ) from e + return obj + + +def derive_importpath(import_path: str, raising: bool) -> tuple[str, object]: + if not isinstance(import_path, str) or "." not in import_path: + raise TypeError(f"must be absolute import path string, not {import_path!r}") + module, attr = import_path.rsplit(".", 1) + target = resolve(module) + if raising: + annotated_getattr(target, attr, ann=module) + return attr, target + + +class Notset: + def __repr__(self) -> str: + return "" + + +notset = Notset() + + +@final +class MonkeyPatch: + """Helper to conveniently monkeypatch attributes/items/environment + variables/syspath. + + Returned by the :fixture:`monkeypatch` fixture. + + .. versionchanged:: 6.2 + Can now also be used directly as `pytest.MonkeyPatch()`, for when + the fixture is not available. In this case, use + :meth:`with MonkeyPatch.context() as mp: ` or remember to call + :meth:`undo` explicitly. + """ + + def __init__(self) -> None: + self._setattr: list[tuple[object, str, object]] = [] + self._setitem: list[tuple[Mapping[Any, Any], object, object]] = [] + self._cwd: str | None = None + self._savesyspath: list[str] | None = None + + @classmethod + @contextmanager + def context(cls) -> Generator[MonkeyPatch]: + """Context manager that returns a new :class:`MonkeyPatch` object + which undoes any patching done inside the ``with`` block upon exit. + + Example: + + .. code-block:: python + + import functools + + + def test_partial(monkeypatch): + with monkeypatch.context() as m: + m.setattr(functools, "partial", 3) + + Useful in situations where it is desired to undo some patches before the test ends, + such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples + of this see :issue:`3290`). + """ + m = cls() + try: + yield m + finally: + m.undo() + + @overload + def setattr( + self, + target: str, + name: object, + value: Notset = ..., + raising: bool = ..., + ) -> None: ... + + @overload + def setattr( + self, + target: object, + name: str, + value: object, + raising: bool = ..., + ) -> None: ... + + def setattr( + self, + target: str | object, + name: object | str, + value: object = notset, + raising: bool = True, + ) -> None: + """ + Set attribute value on target, memorizing the old value. + + For example: + + .. code-block:: python + + import os + + monkeypatch.setattr(os, "getcwd", lambda: "/") + + The code above replaces the :func:`os.getcwd` function by a ``lambda`` which + always returns ``"/"``. + + For convenience, you can specify a string as ``target`` which + will be interpreted as a dotted import path, with the last part + being the attribute name: + + .. code-block:: python + + monkeypatch.setattr("os.getcwd", lambda: "/") + + Raises :class:`AttributeError` if the attribute does not exist, unless + ``raising`` is set to False. + + **Where to patch** + + ``monkeypatch.setattr`` works by (temporarily) changing the object that a name points to with another one. + There can be many names pointing to any individual object, so for patching to work you must ensure + that you patch the name used by the system under test. + + See the section :ref:`Where to patch ` in the :mod:`unittest.mock` + docs for a complete explanation, which is meant for :func:`unittest.mock.patch` but + applies to ``monkeypatch.setattr`` as well. + """ + __tracebackhide__ = True + import inspect + + if isinstance(value, Notset): + if not isinstance(target, str): + raise TypeError( + "use setattr(target, name, value) or " + "setattr(target, value) with target being a dotted " + "import string" + ) + value = name + name, target = derive_importpath(target, raising) + else: + if not isinstance(name, str): + raise TypeError( + "use setattr(target, name, value) with name being a string or " + "setattr(target, value) with target being a dotted " + "import string" + ) + + oldval = getattr(target, name, notset) + if raising and oldval is notset: + raise AttributeError(f"{target!r} has no attribute {name!r}") + + # avoid class descriptors like staticmethod/classmethod + if inspect.isclass(target): + oldval = target.__dict__.get(name, notset) + self._setattr.append((target, name, oldval)) + setattr(target, name, value) + + def delattr( + self, + target: object | str, + name: str | Notset = notset, + raising: bool = True, + ) -> None: + """Delete attribute ``name`` from ``target``. + + If no ``name`` is specified and ``target`` is a string + it will be interpreted as a dotted import path with the + last part being the attribute name. + + Raises AttributeError it the attribute does not exist, unless + ``raising`` is set to False. + """ + __tracebackhide__ = True + import inspect + + if isinstance(name, Notset): + if not isinstance(target, str): + raise TypeError( + "use delattr(target, name) or " + "delattr(target) with target being a dotted " + "import string" + ) + name, target = derive_importpath(target, raising) + + if not hasattr(target, name): + if raising: + raise AttributeError(name) + else: + oldval = getattr(target, name, notset) + # Avoid class descriptors like staticmethod/classmethod. + if inspect.isclass(target): + oldval = target.__dict__.get(name, notset) + self._setattr.append((target, name, oldval)) + delattr(target, name) + + def setitem(self, dic: Mapping[K, V], name: K, value: V) -> None: + """Set dictionary entry ``name`` to value.""" + self._setitem.append((dic, name, dic.get(name, notset))) + # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict + dic[name] = value # type: ignore[index] + + def delitem(self, dic: Mapping[K, V], name: K, raising: bool = True) -> None: + """Delete ``name`` from dict. + + Raises ``KeyError`` if it doesn't exist, unless ``raising`` is set to + False. + """ + if name not in dic: + if raising: + raise KeyError(name) + else: + self._setitem.append((dic, name, dic.get(name, notset))) + # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict + del dic[name] # type: ignore[attr-defined] + + def setenv(self, name: str, value: str, prepend: str | None = None) -> None: + """Set environment variable ``name`` to ``value``. + + If ``prepend`` is a character, read the current environment variable + value and prepend the ``value`` adjoined with the ``prepend`` + character. + """ + if not isinstance(value, str): + warnings.warn( # type: ignore[unreachable] + PytestWarning( + f"Value of environment variable {name} type should be str, but got " + f"{value!r} (type: {type(value).__name__}); converted to str implicitly" + ), + stacklevel=2, + ) + value = str(value) + if prepend and name in os.environ: + value = value + prepend + os.environ[name] + self.setitem(os.environ, name, value) + + def delenv(self, name: str, raising: bool = True) -> None: + """Delete ``name`` from the environment. + + Raises ``KeyError`` if it does not exist, unless ``raising`` is set to + False. + """ + environ: MutableMapping[str, str] = os.environ + self.delitem(environ, name, raising=raising) + + def syspath_prepend(self, path) -> None: + """Prepend ``path`` to ``sys.path`` list of import locations.""" + if self._savesyspath is None: + self._savesyspath = sys.path[:] + sys.path.insert(0, str(path)) + + # https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171 + # this is only needed when pkg_resources was already loaded by the namespace package + if "pkg_resources" in sys.modules: + import pkg_resources + from pkg_resources import fixup_namespace_packages + + # Only issue deprecation warning if this call would actually have an + # effect for this specific path. + if ( + hasattr(pkg_resources, "_namespace_packages") + and pkg_resources._namespace_packages + ): + path_obj = Path(str(path)) + for ns_pkg in pkg_resources._namespace_packages: + if ns_pkg is None: + continue + ns_pkg_path = path_obj / ns_pkg.replace(".", os.sep) + if ns_pkg_path.is_dir(): + warnings.warn( + MONKEYPATCH_LEGACY_NAMESPACE_PACKAGES, stacklevel=2 + ) + break + + fixup_namespace_packages(str(path)) + + # A call to syspathinsert() usually means that the caller wants to + # import some dynamically created files, thus with python3 we + # invalidate its import caches. + # This is especially important when any namespace package is in use, + # since then the mtime based FileFinder cache (that gets created in + # this case already) gets not invalidated when writing the new files + # quickly afterwards. + from importlib import invalidate_caches + + invalidate_caches() + + def chdir(self, path: str | os.PathLike[str]) -> None: + """Change the current working directory to the specified path. + + :param path: + The path to change into. + """ + if self._cwd is None: + self._cwd = os.getcwd() + os.chdir(path) + + def undo(self) -> None: + """Undo previous changes. + + This call consumes the undo stack. Calling it a second time has no + effect unless you do more monkeypatching after the undo call. + + There is generally no need to call `undo()`, since it is + called automatically during tear-down. + + .. note:: + The same `monkeypatch` fixture is used across a + single test function invocation. If `monkeypatch` is used both by + the test function itself and one of the test fixtures, + calling `undo()` will undo all of the changes made in + both functions. + + Prefer to use :meth:`context() ` instead. + """ + for obj, name, value in reversed(self._setattr): + if value is not notset: + setattr(obj, name, value) + else: + delattr(obj, name) + self._setattr[:] = [] + for dictionary, key, value in reversed(self._setitem): + if value is notset: + try: + # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict + del dictionary[key] # type: ignore[attr-defined] + except KeyError: + pass # Was already deleted, so we have the desired state. + else: + # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict + dictionary[key] = value # type: ignore[index] + self._setitem[:] = [] + if self._savesyspath is not None: + sys.path[:] = self._savesyspath + self._savesyspath = None + + if self._cwd is not None: + os.chdir(self._cwd) + self._cwd = None diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/nodes.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/nodes.py new file mode 100644 index 0000000..6690f6a --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/nodes.py @@ -0,0 +1,772 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import abc +from collections.abc import Callable +from collections.abc import Iterable +from collections.abc import Iterator +from collections.abc import MutableMapping +from functools import cached_property +from functools import lru_cache +import os +import pathlib +from pathlib import Path +from typing import Any +from typing import cast +from typing import NoReturn +from typing import overload +from typing import TYPE_CHECKING +from typing import TypeVar +import warnings + +import pluggy + +import _pytest._code +from _pytest._code import getfslineno +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest._code.code import Traceback +from _pytest._code.code import TracebackStyle +from _pytest.compat import LEGACY_PATH +from _pytest.compat import signature +from _pytest.config import Config +from _pytest.config import ConftestImportFailure +from _pytest.config.compat import _check_path +from _pytest.deprecated import NODE_CTOR_FSPATH_ARG +from _pytest.mark.structures import Mark +from _pytest.mark.structures import MarkDecorator +from _pytest.mark.structures import NodeKeywords +from _pytest.outcomes import fail +from _pytest.pathlib import absolutepath +from _pytest.stash import Stash +from _pytest.warning_types import PytestWarning + + +if TYPE_CHECKING: + from typing_extensions import Self + + # Imported here due to circular import. + from _pytest.main import Session + + +SEP = "/" + +tracebackcutdir = Path(_pytest.__file__).parent + + +_T = TypeVar("_T") + + +def _imply_path( + node_type: type[Node], + path: Path | None, + fspath: LEGACY_PATH | None, +) -> Path: + if fspath is not None: + warnings.warn( + NODE_CTOR_FSPATH_ARG.format( + node_type_name=node_type.__name__, + ), + stacklevel=6, + ) + if path is not None: + if fspath is not None: + _check_path(path, fspath) + return path + else: + assert fspath is not None + return Path(fspath) + + +_NodeType = TypeVar("_NodeType", bound="Node") + + +class NodeMeta(abc.ABCMeta): + """Metaclass used by :class:`Node` to enforce that direct construction raises + :class:`Failed`. + + This behaviour supports the indirection introduced with :meth:`Node.from_parent`, + the named constructor to be used instead of direct construction. The design + decision to enforce indirection with :class:`NodeMeta` was made as a + temporary aid for refactoring the collection tree, which was diagnosed to + have :class:`Node` objects whose creational patterns were overly entangled. + Once the refactoring is complete, this metaclass can be removed. + + See https://github.com/pytest-dev/pytest/projects/3 for an overview of the + progress on detangling the :class:`Node` classes. + """ + + def __call__(cls, *k, **kw) -> NoReturn: + msg = ( + "Direct construction of {name} has been deprecated, please use {name}.from_parent.\n" + "See " + "https://docs.pytest.org/en/stable/deprecations.html#node-construction-changed-to-node-from-parent" + " for more details." + ).format(name=f"{cls.__module__}.{cls.__name__}") + fail(msg, pytrace=False) + + def _create(cls: type[_T], *k, **kw) -> _T: + try: + return super().__call__(*k, **kw) # type: ignore[no-any-return,misc] + except TypeError: + sig = signature(getattr(cls, "__init__")) + known_kw = {k: v for k, v in kw.items() if k in sig.parameters} + from .warning_types import PytestDeprecationWarning + + warnings.warn( + PytestDeprecationWarning( + f"{cls} is not using a cooperative constructor and only takes {set(known_kw)}.\n" + "See https://docs.pytest.org/en/stable/deprecations.html" + "#constructors-of-custom-pytest-node-subclasses-should-take-kwargs " + "for more details." + ) + ) + + return super().__call__(*k, **known_kw) # type: ignore[no-any-return,misc] + + +class Node(abc.ABC, metaclass=NodeMeta): + r"""Base class of :class:`Collector` and :class:`Item`, the components of + the test collection tree. + + ``Collector``\'s are the internal nodes of the tree, and ``Item``\'s are the + leaf nodes. + """ + + # Implemented in the legacypath plugin. + #: A ``LEGACY_PATH`` copy of the :attr:`path` attribute. Intended for usage + #: for methods not migrated to ``pathlib.Path`` yet, such as + #: :meth:`Item.reportinfo `. Will be deprecated in + #: a future release, prefer using :attr:`path` instead. + fspath: LEGACY_PATH + + # Use __slots__ to make attribute access faster. + # Note that __dict__ is still available. + __slots__ = ( + "__dict__", + "_nodeid", + "_store", + "config", + "name", + "parent", + "path", + "session", + ) + + def __init__( + self, + name: str, + parent: Node | None = None, + config: Config | None = None, + session: Session | None = None, + fspath: LEGACY_PATH | None = None, + path: Path | None = None, + nodeid: str | None = None, + ) -> None: + #: A unique name within the scope of the parent node. + self.name: str = name + + #: The parent collector node. + self.parent = parent + + if config: + #: The pytest config object. + self.config: Config = config + else: + if not parent: + raise TypeError("config or parent must be provided") + self.config = parent.config + + if session: + #: The pytest session this node is part of. + self.session: Session = session + else: + if not parent: + raise TypeError("session or parent must be provided") + self.session = parent.session + + if path is None and fspath is None: + path = getattr(parent, "path", None) + #: Filesystem path where this node was collected from (can be None). + self.path: pathlib.Path = _imply_path(type(self), path, fspath=fspath) + + # The explicit annotation is to avoid publicly exposing NodeKeywords. + #: Keywords/markers collected from all scopes. + self.keywords: MutableMapping[str, Any] = NodeKeywords(self) + + #: The marker objects belonging to this node. + self.own_markers: list[Mark] = [] + + #: Allow adding of extra keywords to use for matching. + self.extra_keyword_matches: set[str] = set() + + if nodeid is not None: + assert "::()" not in nodeid + self._nodeid = nodeid + else: + if not self.parent: + raise TypeError("nodeid or parent must be provided") + self._nodeid = self.parent.nodeid + "::" + self.name + + #: A place where plugins can store information on the node for their + #: own use. + self.stash: Stash = Stash() + # Deprecated alias. Was never public. Can be removed in a few releases. + self._store = self.stash + + @classmethod + def from_parent(cls, parent: Node, **kw) -> Self: + """Public constructor for Nodes. + + This indirection got introduced in order to enable removing + the fragile logic from the node constructors. + + Subclasses can use ``super().from_parent(...)`` when overriding the + construction. + + :param parent: The parent node of this Node. + """ + if "config" in kw: + raise TypeError("config is not a valid argument for from_parent") + if "session" in kw: + raise TypeError("session is not a valid argument for from_parent") + return cls._create(parent=parent, **kw) + + @property + def ihook(self) -> pluggy.HookRelay: + """fspath-sensitive hook proxy used to call pytest hooks.""" + return self.session.gethookproxy(self.path) + + def __repr__(self) -> str: + return "<{} {}>".format(self.__class__.__name__, getattr(self, "name", None)) + + def warn(self, warning: Warning) -> None: + """Issue a warning for this Node. + + Warnings will be displayed after the test session, unless explicitly suppressed. + + :param Warning warning: + The warning instance to issue. + + :raises ValueError: If ``warning`` instance is not a subclass of Warning. + + Example usage: + + .. code-block:: python + + node.warn(PytestWarning("some message")) + node.warn(UserWarning("some message")) + + .. versionchanged:: 6.2 + Any subclass of :class:`Warning` is now accepted, rather than only + :class:`PytestWarning ` subclasses. + """ + # enforce type checks here to avoid getting a generic type error later otherwise. + if not isinstance(warning, Warning): + raise ValueError( + f"warning must be an instance of Warning or subclass, got {warning!r}" + ) + path, lineno = get_fslocation_from_item(self) + assert lineno is not None + warnings.warn_explicit( + warning, + category=None, + filename=str(path), + lineno=lineno + 1, + ) + + # Methods for ordering nodes. + + @property + def nodeid(self) -> str: + """A ::-separated string denoting its collection tree address.""" + return self._nodeid + + def __hash__(self) -> int: + return hash(self._nodeid) + + def setup(self) -> None: + pass + + def teardown(self) -> None: + pass + + def iter_parents(self) -> Iterator[Node]: + """Iterate over all parent collectors starting from and including self + up to the root of the collection tree. + + .. versionadded:: 8.1 + """ + parent: Node | None = self + while parent is not None: + yield parent + parent = parent.parent + + def listchain(self) -> list[Node]: + """Return a list of all parent collectors starting from the root of the + collection tree down to and including self.""" + chain = [] + item: Node | None = self + while item is not None: + chain.append(item) + item = item.parent + chain.reverse() + return chain + + def add_marker(self, marker: str | MarkDecorator, append: bool = True) -> None: + """Dynamically add a marker object to the node. + + :param marker: + The marker. + :param append: + Whether to append the marker, or prepend it. + """ + from _pytest.mark import MARK_GEN + + if isinstance(marker, MarkDecorator): + marker_ = marker + elif isinstance(marker, str): + marker_ = getattr(MARK_GEN, marker) + else: + raise ValueError("is not a string or pytest.mark.* Marker") + self.keywords[marker_.name] = marker_ + if append: + self.own_markers.append(marker_.mark) + else: + self.own_markers.insert(0, marker_.mark) + + def iter_markers(self, name: str | None = None) -> Iterator[Mark]: + """Iterate over all markers of the node. + + :param name: If given, filter the results by the name attribute. + :returns: An iterator of the markers of the node. + """ + return (x[1] for x in self.iter_markers_with_node(name=name)) + + def iter_markers_with_node( + self, name: str | None = None + ) -> Iterator[tuple[Node, Mark]]: + """Iterate over all markers of the node. + + :param name: If given, filter the results by the name attribute. + :returns: An iterator of (node, mark) tuples. + """ + for node in self.iter_parents(): + for mark in node.own_markers: + if name is None or getattr(mark, "name", None) == name: + yield node, mark + + @overload + def get_closest_marker(self, name: str) -> Mark | None: ... + + @overload + def get_closest_marker(self, name: str, default: Mark) -> Mark: ... + + def get_closest_marker(self, name: str, default: Mark | None = None) -> Mark | None: + """Return the first marker matching the name, from closest (for + example function) to farther level (for example module level). + + :param default: Fallback return value if no marker was found. + :param name: Name to filter by. + """ + return next(self.iter_markers(name=name), default) + + def listextrakeywords(self) -> set[str]: + """Return a set of all extra keywords in self and any parents.""" + extra_keywords: set[str] = set() + for item in self.listchain(): + extra_keywords.update(item.extra_keyword_matches) + return extra_keywords + + def listnames(self) -> list[str]: + return [x.name for x in self.listchain()] + + def addfinalizer(self, fin: Callable[[], object]) -> None: + """Register a function to be called without arguments when this node is + finalized. + + This method can only be called when this node is active + in a setup chain, for example during self.setup(). + """ + self.session._setupstate.addfinalizer(fin, self) + + def getparent(self, cls: type[_NodeType]) -> _NodeType | None: + """Get the closest parent node (including self) which is an instance of + the given class. + + :param cls: The node class to search for. + :returns: The node, if found. + """ + for node in self.iter_parents(): + if isinstance(node, cls): + return node + return None + + def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback: + return excinfo.traceback + + def _repr_failure_py( + self, + excinfo: ExceptionInfo[BaseException], + style: TracebackStyle | None = None, + ) -> TerminalRepr: + from _pytest.fixtures import FixtureLookupError + + if isinstance(excinfo.value, ConftestImportFailure): + excinfo = ExceptionInfo.from_exception(excinfo.value.cause) + if isinstance(excinfo.value, fail.Exception): + if not excinfo.value.pytrace: + style = "value" + if isinstance(excinfo.value, FixtureLookupError): + return excinfo.value.formatrepr() + + tbfilter: bool | Callable[[ExceptionInfo[BaseException]], Traceback] + if self.config.getoption("fulltrace", False): + style = "long" + tbfilter = False + else: + tbfilter = self._traceback_filter + if style == "auto": + style = "long" + # XXX should excinfo.getrepr record all data and toterminal() process it? + if style is None: + if self.config.getoption("tbstyle", "auto") == "short": + style = "short" + else: + style = "long" + + if self.config.get_verbosity() > 1: + truncate_locals = False + else: + truncate_locals = True + + truncate_args = False if self.config.get_verbosity() > 2 else True + + # excinfo.getrepr() formats paths relative to the CWD if `abspath` is False. + # It is possible for a fixture/test to change the CWD while this code runs, which + # would then result in the user seeing confusing paths in the failure message. + # To fix this, if the CWD changed, always display the full absolute path. + # It will be better to just always display paths relative to invocation_dir, but + # this requires a lot of plumbing (#6428). + try: + abspath = Path(os.getcwd()) != self.config.invocation_params.dir + except OSError: + abspath = True + + return excinfo.getrepr( + funcargs=True, + abspath=abspath, + showlocals=self.config.getoption("showlocals", False), + style=style, + tbfilter=tbfilter, + truncate_locals=truncate_locals, + truncate_args=truncate_args, + ) + + def repr_failure( + self, + excinfo: ExceptionInfo[BaseException], + style: TracebackStyle | None = None, + ) -> str | TerminalRepr: + """Return a representation of a collection or test failure. + + .. seealso:: :ref:`non-python tests` + + :param excinfo: Exception information for the failure. + """ + return self._repr_failure_py(excinfo, style) + + +def get_fslocation_from_item(node: Node) -> tuple[str | Path, int | None]: + """Try to extract the actual location from a node, depending on available attributes: + + * "location": a pair (path, lineno) + * "obj": a Python object that the node wraps. + * "path": just a path + + :rtype: A tuple of (str|Path, int) with filename and 0-based line number. + """ + # See Item.location. + location: tuple[str, int | None, str] | None = getattr(node, "location", None) + if location is not None: + return location[:2] + obj = getattr(node, "obj", None) + if obj is not None: + return getfslineno(obj) + return getattr(node, "path", "unknown location"), -1 + + +class Collector(Node, abc.ABC): + """Base class of all collectors. + + Collector create children through `collect()` and thus iteratively build + the collection tree. + """ + + class CollectError(Exception): + """An error during collection, contains a custom message.""" + + @abc.abstractmethod + def collect(self) -> Iterable[Item | Collector]: + """Collect children (items and collectors) for this collector.""" + raise NotImplementedError("abstract") + + # TODO: This omits the style= parameter which breaks Liskov Substitution. + def repr_failure( # type: ignore[override] + self, excinfo: ExceptionInfo[BaseException] + ) -> str | TerminalRepr: + """Return a representation of a collection failure. + + :param excinfo: Exception information for the failure. + """ + if isinstance(excinfo.value, self.CollectError) and not self.config.getoption( + "fulltrace", False + ): + exc = excinfo.value + return str(exc.args[0]) + + # Respect explicit tbstyle option, but default to "short" + # (_repr_failure_py uses "long" with "fulltrace" option always). + tbstyle = self.config.getoption("tbstyle", "auto") + if tbstyle == "auto": + tbstyle = "short" + + return self._repr_failure_py(excinfo, style=tbstyle) + + def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback: + if hasattr(self, "path"): + traceback = excinfo.traceback + ntraceback = traceback.cut(path=self.path) + if ntraceback == traceback: + ntraceback = ntraceback.cut(excludepath=tracebackcutdir) + return ntraceback.filter(excinfo) + return excinfo.traceback + + +@lru_cache(maxsize=1000) +def _check_initialpaths_for_relpath( + initial_paths: frozenset[Path], path: Path +) -> str | None: + if path in initial_paths: + return "" + + for parent in path.parents: + if parent in initial_paths: + return str(path.relative_to(parent)) + + return None + + +class FSCollector(Collector, abc.ABC): + """Base class for filesystem collectors.""" + + def __init__( + self, + fspath: LEGACY_PATH | None = None, + path_or_parent: Path | Node | None = None, + path: Path | None = None, + name: str | None = None, + parent: Node | None = None, + config: Config | None = None, + session: Session | None = None, + nodeid: str | None = None, + ) -> None: + if path_or_parent: + if isinstance(path_or_parent, Node): + assert parent is None + parent = cast(FSCollector, path_or_parent) + elif isinstance(path_or_parent, Path): + assert path is None + path = path_or_parent + + path = _imply_path(type(self), path, fspath=fspath) + if name is None: + name = path.name + if parent is not None and parent.path != path: + try: + rel = path.relative_to(parent.path) + except ValueError: + pass + else: + name = str(rel) + name = name.replace(os.sep, SEP) + self.path = path + + if session is None: + assert parent is not None + session = parent.session + + if nodeid is None: + try: + nodeid = str(self.path.relative_to(session.config.rootpath)) + except ValueError: + nodeid = _check_initialpaths_for_relpath(session._initialpaths, path) + + if nodeid and os.sep != SEP: + nodeid = nodeid.replace(os.sep, SEP) + + super().__init__( + name=name, + parent=parent, + config=config, + session=session, + nodeid=nodeid, + path=path, + ) + + @classmethod + def from_parent( + cls, + parent, + *, + fspath: LEGACY_PATH | None = None, + path: Path | None = None, + **kw, + ) -> Self: + """The public constructor.""" + return super().from_parent(parent=parent, fspath=fspath, path=path, **kw) + + +class File(FSCollector, abc.ABC): + """Base class for collecting tests from a file. + + :ref:`non-python tests`. + """ + + +class Directory(FSCollector, abc.ABC): + """Base class for collecting files from a directory. + + A basic directory collector does the following: goes over the files and + sub-directories in the directory and creates collectors for them by calling + the hooks :hook:`pytest_collect_directory` and :hook:`pytest_collect_file`, + after checking that they are not ignored using + :hook:`pytest_ignore_collect`. + + The default directory collectors are :class:`~pytest.Dir` and + :class:`~pytest.Package`. + + .. versionadded:: 8.0 + + :ref:`custom directory collectors`. + """ + + +class Item(Node, abc.ABC): + """Base class of all test invocation items. + + Note that for a single function there might be multiple test invocation items. + """ + + nextitem = None + + def __init__( + self, + name, + parent=None, + config: Config | None = None, + session: Session | None = None, + nodeid: str | None = None, + **kw, + ) -> None: + # The first two arguments are intentionally passed positionally, + # to keep plugins who define a node type which inherits from + # (pytest.Item, pytest.File) working (see issue #8435). + # They can be made kwargs when the deprecation above is done. + super().__init__( + name, + parent, + config=config, + session=session, + nodeid=nodeid, + **kw, + ) + self._report_sections: list[tuple[str, str, str]] = [] + + #: A list of tuples (name, value) that holds user defined properties + #: for this test. + self.user_properties: list[tuple[str, object]] = [] + + self._check_item_and_collector_diamond_inheritance() + + def _check_item_and_collector_diamond_inheritance(self) -> None: + """ + Check if the current type inherits from both File and Collector + at the same time, emitting a warning accordingly (#8447). + """ + cls = type(self) + + # We inject an attribute in the type to avoid issuing this warning + # for the same class more than once, which is not helpful. + # It is a hack, but was deemed acceptable in order to avoid + # flooding the user in the common case. + attr_name = "_pytest_diamond_inheritance_warning_shown" + if getattr(cls, attr_name, False): + return + setattr(cls, attr_name, True) + + problems = ", ".join( + base.__name__ for base in cls.__bases__ if issubclass(base, Collector) + ) + if problems: + warnings.warn( + f"{cls.__name__} is an Item subclass and should not be a collector, " + f"however its bases {problems} are collectors.\n" + "Please split the Collectors and the Item into separate node types.\n" + "Pytest Doc example: https://docs.pytest.org/en/latest/example/nonpython.html\n" + "example pull request on a plugin: https://github.com/asmeurer/pytest-flakes/pull/40/", + PytestWarning, + ) + + @abc.abstractmethod + def runtest(self) -> None: + """Run the test case for this item. + + Must be implemented by subclasses. + + .. seealso:: :ref:`non-python tests` + """ + raise NotImplementedError("runtest must be implemented by Item subclass") + + def add_report_section(self, when: str, key: str, content: str) -> None: + """Add a new report section, similar to what's done internally to add + stdout and stderr captured output:: + + item.add_report_section("call", "stdout", "report section contents") + + :param str when: + One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``. + :param str key: + Name of the section, can be customized at will. Pytest uses ``"stdout"`` and + ``"stderr"`` internally. + :param str content: + The full contents as a string. + """ + if content: + self._report_sections.append((when, key, content)) + + def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]: + """Get location information for this item for test reports. + + Returns a tuple with three elements: + + - The path of the test (default ``self.path``) + - The 0-based line number of the test (default ``None``) + - A name of the test to be shown (default ``""``) + + .. seealso:: :ref:`non-python tests` + """ + return self.path, None, "" + + @cached_property + def location(self) -> tuple[str, int | None, str]: + """ + Returns a tuple of ``(relfspath, lineno, testname)`` for this item + where ``relfspath`` is file path relative to ``config.rootpath`` + and lineno is a 0-based line number. + """ + location = self.reportinfo() + path = absolutepath(location[0]) + relfspath = self.session._node_location_to_relpath(path) + assert type(location[2]) is str + return (relfspath, location[1], location[2]) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/outcomes.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/outcomes.py new file mode 100644 index 0000000..766be95 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/outcomes.py @@ -0,0 +1,308 @@ +"""Exception classes and constants handling test outcomes as well as +functions creating them.""" + +from __future__ import annotations + +import sys +from typing import Any +from typing import ClassVar +from typing import NoReturn + +from .warning_types import PytestDeprecationWarning + + +class OutcomeException(BaseException): + """OutcomeException and its subclass instances indicate and contain info + about test and collection outcomes.""" + + def __init__(self, msg: str | None = None, pytrace: bool = True) -> None: + if msg is not None and not isinstance(msg, str): + error_msg = ( # type: ignore[unreachable] + "{} expected string as 'msg' parameter, got '{}' instead.\n" + "Perhaps you meant to use a mark?" + ) + raise TypeError(error_msg.format(type(self).__name__, type(msg).__name__)) + super().__init__(msg) + self.msg = msg + self.pytrace = pytrace + + def __repr__(self) -> str: + if self.msg is not None: + return self.msg + return f"<{self.__class__.__name__} instance>" + + __str__ = __repr__ + + +TEST_OUTCOME = (OutcomeException, Exception) + + +class Skipped(OutcomeException): + # XXX hackish: on 3k we fake to live in the builtins + # in order to have Skipped exception printing shorter/nicer + __module__ = "builtins" + + def __init__( + self, + msg: str | None = None, + pytrace: bool = True, + allow_module_level: bool = False, + *, + _use_item_location: bool = False, + ) -> None: + super().__init__(msg=msg, pytrace=pytrace) + self.allow_module_level = allow_module_level + # If true, the skip location is reported as the item's location, + # instead of the place that raises the exception/calls skip(). + self._use_item_location = _use_item_location + + +class Failed(OutcomeException): + """Raised from an explicit call to pytest.fail().""" + + __module__ = "builtins" + + +class Exit(Exception): + """Raised for immediate program exits (no tracebacks/summaries).""" + + def __init__( + self, msg: str = "unknown reason", returncode: int | None = None + ) -> None: + self.msg = msg + self.returncode = returncode + super().__init__(msg) + + +class XFailed(Failed): + """Raised from an explicit call to pytest.xfail().""" + + +class _Exit: + """Exit testing process. + + :param reason: + The message to show as the reason for exiting pytest. reason has a default value + only because `msg` is deprecated. + + :param returncode: + Return code to be used when exiting pytest. None means the same as ``0`` (no error), + same as :func:`sys.exit`. + + :raises pytest.exit.Exception: + The exception that is raised. + """ + + Exception: ClassVar[type[Exit]] = Exit + + def __call__(self, reason: str = "", returncode: int | None = None) -> NoReturn: + __tracebackhide__ = True + raise Exit(msg=reason, returncode=returncode) + + +exit: _Exit = _Exit() + + +class _Skip: + """Skip an executing test with the given message. + + This function should be called only during testing (setup, call or teardown) or + during collection by using the ``allow_module_level`` flag. This function can + be called in doctests as well. + + :param reason: + The message to show the user as reason for the skip. + + :param allow_module_level: + Allows this function to be called at module level. + Raising the skip exception at module level will stop + the execution of the module and prevent the collection of all tests in the module, + even those defined before the `skip` call. + + Defaults to False. + + :raises pytest.skip.Exception: + The exception that is raised. + + .. note:: + It is better to use the :ref:`pytest.mark.skipif ref` marker when + possible to declare a test to be skipped under certain conditions + like mismatching platforms or dependencies. + Similarly, use the ``# doctest: +SKIP`` directive (see :py:data:`doctest.SKIP`) + to skip a doctest statically. + """ + + Exception: ClassVar[type[Skipped]] = Skipped + + def __call__(self, reason: str = "", allow_module_level: bool = False) -> NoReturn: + __tracebackhide__ = True + raise Skipped(msg=reason, allow_module_level=allow_module_level) + + +skip: _Skip = _Skip() + + +class _Fail: + """Explicitly fail an executing test with the given message. + + :param reason: + The message to show the user as reason for the failure. + + :param pytrace: + If False, msg represents the full failure information and no + python traceback will be reported. + + :raises pytest.fail.Exception: + The exception that is raised. + """ + + Exception: ClassVar[type[Failed]] = Failed + + def __call__(self, reason: str = "", pytrace: bool = True) -> NoReturn: + __tracebackhide__ = True + raise Failed(msg=reason, pytrace=pytrace) + + +fail: _Fail = _Fail() + + +class _XFail: + """Imperatively xfail an executing test or setup function with the given reason. + + This function should be called only during testing (setup, call or teardown). + + No other code is executed after using ``xfail()`` (it is implemented + internally by raising an exception). + + :param reason: + The message to show the user as reason for the xfail. + + .. note:: + It is better to use the :ref:`pytest.mark.xfail ref` marker when + possible to declare a test to be xfailed under certain conditions + like known bugs or missing features. + + :raises pytest.xfail.Exception: + The exception that is raised. + """ + + Exception: ClassVar[type[XFailed]] = XFailed + + def __call__(self, reason: str = "") -> NoReturn: + __tracebackhide__ = True + raise XFailed(msg=reason) + + +xfail: _XFail = _XFail() + + +def importorskip( + modname: str, + minversion: str | None = None, + reason: str | None = None, + *, + exc_type: type[ImportError] | None = None, +) -> Any: + """Import and return the requested module ``modname``, or skip the + current test if the module cannot be imported. + + :param modname: + The name of the module to import. + :param minversion: + If given, the imported module's ``__version__`` attribute must be at + least this minimal version, otherwise the test is still skipped. + :param reason: + If given, this reason is shown as the message when the module cannot + be imported. + :param exc_type: + The exception that should be captured in order to skip modules. + Must be :py:class:`ImportError` or a subclass. + + If the module can be imported but raises :class:`ImportError`, pytest will + issue a warning to the user, as often users expect the module not to be + found (which would raise :class:`ModuleNotFoundError` instead). + + This warning can be suppressed by passing ``exc_type=ImportError`` explicitly. + + See :ref:`import-or-skip-import-error` for details. + + + :returns: + The imported module. This should be assigned to its canonical name. + + :raises pytest.skip.Exception: + If the module cannot be imported. + + Example:: + + docutils = pytest.importorskip("docutils") + + .. versionadded:: 8.2 + + The ``exc_type`` parameter. + """ + import warnings + + __tracebackhide__ = True + compile(modname, "", "eval") # to catch syntaxerrors + + # Until pytest 9.1, we will warn the user if we catch ImportError (instead of ModuleNotFoundError), + # as this might be hiding an installation/environment problem, which is not usually what is intended + # when using importorskip() (#11523). + # In 9.1, to keep the function signature compatible, we just change the code below to: + # 1. Use `exc_type = ModuleNotFoundError` if `exc_type` is not given. + # 2. Remove `warn_on_import` and the warning handling. + if exc_type is None: + exc_type = ImportError + warn_on_import_error = True + else: + warn_on_import_error = False + + skipped: Skipped | None = None + warning: Warning | None = None + + with warnings.catch_warnings(): + # Make sure to ignore ImportWarnings that might happen because + # of existing directories with the same name we're trying to + # import but without a __init__.py file. + warnings.simplefilter("ignore") + + try: + __import__(modname) + except exc_type as exc: + # Do not raise or issue warnings inside the catch_warnings() block. + if reason is None: + reason = f"could not import {modname!r}: {exc}" + skipped = Skipped(reason, allow_module_level=True) + + if warn_on_import_error and not isinstance(exc, ModuleNotFoundError): + lines = [ + "", + f"Module '{modname}' was found, but when imported by pytest it raised:", + f" {exc!r}", + "In pytest 9.1 this warning will become an error by default.", + "You can fix the underlying problem, or alternatively overwrite this behavior and silence this " + "warning by passing exc_type=ImportError explicitly.", + "See https://docs.pytest.org/en/stable/deprecations.html#pytest-importorskip-default-behavior-regarding-importerror", + ] + warning = PytestDeprecationWarning("\n".join(lines)) + + if warning: + warnings.warn(warning, stacklevel=2) + if skipped: + raise skipped + + mod = sys.modules[modname] + if minversion is None: + return mod + verattr = getattr(mod, "__version__", None) + if minversion is not None: + # Imported lazily to improve start-up time. + from packaging.version import Version + + if verattr is None or Version(verattr) < Version(minversion): + raise Skipped( + f"module {modname!r} has __version__ {verattr!r}, required is: {minversion!r}", + allow_module_level=True, + ) + return mod diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/pastebin.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/pastebin.py new file mode 100644 index 0000000..c7b39d9 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/pastebin.py @@ -0,0 +1,117 @@ +# mypy: allow-untyped-defs +"""Submit failure or test session information to a pastebin service.""" + +from __future__ import annotations + +from io import StringIO +import tempfile +from typing import IO + +from _pytest.config import Config +from _pytest.config import create_terminal_writer +from _pytest.config.argparsing import Parser +from _pytest.stash import StashKey +from _pytest.terminal import TerminalReporter +import pytest + + +pastebinfile_key = StashKey[IO[bytes]]() + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting") + group.addoption( + "--pastebin", + metavar="mode", + action="store", + dest="pastebin", + default=None, + choices=["failed", "all"], + help="Send failed|all info to bpaste.net pastebin service", + ) + + +@pytest.hookimpl(trylast=True) +def pytest_configure(config: Config) -> None: + if config.option.pastebin == "all": + tr = config.pluginmanager.getplugin("terminalreporter") + # If no terminal reporter plugin is present, nothing we can do here; + # this can happen when this function executes in a worker node + # when using pytest-xdist, for example. + if tr is not None: + # pastebin file will be UTF-8 encoded binary file. + config.stash[pastebinfile_key] = tempfile.TemporaryFile("w+b") + oldwrite = tr._tw.write + + def tee_write(s, **kwargs): + oldwrite(s, **kwargs) + if isinstance(s, str): + s = s.encode("utf-8") + config.stash[pastebinfile_key].write(s) + + tr._tw.write = tee_write + + +def pytest_unconfigure(config: Config) -> None: + if pastebinfile_key in config.stash: + pastebinfile = config.stash[pastebinfile_key] + # Get terminal contents and delete file. + pastebinfile.seek(0) + sessionlog = pastebinfile.read() + pastebinfile.close() + del config.stash[pastebinfile_key] + # Undo our patching in the terminal reporter. + tr = config.pluginmanager.getplugin("terminalreporter") + del tr._tw.__dict__["write"] + # Write summary. + tr.write_sep("=", "Sending information to Paste Service") + pastebinurl = create_new_paste(sessionlog) + tr.write_line(f"pastebin session-log: {pastebinurl}\n") + + +def create_new_paste(contents: str | bytes) -> str: + """Create a new paste using the bpaste.net service. + + :contents: Paste contents string. + :returns: URL to the pasted contents, or an error message. + """ + import re + from urllib.error import HTTPError + from urllib.parse import urlencode + from urllib.request import urlopen + + params = {"code": contents, "lexer": "text", "expiry": "1week"} + url = "https://bpa.st" + try: + response: str = ( + urlopen(url, data=urlencode(params).encode("ascii")).read().decode("utf-8") + ) + except HTTPError as e: + with e: # HTTPErrors are also http responses that must be closed! + return f"bad response: {e}" + except OSError as e: # eg urllib.error.URLError + return f"bad response: {e}" + m = re.search(r'href="/raw/(\w+)"', response) + if m: + return f"{url}/show/{m.group(1)}" + else: + return "bad response: invalid format ('" + response + "')" + + +def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None: + if terminalreporter.config.option.pastebin != "failed": + return + if "failed" in terminalreporter.stats: + terminalreporter.write_sep("=", "Sending information to Paste Service") + for rep in terminalreporter.stats["failed"]: + try: + msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc + except AttributeError: + msg = terminalreporter._getfailureheadline(rep) + file = StringIO() + tw = create_terminal_writer(terminalreporter.config, file) + rep.toterminal(tw) + s = file.getvalue() + assert len(s) + pastebinurl = create_new_paste(s) + terminalreporter.write_line(f"{msg} --> {pastebinurl}") diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/pathlib.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/pathlib.py new file mode 100644 index 0000000..cd15434 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/pathlib.py @@ -0,0 +1,1063 @@ +from __future__ import annotations + +import atexit +from collections.abc import Callable +from collections.abc import Iterable +from collections.abc import Iterator +import contextlib +from enum import Enum +from errno import EBADF +from errno import ELOOP +from errno import ENOENT +from errno import ENOTDIR +import fnmatch +from functools import partial +from importlib.machinery import ModuleSpec +from importlib.machinery import PathFinder +import importlib.util +import itertools +import os +from os.path import expanduser +from os.path import expandvars +from os.path import isabs +from os.path import sep +from pathlib import Path +from pathlib import PurePath +from posixpath import sep as posix_sep +import shutil +import sys +import types +from types import ModuleType +from typing import Any +from typing import TypeVar +import uuid +import warnings + +from _pytest.compat import assert_never +from _pytest.outcomes import skip +from _pytest.warning_types import PytestWarning + + +if sys.version_info < (3, 11): + from importlib._bootstrap_external import _NamespaceLoader as NamespaceLoader +else: + from importlib.machinery import NamespaceLoader + +LOCK_TIMEOUT = 60 * 60 * 24 * 3 + +_AnyPurePath = TypeVar("_AnyPurePath", bound=PurePath) + +# The following function, variables and comments were +# copied from cpython 3.9 Lib/pathlib.py file. + +# EBADF - guard against macOS `stat` throwing EBADF +_IGNORED_ERRORS = (ENOENT, ENOTDIR, EBADF, ELOOP) + +_IGNORED_WINERRORS = ( + 21, # ERROR_NOT_READY - drive exists but is not accessible + 1921, # ERROR_CANT_RESOLVE_FILENAME - fix for broken symlink pointing to itself +) + + +def _ignore_error(exception: Exception) -> bool: + return ( + getattr(exception, "errno", None) in _IGNORED_ERRORS + or getattr(exception, "winerror", None) in _IGNORED_WINERRORS + ) + + +def get_lock_path(path: _AnyPurePath) -> _AnyPurePath: + return path.joinpath(".lock") + + +def on_rm_rf_error( + func: Callable[..., Any] | None, + path: str, + excinfo: BaseException + | tuple[type[BaseException], BaseException, types.TracebackType | None], + *, + start_path: Path, +) -> bool: + """Handle known read-only errors during rmtree. + + The returned value is used only by our own tests. + """ + if isinstance(excinfo, BaseException): + exc = excinfo + else: + exc = excinfo[1] + + # Another process removed the file in the middle of the "rm_rf" (xdist for example). + # More context: https://github.com/pytest-dev/pytest/issues/5974#issuecomment-543799018 + if isinstance(exc, FileNotFoundError): + return False + + if not isinstance(exc, PermissionError): + warnings.warn( + PytestWarning(f"(rm_rf) error removing {path}\n{type(exc)}: {exc}") + ) + return False + + if func not in (os.rmdir, os.remove, os.unlink): + if func not in (os.open,): + warnings.warn( + PytestWarning( + f"(rm_rf) unknown function {func} when removing {path}:\n{type(exc)}: {exc}" + ) + ) + return False + + # Chmod + retry. + import stat + + def chmod_rw(p: str) -> None: + mode = os.stat(p).st_mode + os.chmod(p, mode | stat.S_IRUSR | stat.S_IWUSR) + + # For files, we need to recursively go upwards in the directories to + # ensure they all are also writable. + p = Path(path) + if p.is_file(): + for parent in p.parents: + chmod_rw(str(parent)) + # Stop when we reach the original path passed to rm_rf. + if parent == start_path: + break + chmod_rw(str(path)) + + func(path) + return True + + +def ensure_extended_length_path(path: Path) -> Path: + """Get the extended-length version of a path (Windows). + + On Windows, by default, the maximum length of a path (MAX_PATH) is 260 + characters, and operations on paths longer than that fail. But it is possible + to overcome this by converting the path to "extended-length" form before + performing the operation: + https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#maximum-path-length-limitation + + On Windows, this function returns the extended-length absolute version of path. + On other platforms it returns path unchanged. + """ + if sys.platform.startswith("win32"): + path = path.resolve() + path = Path(get_extended_length_path_str(str(path))) + return path + + +def get_extended_length_path_str(path: str) -> str: + """Convert a path to a Windows extended length path.""" + long_path_prefix = "\\\\?\\" + unc_long_path_prefix = "\\\\?\\UNC\\" + if path.startswith((long_path_prefix, unc_long_path_prefix)): + return path + # UNC + if path.startswith("\\\\"): + return unc_long_path_prefix + path[2:] + return long_path_prefix + path + + +def rm_rf(path: Path) -> None: + """Remove the path contents recursively, even if some elements + are read-only.""" + path = ensure_extended_length_path(path) + onerror = partial(on_rm_rf_error, start_path=path) + if sys.version_info >= (3, 12): + shutil.rmtree(str(path), onexc=onerror) + else: + shutil.rmtree(str(path), onerror=onerror) + + +def find_prefixed(root: Path, prefix: str) -> Iterator[os.DirEntry[str]]: + """Find all elements in root that begin with the prefix, case-insensitive.""" + l_prefix = prefix.lower() + for x in os.scandir(root): + if x.name.lower().startswith(l_prefix): + yield x + + +def extract_suffixes(iter: Iterable[os.DirEntry[str]], prefix: str) -> Iterator[str]: + """Return the parts of the paths following the prefix. + + :param iter: Iterator over path names. + :param prefix: Expected prefix of the path names. + """ + p_len = len(prefix) + for entry in iter: + yield entry.name[p_len:] + + +def find_suffixes(root: Path, prefix: str) -> Iterator[str]: + """Combine find_prefixes and extract_suffixes.""" + return extract_suffixes(find_prefixed(root, prefix), prefix) + + +def parse_num(maybe_num: str) -> int: + """Parse number path suffixes, returns -1 on error.""" + try: + return int(maybe_num) + except ValueError: + return -1 + + +def _force_symlink(root: Path, target: str | PurePath, link_to: str | Path) -> None: + """Helper to create the current symlink. + + It's full of race conditions that are reasonably OK to ignore + for the context of best effort linking to the latest test run. + + The presumption being that in case of much parallelism + the inaccuracy is going to be acceptable. + """ + current_symlink = root.joinpath(target) + try: + current_symlink.unlink() + except OSError: + pass + try: + current_symlink.symlink_to(link_to) + except Exception: + pass + + +def make_numbered_dir(root: Path, prefix: str, mode: int = 0o700) -> Path: + """Create a directory with an increased number as suffix for the given prefix.""" + for i in range(10): + # try up to 10 times to create the folder + max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1) + new_number = max_existing + 1 + new_path = root.joinpath(f"{prefix}{new_number}") + try: + new_path.mkdir(mode=mode) + except Exception: + pass + else: + _force_symlink(root, prefix + "current", new_path) + return new_path + else: + raise OSError( + "could not create numbered dir with prefix " + f"{prefix} in {root} after 10 tries" + ) + + +def create_cleanup_lock(p: Path) -> Path: + """Create a lock to prevent premature folder cleanup.""" + lock_path = get_lock_path(p) + try: + fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644) + except FileExistsError as e: + raise OSError(f"cannot create lockfile in {p}") from e + else: + pid = os.getpid() + spid = str(pid).encode() + os.write(fd, spid) + os.close(fd) + if not lock_path.is_file(): + raise OSError("lock path got renamed after successful creation") + return lock_path + + +def register_cleanup_lock_removal( + lock_path: Path, register: Any = atexit.register +) -> Any: + """Register a cleanup function for removing a lock, by default on atexit.""" + pid = os.getpid() + + def cleanup_on_exit(lock_path: Path = lock_path, original_pid: int = pid) -> None: + current_pid = os.getpid() + if current_pid != original_pid: + # fork + return + try: + lock_path.unlink() + except OSError: + pass + + return register(cleanup_on_exit) + + +def maybe_delete_a_numbered_dir(path: Path) -> None: + """Remove a numbered directory if its lock can be obtained and it does + not seem to be in use.""" + path = ensure_extended_length_path(path) + lock_path = None + try: + lock_path = create_cleanup_lock(path) + parent = path.parent + + garbage = parent.joinpath(f"garbage-{uuid.uuid4()}") + path.rename(garbage) + rm_rf(garbage) + except OSError: + # known races: + # * other process did a cleanup at the same time + # * deletable folder was found + # * process cwd (Windows) + return + finally: + # If we created the lock, ensure we remove it even if we failed + # to properly remove the numbered dir. + if lock_path is not None: + try: + lock_path.unlink() + except OSError: + pass + + +def ensure_deletable(path: Path, consider_lock_dead_if_created_before: float) -> bool: + """Check if `path` is deletable based on whether the lock file is expired.""" + if path.is_symlink(): + return False + lock = get_lock_path(path) + try: + if not lock.is_file(): + return True + except OSError: + # we might not have access to the lock file at all, in this case assume + # we don't have access to the entire directory (#7491). + return False + try: + lock_time = lock.stat().st_mtime + except Exception: + return False + else: + if lock_time < consider_lock_dead_if_created_before: + # We want to ignore any errors while trying to remove the lock such as: + # - PermissionDenied, like the file permissions have changed since the lock creation; + # - FileNotFoundError, in case another pytest process got here first; + # and any other cause of failure. + with contextlib.suppress(OSError): + lock.unlink() + return True + return False + + +def try_cleanup(path: Path, consider_lock_dead_if_created_before: float) -> None: + """Try to cleanup a folder if we can ensure it's deletable.""" + if ensure_deletable(path, consider_lock_dead_if_created_before): + maybe_delete_a_numbered_dir(path) + + +def cleanup_candidates(root: Path, prefix: str, keep: int) -> Iterator[Path]: + """List candidates for numbered directories to be removed - follows py.path.""" + max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1) + max_delete = max_existing - keep + entries = find_prefixed(root, prefix) + entries, entries2 = itertools.tee(entries) + numbers = map(parse_num, extract_suffixes(entries2, prefix)) + for entry, number in zip(entries, numbers, strict=True): + if number <= max_delete: + yield Path(entry) + + +def cleanup_dead_symlinks(root: Path) -> None: + for left_dir in root.iterdir(): + if left_dir.is_symlink(): + if not left_dir.resolve().exists(): + left_dir.unlink() + + +def cleanup_numbered_dir( + root: Path, prefix: str, keep: int, consider_lock_dead_if_created_before: float +) -> None: + """Cleanup for lock driven numbered directories.""" + if not root.exists(): + return + for path in cleanup_candidates(root, prefix, keep): + try_cleanup(path, consider_lock_dead_if_created_before) + for path in root.glob("garbage-*"): + try_cleanup(path, consider_lock_dead_if_created_before) + + cleanup_dead_symlinks(root) + + +def make_numbered_dir_with_cleanup( + root: Path, + prefix: str, + keep: int, + lock_timeout: float, + mode: int, +) -> Path: + """Create a numbered dir with a cleanup lock and remove old ones.""" + e = None + for i in range(10): + try: + p = make_numbered_dir(root, prefix, mode) + # Only lock the current dir when keep is not 0 + if keep != 0: + lock_path = create_cleanup_lock(p) + register_cleanup_lock_removal(lock_path) + except Exception as exc: + e = exc + else: + consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout + # Register a cleanup for program exit + atexit.register( + cleanup_numbered_dir, + root, + prefix, + keep, + consider_lock_dead_if_created_before, + ) + return p + assert e is not None + raise e + + +def resolve_from_str(input: str, rootpath: Path) -> Path: + input = expanduser(input) + input = expandvars(input) + if isabs(input): + return Path(input) + else: + return rootpath.joinpath(input) + + +def fnmatch_ex(pattern: str, path: str | os.PathLike[str]) -> bool: + """A port of FNMatcher from py.path.common which works with PurePath() instances. + + The difference between this algorithm and PurePath.match() is that the + latter matches "**" glob expressions for each part of the path, while + this algorithm uses the whole path instead. + + For example: + "tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py" + with this algorithm, but not with PurePath.match(). + + This algorithm was ported to keep backward-compatibility with existing + settings which assume paths match according this logic. + + References: + * https://bugs.python.org/issue29249 + * https://bugs.python.org/issue34731 + """ + path = PurePath(path) + iswin32 = sys.platform.startswith("win") + + if iswin32 and sep not in pattern and posix_sep in pattern: + # Running on Windows, the pattern has no Windows path separators, + # and the pattern has one or more Posix path separators. Replace + # the Posix path separators with the Windows path separator. + pattern = pattern.replace(posix_sep, sep) + + if sep not in pattern: + name = path.name + else: + name = str(path) + if path.is_absolute() and not os.path.isabs(pattern): + pattern = f"*{os.sep}{pattern}" + return fnmatch.fnmatch(name, pattern) + + +def parts(s: str) -> set[str]: + parts = s.split(sep) + return {sep.join(parts[: i + 1]) or sep for i in range(len(parts))} + + +def symlink_or_skip( + src: os.PathLike[str] | str, + dst: os.PathLike[str] | str, + **kwargs: Any, +) -> None: + """Make a symlink, or skip the test in case symlinks are not supported.""" + try: + os.symlink(src, dst, **kwargs) + except OSError as e: + skip(f"symlinks not supported: {e}") + + +class ImportMode(Enum): + """Possible values for `mode` parameter of `import_path`.""" + + prepend = "prepend" + append = "append" + importlib = "importlib" + + +class ImportPathMismatchError(ImportError): + """Raised on import_path() if there is a mismatch of __file__'s. + + This can happen when `import_path` is called multiple times with different filenames that has + the same basename but reside in packages + (for example "/tests1/test_foo.py" and "/tests2/test_foo.py"). + """ + + +def import_path( + path: str | os.PathLike[str], + *, + mode: str | ImportMode = ImportMode.prepend, + root: Path, + consider_namespace_packages: bool, +) -> ModuleType: + """ + Import and return a module from the given path, which can be a file (a module) or + a directory (a package). + + :param path: + Path to the file to import. + + :param mode: + Controls the underlying import mechanism that will be used: + + * ImportMode.prepend: the directory containing the module (or package, taking + `__init__.py` files into account) will be put at the *start* of `sys.path` before + being imported with `importlib.import_module`. + + * ImportMode.append: same as `prepend`, but the directory will be appended + to the end of `sys.path`, if not already in `sys.path`. + + * ImportMode.importlib: uses more fine control mechanisms provided by `importlib` + to import the module, which avoids having to muck with `sys.path` at all. It effectively + allows having same-named test modules in different places. + + :param root: + Used as an anchor when mode == ImportMode.importlib to obtain + a unique name for the module being imported so it can safely be stored + into ``sys.modules``. + + :param consider_namespace_packages: + If True, consider namespace packages when resolving module names. + + :raises ImportPathMismatchError: + If after importing the given `path` and the module `__file__` + are different. Only raised in `prepend` and `append` modes. + """ + path = Path(path) + mode = ImportMode(mode) + + if not path.exists(): + raise ImportError(path) + + if mode is ImportMode.importlib: + # Try to import this module using the standard import mechanisms, but + # without touching sys.path. + try: + pkg_root, module_name = resolve_pkg_root_and_module_name( + path, consider_namespace_packages=consider_namespace_packages + ) + except CouldNotResolvePathError: + pass + else: + # If the given module name is already in sys.modules, do not import it again. + with contextlib.suppress(KeyError): + return sys.modules[module_name] + + mod = _import_module_using_spec( + module_name, path, pkg_root, insert_modules=False + ) + if mod is not None: + return mod + + # Could not import the module with the current sys.path, so we fall back + # to importing the file as a single module, not being a part of a package. + module_name = module_name_from_path(path, root) + with contextlib.suppress(KeyError): + return sys.modules[module_name] + + mod = _import_module_using_spec( + module_name, path, path.parent, insert_modules=True + ) + if mod is None: + raise ImportError(f"Can't find module {module_name} at location {path}") + return mod + + try: + pkg_root, module_name = resolve_pkg_root_and_module_name( + path, consider_namespace_packages=consider_namespace_packages + ) + except CouldNotResolvePathError: + pkg_root, module_name = path.parent, path.stem + + # Change sys.path permanently: restoring it at the end of this function would cause surprising + # problems because of delayed imports: for example, a conftest.py file imported by this function + # might have local imports, which would fail at runtime if we restored sys.path. + if mode is ImportMode.append: + if str(pkg_root) not in sys.path: + sys.path.append(str(pkg_root)) + elif mode is ImportMode.prepend: + if str(pkg_root) != sys.path[0]: + sys.path.insert(0, str(pkg_root)) + else: + assert_never(mode) + + importlib.import_module(module_name) + + mod = sys.modules[module_name] + if path.name == "__init__.py": + return mod + + ignore = os.environ.get("PY_IGNORE_IMPORTMISMATCH", "") + if ignore != "1": + module_file = mod.__file__ + if module_file is None: + raise ImportPathMismatchError(module_name, module_file, path) + + if module_file.endswith((".pyc", ".pyo")): + module_file = module_file[:-1] + if module_file.endswith(os.sep + "__init__.py"): + module_file = module_file[: -(len(os.sep + "__init__.py"))] + + try: + is_same = _is_same(str(path), module_file) + except FileNotFoundError: + is_same = False + + if not is_same: + raise ImportPathMismatchError(module_name, module_file, path) + + return mod + + +def _import_module_using_spec( + module_name: str, module_path: Path, module_location: Path, *, insert_modules: bool +) -> ModuleType | None: + """ + Tries to import a module by its canonical name, path, and its parent location. + + :param module_name: + The expected module name, will become the key of `sys.modules`. + + :param module_path: + The file path of the module, for example `/foo/bar/test_demo.py`. + If module is a package, pass the path to the `__init__.py` of the package. + If module is a namespace package, pass directory path. + + :param module_location: + The parent location of the module. + If module is a package, pass the directory containing the `__init__.py` file. + + :param insert_modules: + If True, will call `insert_missing_modules` to create empty intermediate modules + with made-up module names (when importing test files not reachable from `sys.path`). + + Example 1 of parent_module_*: + + module_name: "a.b.c.demo" + module_path: Path("a/b/c/demo.py") + module_location: Path("a/b/c/") + if "a.b.c" is package ("a/b/c/__init__.py" exists), then + parent_module_name: "a.b.c" + parent_module_path: Path("a/b/c/__init__.py") + parent_module_location: Path("a/b/c/") + else: + parent_module_name: "a.b.c" + parent_module_path: Path("a/b/c") + parent_module_location: Path("a/b/") + + Example 2 of parent_module_*: + + module_name: "a.b.c" + module_path: Path("a/b/c/__init__.py") + module_location: Path("a/b/c/") + if "a.b" is package ("a/b/__init__.py" exists), then + parent_module_name: "a.b" + parent_module_path: Path("a/b/__init__.py") + parent_module_location: Path("a/b/") + else: + parent_module_name: "a.b" + parent_module_path: Path("a/b/") + parent_module_location: Path("a/") + """ + # Attempt to import the parent module, seems is our responsibility: + # https://github.com/python/cpython/blob/73906d5c908c1e0b73c5436faeff7d93698fc074/Lib/importlib/_bootstrap.py#L1308-L1311 + parent_module_name, _, name = module_name.rpartition(".") + parent_module: ModuleType | None = None + if parent_module_name: + parent_module = sys.modules.get(parent_module_name) + # If the parent_module lacks the `__path__` attribute, AttributeError when finding a submodule's spec, + # requiring re-import according to the path. + need_reimport = not hasattr(parent_module, "__path__") + if parent_module is None or need_reimport: + # Get parent_location based on location, get parent_path based on path. + if module_path.name == "__init__.py": + # If the current module is in a package, + # need to leave the package first and then enter the parent module. + parent_module_path = module_path.parent.parent + else: + parent_module_path = module_path.parent + + if (parent_module_path / "__init__.py").is_file(): + # If the parent module is a package, loading by __init__.py file. + parent_module_path = parent_module_path / "__init__.py" + + parent_module = _import_module_using_spec( + parent_module_name, + parent_module_path, + parent_module_path.parent, + insert_modules=insert_modules, + ) + + # Checking with sys.meta_path first in case one of its hooks can import this module, + # such as our own assertion-rewrite hook. + for meta_importer in sys.meta_path: + module_name_of_meta = getattr(meta_importer.__class__, "__module__", "") + if module_name_of_meta == "_pytest.assertion.rewrite" and module_path.is_file(): + # Import modules in subdirectories by module_path + # to ensure assertion rewrites are not missed (#12659). + find_spec_path = [str(module_location), str(module_path)] + else: + find_spec_path = [str(module_location)] + + spec = meta_importer.find_spec(module_name, find_spec_path) + + if spec_matches_module_path(spec, module_path): + break + else: + loader = None + if module_path.is_dir(): + # The `spec_from_file_location` matches a loader based on the file extension by default. + # For a namespace package, need to manually specify a loader. + loader = NamespaceLoader(name, module_path, PathFinder()) # type: ignore[arg-type] + + spec = importlib.util.spec_from_file_location( + module_name, str(module_path), loader=loader + ) + + if spec_matches_module_path(spec, module_path): + assert spec is not None + # Find spec and import this module. + mod = importlib.util.module_from_spec(spec) + sys.modules[module_name] = mod + spec.loader.exec_module(mod) # type: ignore[union-attr] + + # Set this module as an attribute of the parent module (#12194). + if parent_module is not None: + setattr(parent_module, name, mod) + + if insert_modules: + insert_missing_modules(sys.modules, module_name) + return mod + + return None + + +def spec_matches_module_path(module_spec: ModuleSpec | None, module_path: Path) -> bool: + """Return true if the given ModuleSpec can be used to import the given module path.""" + if module_spec is None: + return False + + if module_spec.origin: + return Path(module_spec.origin) == module_path + + # Compare the path with the `module_spec.submodule_Search_Locations` in case + # the module is part of a namespace package. + # https://docs.python.org/3/library/importlib.html#importlib.machinery.ModuleSpec.submodule_search_locations + if module_spec.submodule_search_locations: # can be None. + for path in module_spec.submodule_search_locations: + if Path(path) == module_path: + return True + + return False + + +# Implement a special _is_same function on Windows which returns True if the two filenames +# compare equal, to circumvent os.path.samefile returning False for mounts in UNC (#7678). +if sys.platform.startswith("win"): + + def _is_same(f1: str, f2: str) -> bool: + return Path(f1) == Path(f2) or os.path.samefile(f1, f2) + +else: + + def _is_same(f1: str, f2: str) -> bool: + return os.path.samefile(f1, f2) + + +def module_name_from_path(path: Path, root: Path) -> str: + """ + Return a dotted module name based on the given path, anchored on root. + + For example: path="projects/src/tests/test_foo.py" and root="/projects", the + resulting module name will be "src.tests.test_foo". + """ + path = path.with_suffix("") + try: + relative_path = path.relative_to(root) + except ValueError: + # If we can't get a relative path to root, use the full path, except + # for the first part ("d:\\" or "/" depending on the platform, for example). + path_parts = path.parts[1:] + else: + # Use the parts for the relative path to the root path. + path_parts = relative_path.parts + + # Module name for packages do not contain the __init__ file, unless + # the `__init__.py` file is at the root. + if len(path_parts) >= 2 and path_parts[-1] == "__init__": + path_parts = path_parts[:-1] + + # Module names cannot contain ".", normalize them to "_". This prevents + # a directory having a "." in the name (".env.310" for example) causing extra intermediate modules. + # Also, important to replace "." at the start of paths, as those are considered relative imports. + path_parts = tuple(x.replace(".", "_") for x in path_parts) + + return ".".join(path_parts) + + +def insert_missing_modules(modules: dict[str, ModuleType], module_name: str) -> None: + """ + Used by ``import_path`` to create intermediate modules when using mode=importlib. + + When we want to import a module as "src.tests.test_foo" for example, we need + to create empty modules "src" and "src.tests" after inserting "src.tests.test_foo", + otherwise "src.tests.test_foo" is not importable by ``__import__``. + """ + module_parts = module_name.split(".") + while module_name: + parent_module_name, _, child_name = module_name.rpartition(".") + if parent_module_name: + parent_module = modules.get(parent_module_name) + if parent_module is None: + try: + # If sys.meta_path is empty, calling import_module will issue + # a warning and raise ModuleNotFoundError. To avoid the + # warning, we check sys.meta_path explicitly and raise the error + # ourselves to fall back to creating a dummy module. + if not sys.meta_path: + raise ModuleNotFoundError + parent_module = importlib.import_module(parent_module_name) + except ModuleNotFoundError: + parent_module = ModuleType( + module_name, + doc="Empty module created by pytest's importmode=importlib.", + ) + modules[parent_module_name] = parent_module + + # Add child attribute to the parent that can reference the child + # modules. + if not hasattr(parent_module, child_name): + setattr(parent_module, child_name, modules[module_name]) + + module_parts.pop(-1) + module_name = ".".join(module_parts) + + +def resolve_package_path(path: Path) -> Path | None: + """Return the Python package path by looking for the last + directory upwards which still contains an __init__.py. + + Returns None if it cannot be determined. + """ + result = None + for parent in itertools.chain((path,), path.parents): + if parent.is_dir(): + if not (parent / "__init__.py").is_file(): + break + if not parent.name.isidentifier(): + break + result = parent + return result + + +def resolve_pkg_root_and_module_name( + path: Path, *, consider_namespace_packages: bool = False +) -> tuple[Path, str]: + """ + Return the path to the directory of the root package that contains the + given Python file, and its module name: + + src/ + app/ + __init__.py + core/ + __init__.py + models.py + + Passing the full path to `models.py` will yield Path("src") and "app.core.models". + + If consider_namespace_packages is True, then we additionally check upwards in the hierarchy + for namespace packages: + + https://packaging.python.org/en/latest/guides/packaging-namespace-packages + + Raises CouldNotResolvePathError if the given path does not belong to a package (missing any __init__.py files). + """ + pkg_root: Path | None = None + pkg_path = resolve_package_path(path) + if pkg_path is not None: + pkg_root = pkg_path.parent + if consider_namespace_packages: + start = pkg_root if pkg_root is not None else path.parent + for candidate in (start, *start.parents): + module_name = compute_module_name(candidate, path) + if module_name and is_importable(module_name, path): + # Point the pkg_root to the root of the namespace package. + pkg_root = candidate + break + + if pkg_root is not None: + module_name = compute_module_name(pkg_root, path) + if module_name: + return pkg_root, module_name + + raise CouldNotResolvePathError(f"Could not resolve for {path}") + + +def is_importable(module_name: str, module_path: Path) -> bool: + """ + Return if the given module path could be imported normally by Python, akin to the user + entering the REPL and importing the corresponding module name directly, and corresponds + to the module_path specified. + + :param module_name: + Full module name that we want to check if is importable. + For example, "app.models". + + :param module_path: + Full path to the python module/package we want to check if is importable. + For example, "/projects/src/app/models.py". + """ + try: + # Note this is different from what we do in ``_import_module_using_spec``, where we explicitly search through + # sys.meta_path to be able to pass the path of the module that we want to import (``meta_importer.find_spec``). + # Using importlib.util.find_spec() is different, it gives the same results as trying to import + # the module normally in the REPL. + spec = importlib.util.find_spec(module_name) + except (ImportError, ValueError, ImportWarning): + return False + else: + return spec_matches_module_path(spec, module_path) + + +def compute_module_name(root: Path, module_path: Path) -> str | None: + """Compute a module name based on a path and a root anchor.""" + try: + path_without_suffix = module_path.with_suffix("") + except ValueError: + # Empty paths (such as Path.cwd()) might break meta_path hooks (like our own assertion rewriter). + return None + + try: + relative = path_without_suffix.relative_to(root) + except ValueError: # pragma: no cover + return None + names = list(relative.parts) + if not names: + return None + if names[-1] == "__init__": + names.pop() + return ".".join(names) + + +class CouldNotResolvePathError(Exception): + """Custom exception raised by resolve_pkg_root_and_module_name.""" + + +def scandir( + path: str | os.PathLike[str], + sort_key: Callable[[os.DirEntry[str]], object] = lambda entry: entry.name, +) -> list[os.DirEntry[str]]: + """Scan a directory recursively, in breadth-first order. + + The returned entries are sorted according to the given key. + The default is to sort by name. + If the directory does not exist, return an empty list. + """ + entries = [] + # Attempt to create a scandir iterator for the given path. + try: + scandir_iter = os.scandir(path) + except FileNotFoundError: + # If the directory does not exist, return an empty list. + return [] + # Use the scandir iterator in a context manager to ensure it is properly closed. + with scandir_iter as s: + for entry in s: + try: + entry.is_file() + except OSError as err: + if _ignore_error(err): + continue + # Reraise non-ignorable errors to avoid hiding issues. + raise + entries.append(entry) + entries.sort(key=sort_key) # type: ignore[arg-type] + return entries + + +def visit( + path: str | os.PathLike[str], recurse: Callable[[os.DirEntry[str]], bool] +) -> Iterator[os.DirEntry[str]]: + """Walk a directory recursively, in breadth-first order. + + The `recurse` predicate determines whether a directory is recursed. + + Entries at each directory level are sorted. + """ + entries = scandir(path) + yield from entries + for entry in entries: + if entry.is_dir() and recurse(entry): + yield from visit(entry.path, recurse) + + +def absolutepath(path: str | os.PathLike[str]) -> Path: + """Convert a path to an absolute path using os.path.abspath. + + Prefer this over Path.resolve() (see #6523). + Prefer this over Path.absolute() (not public, doesn't normalize). + """ + return Path(os.path.abspath(path)) + + +def commonpath(path1: Path, path2: Path) -> Path | None: + """Return the common part shared with the other path, or None if there is + no common part. + + If one path is relative and one is absolute, returns None. + """ + try: + return Path(os.path.commonpath((str(path1), str(path2)))) + except ValueError: + return None + + +def bestrelpath(directory: Path, dest: Path) -> str: + """Return a string which is a relative path from directory to dest such + that directory/bestrelpath == dest. + + The paths must be either both absolute or both relative. + + If no such path can be determined, returns dest. + """ + assert isinstance(directory, Path) + assert isinstance(dest, Path) + if dest == directory: + return os.curdir + # Find the longest common directory. + base = commonpath(directory, dest) + # Can be the case on Windows for two absolute paths on different drives. + # Can be the case for two relative paths without common prefix. + # Can be the case for a relative path and an absolute path. + if not base: + return str(dest) + reldirectory = directory.relative_to(base) + reldest = dest.relative_to(base) + return os.path.join( + # Back from directory to base. + *([os.pardir] * len(reldirectory.parts)), + # Forward from base to dest. + *reldest.parts, + ) + + +def safe_exists(p: Path) -> bool: + """Like Path.exists(), but account for input arguments that might be too long (#11394).""" + try: + return p.exists() + except (ValueError, OSError): + # ValueError: stat: path too long for Windows + # OSError: [WinError 123] The filename, directory name, or volume label syntax is incorrect + return False + + +def samefile_nofollow(p1: Path, p2: Path) -> bool: + """Test whether two paths reference the same actual file or directory. + + Unlike Path.samefile(), does not resolve symlinks. + """ + return os.path.samestat(p1.lstat(), p2.lstat()) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/py.typed b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/pytester.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/pytester.py new file mode 100644 index 0000000..1cd5f05 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/pytester.py @@ -0,0 +1,1791 @@ +# mypy: allow-untyped-defs +"""(Disabled by default) support for testing pytest and pytest plugins. + +PYTEST_DONT_REWRITE +""" + +from __future__ import annotations + +import collections.abc +from collections.abc import Callable +from collections.abc import Generator +from collections.abc import Iterable +from collections.abc import Sequence +import contextlib +from fnmatch import fnmatch +import gc +import importlib +from io import StringIO +import locale +import os +from pathlib import Path +import platform +import re +import shutil +import subprocess +import sys +import traceback +from typing import Any +from typing import Final +from typing import final +from typing import IO +from typing import Literal +from typing import overload +from typing import TextIO +from typing import TYPE_CHECKING +from weakref import WeakKeyDictionary + +from iniconfig import IniConfig +from iniconfig import SectionWrapper + +from _pytest import timing +from _pytest._code import Source +from _pytest.capture import _get_multicapture +from _pytest.compat import NOTSET +from _pytest.compat import NotSetType +from _pytest.config import _PluggyPlugin +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import main +from _pytest.config import PytestPluginManager +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import fail +from _pytest.outcomes import importorskip +from _pytest.outcomes import skip +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import make_numbered_dir +from _pytest.reports import CollectReport +from _pytest.reports import TestReport +from _pytest.tmpdir import TempPathFactory +from _pytest.warning_types import PytestFDWarning + + +if TYPE_CHECKING: + import pexpect + + +pytest_plugins = ["pytester_assertions"] + + +IGNORE_PAM = [ # filenames added when obtaining details about the current user + "/var/lib/sss/mc/passwd" +] + + +def pytest_addoption(parser: Parser) -> None: + parser.addoption( + "--lsof", + action="store_true", + dest="lsof", + default=False, + help="Run FD checks if lsof is available", + ) + + parser.addoption( + "--runpytest", + default="inprocess", + dest="runpytest", + choices=("inprocess", "subprocess"), + help=( + "Run pytest sub runs in tests using an 'inprocess' " + "or 'subprocess' (python -m main) method" + ), + ) + + parser.addini( + "pytester_example_dir", help="Directory to take the pytester example files from" + ) + + +def pytest_configure(config: Config) -> None: + if config.getvalue("lsof"): + checker = LsofFdLeakChecker() + if checker.matching_platform(): + config.pluginmanager.register(checker) + + config.addinivalue_line( + "markers", + "pytester_example_path(*path_segments): join the given path " + "segments to `pytester_example_dir` for this test.", + ) + + +class LsofFdLeakChecker: + def get_open_files(self) -> list[tuple[str, str]]: + if sys.version_info >= (3, 11): + # New in Python 3.11, ignores utf-8 mode + encoding = locale.getencoding() + else: + encoding = locale.getpreferredencoding(False) + out = subprocess.run( + ("lsof", "-Ffn0", "-p", str(os.getpid())), + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + check=True, + text=True, + encoding=encoding, + ).stdout + + def isopen(line: str) -> bool: + return line.startswith("f") and ( + "deleted" not in line + and "mem" not in line + and "txt" not in line + and "cwd" not in line + ) + + open_files = [] + + for line in out.split("\n"): + if isopen(line): + fields = line.split("\0") + fd = fields[0][1:] + filename = fields[1][1:] + if filename in IGNORE_PAM: + continue + if filename.startswith("/"): + open_files.append((fd, filename)) + + return open_files + + def matching_platform(self) -> bool: + try: + subprocess.run(("lsof", "-v"), check=True) + except (OSError, subprocess.CalledProcessError): + return False + else: + return True + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_runtest_protocol(self, item: Item) -> Generator[None, object, object]: + lines1 = self.get_open_files() + try: + return (yield) + finally: + if hasattr(sys, "pypy_version_info"): + gc.collect() + lines2 = self.get_open_files() + + new_fds = {t[0] for t in lines2} - {t[0] for t in lines1} + leaked_files = [t for t in lines2 if t[0] in new_fds] + if leaked_files: + error = [ + f"***** {len(leaked_files)} FD leakage detected", + *(str(f) for f in leaked_files), + "*** Before:", + *(str(f) for f in lines1), + "*** After:", + *(str(f) for f in lines2), + f"***** {len(leaked_files)} FD leakage detected", + "*** function {}:{}: {} ".format(*item.location), + "See issue #2366", + ] + item.warn(PytestFDWarning("\n".join(error))) + + +# used at least by pytest-xdist plugin + + +@fixture +def _pytest(request: FixtureRequest) -> PytestArg: + """Return a helper which offers a gethookrecorder(hook) method which + returns a HookRecorder instance which helps to make assertions about called + hooks.""" + return PytestArg(request) + + +class PytestArg: + def __init__(self, request: FixtureRequest) -> None: + self._request = request + + def gethookrecorder(self, hook) -> HookRecorder: + hookrecorder = HookRecorder(hook._pm) + self._request.addfinalizer(hookrecorder.finish_recording) + return hookrecorder + + +def get_public_names(values: Iterable[str]) -> list[str]: + """Only return names from iterator values without a leading underscore.""" + return [x for x in values if x[0] != "_"] + + +@final +class RecordedHookCall: + """A recorded call to a hook. + + The arguments to the hook call are set as attributes. + For example: + + .. code-block:: python + + calls = hook_recorder.getcalls("pytest_runtest_setup") + # Suppose pytest_runtest_setup was called once with `item=an_item`. + assert calls[0].item is an_item + """ + + def __init__(self, name: str, kwargs) -> None: + self.__dict__.update(kwargs) + self._name = name + + def __repr__(self) -> str: + d = self.__dict__.copy() + del d["_name"] + return f"" + + if TYPE_CHECKING: + # The class has undetermined attributes, this tells mypy about it. + def __getattr__(self, key: str): ... + + +@final +class HookRecorder: + """Record all hooks called in a plugin manager. + + Hook recorders are created by :class:`Pytester`. + + This wraps all the hook calls in the plugin manager, recording each call + before propagating the normal calls. + """ + + def __init__( + self, pluginmanager: PytestPluginManager, *, _ispytest: bool = False + ) -> None: + check_ispytest(_ispytest) + + self._pluginmanager = pluginmanager + self.calls: list[RecordedHookCall] = [] + self.ret: int | ExitCode | None = None + + def before(hook_name: str, hook_impls, kwargs) -> None: + self.calls.append(RecordedHookCall(hook_name, kwargs)) + + def after(outcome, hook_name: str, hook_impls, kwargs) -> None: + pass + + self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after) + + def finish_recording(self) -> None: + self._undo_wrapping() + + def getcalls(self, names: str | Iterable[str]) -> list[RecordedHookCall]: + """Get all recorded calls to hooks with the given names (or name).""" + if isinstance(names, str): + names = names.split() + return [call for call in self.calls if call._name in names] + + def assert_contains(self, entries: Sequence[tuple[str, str]]) -> None: + __tracebackhide__ = True + i = 0 + entries = list(entries) + # Since Python 3.13, f_locals is not a dict, but eval requires a dict. + backlocals = dict(sys._getframe(1).f_locals) + while entries: + name, check = entries.pop(0) + for ind, call in enumerate(self.calls[i:]): + if call._name == name: + print("NAMEMATCH", name, call) + if eval(check, backlocals, call.__dict__): + print("CHECKERMATCH", repr(check), "->", call) + else: + print("NOCHECKERMATCH", repr(check), "-", call) + continue + i += ind + 1 + break + print("NONAMEMATCH", name, "with", call) + else: + fail(f"could not find {name!r} check {check!r}") + + def popcall(self, name: str) -> RecordedHookCall: + __tracebackhide__ = True + for i, call in enumerate(self.calls): + if call._name == name: + del self.calls[i] + return call + lines = [f"could not find call {name!r}, in:"] + lines.extend([f" {x}" for x in self.calls]) + fail("\n".join(lines)) + + def getcall(self, name: str) -> RecordedHookCall: + values = self.getcalls(name) + assert len(values) == 1, (name, values) + return values[0] + + # functionality for test reports + + @overload + def getreports( + self, + names: Literal["pytest_collectreport"], + ) -> Sequence[CollectReport]: ... + + @overload + def getreports( + self, + names: Literal["pytest_runtest_logreport"], + ) -> Sequence[TestReport]: ... + + @overload + def getreports( + self, + names: str | Iterable[str] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[CollectReport | TestReport]: ... + + def getreports( + self, + names: str | Iterable[str] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[CollectReport | TestReport]: + return [x.report for x in self.getcalls(names)] + + def matchreport( + self, + inamepart: str = "", + names: str | Iterable[str] = ( + "pytest_runtest_logreport", + "pytest_collectreport", + ), + when: str | None = None, + ) -> CollectReport | TestReport: + """Return a testreport whose dotted import path matches.""" + values = [] + for rep in self.getreports(names=names): + if not when and rep.when != "call" and rep.passed: + # setup/teardown passing reports - let's ignore those + continue + if when and rep.when != when: + continue + if not inamepart or inamepart in rep.nodeid.split("::"): + values.append(rep) + if not values: + raise ValueError( + f"could not find test report matching {inamepart!r}: " + "no test reports at all!" + ) + if len(values) > 1: + raise ValueError( + f"found 2 or more testreports matching {inamepart!r}: {values}" + ) + return values[0] + + @overload + def getfailures( + self, + names: Literal["pytest_collectreport"], + ) -> Sequence[CollectReport]: ... + + @overload + def getfailures( + self, + names: Literal["pytest_runtest_logreport"], + ) -> Sequence[TestReport]: ... + + @overload + def getfailures( + self, + names: str | Iterable[str] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[CollectReport | TestReport]: ... + + def getfailures( + self, + names: str | Iterable[str] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[CollectReport | TestReport]: + return [rep for rep in self.getreports(names) if rep.failed] + + def getfailedcollections(self) -> Sequence[CollectReport]: + return self.getfailures("pytest_collectreport") + + def listoutcomes( + self, + ) -> tuple[ + Sequence[TestReport], + Sequence[CollectReport | TestReport], + Sequence[CollectReport | TestReport], + ]: + passed = [] + skipped = [] + failed = [] + for rep in self.getreports( + ("pytest_collectreport", "pytest_runtest_logreport") + ): + if rep.passed: + if rep.when == "call": + assert isinstance(rep, TestReport) + passed.append(rep) + elif rep.skipped: + skipped.append(rep) + else: + assert rep.failed, f"Unexpected outcome: {rep!r}" + failed.append(rep) + return passed, skipped, failed + + def countoutcomes(self) -> list[int]: + return [len(x) for x in self.listoutcomes()] + + def assertoutcome(self, passed: int = 0, skipped: int = 0, failed: int = 0) -> None: + __tracebackhide__ = True + from _pytest.pytester_assertions import assertoutcome + + outcomes = self.listoutcomes() + assertoutcome( + outcomes, + passed=passed, + skipped=skipped, + failed=failed, + ) + + def clear(self) -> None: + self.calls[:] = [] + + +@fixture +def linecomp() -> LineComp: + """A :class: `LineComp` instance for checking that an input linearly + contains a sequence of strings.""" + return LineComp() + + +@fixture(name="LineMatcher") +def LineMatcher_fixture(request: FixtureRequest) -> type[LineMatcher]: + """A reference to the :class: `LineMatcher`. + + This is instantiable with a list of lines (without their trailing newlines). + This is useful for testing large texts, such as the output of commands. + """ + return LineMatcher + + +@fixture +def pytester( + request: FixtureRequest, tmp_path_factory: TempPathFactory, monkeypatch: MonkeyPatch +) -> Pytester: + """ + Facilities to write tests/configuration files, execute pytest in isolation, and match + against expected output, perfect for black-box testing of pytest plugins. + + It attempts to isolate the test run from external factors as much as possible, modifying + the current working directory to ``path`` and environment variables during initialization. + + It is particularly useful for testing plugins. It is similar to the :fixture:`tmp_path` + fixture but provides methods which aid in testing pytest itself. + """ + return Pytester(request, tmp_path_factory, monkeypatch, _ispytest=True) + + +@fixture +def _sys_snapshot() -> Generator[None]: + snappaths = SysPathsSnapshot() + snapmods = SysModulesSnapshot() + yield + snapmods.restore() + snappaths.restore() + + +@fixture +def _config_for_test() -> Generator[Config]: + from _pytest.config import get_config + + config = get_config() + yield config + config._ensure_unconfigure() # cleanup, e.g. capman closing tmpfiles. + + +# Regex to match the session duration string in the summary: "74.34s". +rex_session_duration = re.compile(r"\d+\.\d\ds") +# Regex to match all the counts and phrases in the summary line: "34 passed, 111 skipped". +rex_outcome = re.compile(r"(\d+) (\w+)") + + +@final +class RunResult: + """The result of running a command from :class:`~pytest.Pytester`.""" + + def __init__( + self, + ret: int | ExitCode, + outlines: list[str], + errlines: list[str], + duration: float, + ) -> None: + try: + self.ret: int | ExitCode = ExitCode(ret) + """The return value.""" + except ValueError: + self.ret = ret + self.outlines = outlines + """List of lines captured from stdout.""" + self.errlines = errlines + """List of lines captured from stderr.""" + self.stdout = LineMatcher(outlines) + """:class:`~pytest.LineMatcher` of stdout. + + Use e.g. :func:`str(stdout) ` to reconstruct stdout, or the commonly used + :func:`stdout.fnmatch_lines() ` method. + """ + self.stderr = LineMatcher(errlines) + """:class:`~pytest.LineMatcher` of stderr.""" + self.duration = duration + """Duration in seconds.""" + + def __repr__(self) -> str: + return ( + f"" + ) + + def parseoutcomes(self) -> dict[str, int]: + """Return a dictionary of outcome noun -> count from parsing the terminal + output that the test process produced. + + The returned nouns will always be in plural form:: + + ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ==== + + Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``. + """ + return self.parse_summary_nouns(self.outlines) + + @classmethod + def parse_summary_nouns(cls, lines) -> dict[str, int]: + """Extract the nouns from a pytest terminal summary line. + + It always returns the plural noun for consistency:: + + ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ==== + + Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``. + """ + for line in reversed(lines): + if rex_session_duration.search(line): + outcomes = rex_outcome.findall(line) + ret = {noun: int(count) for (count, noun) in outcomes} + break + else: + raise ValueError("Pytest terminal summary report not found") + + to_plural = { + "warning": "warnings", + "error": "errors", + } + return {to_plural.get(k, k): v for k, v in ret.items()} + + def assert_outcomes( + self, + passed: int = 0, + skipped: int = 0, + failed: int = 0, + errors: int = 0, + xpassed: int = 0, + xfailed: int = 0, + warnings: int | None = None, + deselected: int | None = None, + ) -> None: + """ + Assert that the specified outcomes appear with the respective + numbers (0 means it didn't occur) in the text output from a test run. + + ``warnings`` and ``deselected`` are only checked if not None. + """ + __tracebackhide__ = True + from _pytest.pytester_assertions import assert_outcomes + + outcomes = self.parseoutcomes() + assert_outcomes( + outcomes, + passed=passed, + skipped=skipped, + failed=failed, + errors=errors, + xpassed=xpassed, + xfailed=xfailed, + warnings=warnings, + deselected=deselected, + ) + + +class SysModulesSnapshot: + def __init__(self, preserve: Callable[[str], bool] | None = None) -> None: + self.__preserve = preserve + self.__saved = dict(sys.modules) + + def restore(self) -> None: + if self.__preserve: + self.__saved.update( + (k, m) for k, m in sys.modules.items() if self.__preserve(k) + ) + sys.modules.clear() + sys.modules.update(self.__saved) + + +class SysPathsSnapshot: + def __init__(self) -> None: + self.__saved = list(sys.path), list(sys.meta_path) + + def restore(self) -> None: + sys.path[:], sys.meta_path[:] = self.__saved + + +@final +class Pytester: + """ + Facilities to write tests/configuration files, execute pytest in isolation, and match + against expected output, perfect for black-box testing of pytest plugins. + + It attempts to isolate the test run from external factors as much as possible, modifying + the current working directory to :attr:`path` and environment variables during initialization. + """ + + __test__ = False + + CLOSE_STDIN: Final = NOTSET + + class TimeoutExpired(Exception): + pass + + def __init__( + self, + request: FixtureRequest, + tmp_path_factory: TempPathFactory, + monkeypatch: MonkeyPatch, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._request = request + self._mod_collections: WeakKeyDictionary[Collector, list[Item | Collector]] = ( + WeakKeyDictionary() + ) + if request.function: + name: str = request.function.__name__ + else: + name = request.node.name + self._name = name + self._path: Path = tmp_path_factory.mktemp(name, numbered=True) + #: A list of plugins to use with :py:meth:`parseconfig` and + #: :py:meth:`runpytest`. Initially this is an empty list but plugins can + #: be added to the list. + #: + #: When running in subprocess mode, specify plugins by name (str) - adding + #: plugin objects directly is not supported. + self.plugins: list[str | _PluggyPlugin] = [] + self._sys_path_snapshot = SysPathsSnapshot() + self._sys_modules_snapshot = self.__take_sys_modules_snapshot() + self._request.addfinalizer(self._finalize) + self._method = self._request.config.getoption("--runpytest") + self._test_tmproot = tmp_path_factory.mktemp(f"tmp-{name}", numbered=True) + + self._monkeypatch = mp = monkeypatch + self.chdir() + mp.setenv("PYTEST_DEBUG_TEMPROOT", str(self._test_tmproot)) + # Ensure no unexpected caching via tox. + mp.delenv("TOX_ENV_DIR", raising=False) + # Discard outer pytest options. + mp.delenv("PYTEST_ADDOPTS", raising=False) + # Ensure no user config is used. + tmphome = str(self.path) + mp.setenv("HOME", tmphome) + mp.setenv("USERPROFILE", tmphome) + # Do not use colors for inner runs by default. + mp.setenv("PY_COLORS", "0") + + @property + def path(self) -> Path: + """Temporary directory path used to create files/run tests from, etc.""" + return self._path + + def __repr__(self) -> str: + return f"" + + def _finalize(self) -> None: + """ + Clean up global state artifacts. + + Some methods modify the global interpreter state and this tries to + clean this up. It does not remove the temporary directory however so + it can be looked at after the test run has finished. + """ + self._sys_modules_snapshot.restore() + self._sys_path_snapshot.restore() + + def __take_sys_modules_snapshot(self) -> SysModulesSnapshot: + # Some zope modules used by twisted-related tests keep internal state + # and can't be deleted; we had some trouble in the past with + # `zope.interface` for example. + # + # Preserve readline due to https://bugs.python.org/issue41033. + # pexpect issues a SIGWINCH. + def preserve_module(name): + return name.startswith(("zope", "readline")) + + return SysModulesSnapshot(preserve=preserve_module) + + def make_hook_recorder(self, pluginmanager: PytestPluginManager) -> HookRecorder: + """Create a new :class:`HookRecorder` for a :class:`PytestPluginManager`.""" + pluginmanager.reprec = reprec = HookRecorder(pluginmanager, _ispytest=True) # type: ignore[attr-defined] + self._request.addfinalizer(reprec.finish_recording) + return reprec + + def chdir(self) -> None: + """Cd into the temporary directory. + + This is done automatically upon instantiation. + """ + self._monkeypatch.chdir(self.path) + + def _makefile( + self, + ext: str, + lines: Sequence[Any | bytes], + files: dict[str, str], + encoding: str = "utf-8", + ) -> Path: + items = list(files.items()) + + if ext is None: + raise TypeError("ext must not be None") + + if ext and not ext.startswith("."): + raise ValueError( + f"pytester.makefile expects a file extension, try .{ext} instead of {ext}" + ) + + def to_text(s: Any | bytes) -> str: + return s.decode(encoding) if isinstance(s, bytes) else str(s) + + if lines: + source = "\n".join(to_text(x) for x in lines) + basename = self._name + items.insert(0, (basename, source)) + + ret = None + for basename, value in items: + p = self.path.joinpath(basename).with_suffix(ext) + p.parent.mkdir(parents=True, exist_ok=True) + source_ = Source(value) + source = "\n".join(to_text(line) for line in source_.lines) + p.write_text(source.strip(), encoding=encoding) + if ret is None: + ret = p + assert ret is not None + return ret + + def makefile(self, ext: str, *args: str, **kwargs: str) -> Path: + r"""Create new text file(s) in the test directory. + + :param ext: + The extension the file(s) should use, including the dot, e.g. `.py`. + :param args: + All args are treated as strings and joined using newlines. + The result is written as contents to the file. The name of the + file is based on the test function requesting this fixture. + :param kwargs: + Each keyword is the name of a file, while the value of it will + be written as contents of the file. + :returns: + The first created file. + + Examples: + + .. code-block:: python + + pytester.makefile(".txt", "line1", "line2") + + pytester.makefile(".ini", pytest="[pytest]\naddopts=-rs\n") + + To create binary files, use :meth:`pathlib.Path.write_bytes` directly: + + .. code-block:: python + + filename = pytester.path.joinpath("foo.bin") + filename.write_bytes(b"...") + """ + return self._makefile(ext, args, kwargs) + + def makeconftest(self, source: str) -> Path: + """Write a conftest.py file. + + :param source: The contents. + :returns: The conftest.py file. + """ + return self.makepyfile(conftest=source) + + def makeini(self, source: str) -> Path: + """Write a tox.ini file. + + :param source: The contents. + :returns: The tox.ini file. + """ + return self.makefile(".ini", tox=source) + + def maketoml(self, source: str) -> Path: + """Write a pytest.toml file. + + :param source: The contents. + :returns: The pytest.toml file. + + .. versionadded:: 9.0 + """ + return self.makefile(".toml", pytest=source) + + def getinicfg(self, source: str) -> SectionWrapper: + """Return the pytest section from the tox.ini config file.""" + p = self.makeini(source) + return IniConfig(str(p))["pytest"] + + def makepyprojecttoml(self, source: str) -> Path: + """Write a pyproject.toml file. + + :param source: The contents. + :returns: The pyproject.ini file. + + .. versionadded:: 6.0 + """ + return self.makefile(".toml", pyproject=source) + + def makepyfile(self, *args, **kwargs) -> Path: + r"""Shortcut for .makefile() with a .py extension. + + Defaults to the test name with a '.py' extension, e.g test_foobar.py, overwriting + existing files. + + Examples: + + .. code-block:: python + + def test_something(pytester): + # Initial file is created test_something.py. + pytester.makepyfile("foobar") + # To create multiple files, pass kwargs accordingly. + pytester.makepyfile(custom="foobar") + # At this point, both 'test_something.py' & 'custom.py' exist in the test directory. + + """ + return self._makefile(".py", args, kwargs) + + def maketxtfile(self, *args, **kwargs) -> Path: + r"""Shortcut for .makefile() with a .txt extension. + + Defaults to the test name with a '.txt' extension, e.g test_foobar.txt, overwriting + existing files. + + Examples: + + .. code-block:: python + + def test_something(pytester): + # Initial file is created test_something.txt. + pytester.maketxtfile("foobar") + # To create multiple files, pass kwargs accordingly. + pytester.maketxtfile(custom="foobar") + # At this point, both 'test_something.txt' & 'custom.txt' exist in the test directory. + + """ + return self._makefile(".txt", args, kwargs) + + def syspathinsert(self, path: str | os.PathLike[str] | None = None) -> None: + """Prepend a directory to sys.path, defaults to :attr:`path`. + + This is undone automatically when this object dies at the end of each + test. + + :param path: + The path. + """ + if path is None: + path = self.path + + self._monkeypatch.syspath_prepend(str(path)) + + def mkdir(self, name: str | os.PathLike[str]) -> Path: + """Create a new (sub)directory. + + :param name: + The name of the directory, relative to the pytester path. + :returns: + The created directory. + :rtype: pathlib.Path + """ + p = self.path / name + p.mkdir() + return p + + def mkpydir(self, name: str | os.PathLike[str]) -> Path: + """Create a new python package. + + This creates a (sub)directory with an empty ``__init__.py`` file so it + gets recognised as a Python package. + """ + p = self.path / name + p.mkdir() + p.joinpath("__init__.py").touch() + return p + + def copy_example(self, name: str | None = None) -> Path: + """Copy file from project's directory into the testdir. + + :param name: + The name of the file to copy. + :return: + Path to the copied directory (inside ``self.path``). + :rtype: pathlib.Path + """ + example_dir_ = self._request.config.getini("pytester_example_dir") + if example_dir_ is None: + raise ValueError("pytester_example_dir is unset, can't copy examples") + example_dir: Path = self._request.config.rootpath / example_dir_ + + for extra_element in self._request.node.iter_markers("pytester_example_path"): + assert extra_element.args + example_dir = example_dir.joinpath(*extra_element.args) + + if name is None: + func_name = self._name + maybe_dir = example_dir / func_name + maybe_file = example_dir / (func_name + ".py") + + if maybe_dir.is_dir(): + example_path = maybe_dir + elif maybe_file.is_file(): + example_path = maybe_file + else: + raise LookupError( + f"{func_name} can't be found as module or package in {example_dir}" + ) + else: + example_path = example_dir.joinpath(name) + + if example_path.is_dir() and not example_path.joinpath("__init__.py").is_file(): + shutil.copytree(example_path, self.path, symlinks=True, dirs_exist_ok=True) + return self.path + elif example_path.is_file(): + result = self.path.joinpath(example_path.name) + shutil.copy(example_path, result) + return result + else: + raise LookupError( + f'example "{example_path}" is not found as a file or directory' + ) + + def getnode(self, config: Config, arg: str | os.PathLike[str]) -> Collector | Item: + """Get the collection node of a file. + + :param config: + A pytest config. + See :py:meth:`parseconfig` and :py:meth:`parseconfigure` for creating it. + :param arg: + Path to the file. + :returns: + The node. + """ + session = Session.from_config(config) + assert "::" not in str(arg) + p = Path(os.path.abspath(arg)) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([str(p)], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK) + return res + + def getpathnode(self, path: str | os.PathLike[str]) -> Collector | Item: + """Return the collection node of a file. + + This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to + create the (configured) pytest Config instance. + + :param path: + Path to the file. + :returns: + The node. + """ + path = Path(path) + config = self.parseconfigure(path) + session = Session.from_config(config) + x = bestrelpath(session.path, path) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([x], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK) + return res + + def genitems(self, colitems: Sequence[Item | Collector]) -> list[Item]: + """Generate all test items from a collection node. + + This recurses into the collection node and returns a list of all the + test items contained within. + + :param colitems: + The collection nodes. + :returns: + The collected items. + """ + session = colitems[0].session + result: list[Item] = [] + for colitem in colitems: + result.extend(session.genitems(colitem)) + return result + + def runitem(self, source: str) -> Any: + """Run the "test_func" Item. + + The calling test instance (class containing the test method) must + provide a ``.getrunner()`` method which should return a runner which + can run the test protocol for a single item, e.g. + ``_pytest.runner.runtestprotocol``. + """ + # used from runner functional tests + item = self.getitem(source) + # the test class where we are called from wants to provide the runner + testclassinstance = self._request.instance + runner = testclassinstance.getrunner() + return runner(item) + + def inline_runsource(self, source: str, *cmdlineargs) -> HookRecorder: + """Run a test module in process using ``pytest.main()``. + + This run writes "source" into a temporary file and runs + ``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance + for the result. + + :param source: The source code of the test module. + :param cmdlineargs: Any extra command line arguments to use. + """ + p = self.makepyfile(source) + values = [*list(cmdlineargs), p] + return self.inline_run(*values) + + def inline_genitems(self, *args) -> tuple[list[Item], HookRecorder]: + """Run ``pytest.main(['--collect-only'])`` in-process. + + Runs the :py:func:`pytest.main` function to run all of pytest inside + the test process itself like :py:meth:`inline_run`, but returns a + tuple of the collected items and a :py:class:`HookRecorder` instance. + """ + rec = self.inline_run("--collect-only", *args) + items = [x.item for x in rec.getcalls("pytest_itemcollected")] + return items, rec + + def inline_run( + self, + *args: str | os.PathLike[str], + plugins=(), + no_reraise_ctrlc: bool = False, + ) -> HookRecorder: + """Run ``pytest.main()`` in-process, returning a HookRecorder. + + Runs the :py:func:`pytest.main` function to run all of pytest inside + the test process itself. This means it can return a + :py:class:`HookRecorder` instance which gives more detailed results + from that run than can be done by matching stdout/stderr from + :py:meth:`runpytest`. + + :param args: + Command line arguments to pass to :py:func:`pytest.main`. + :param plugins: + Extra plugin instances the ``pytest.main()`` instance should use. + :param no_reraise_ctrlc: + Typically we reraise keyboard interrupts from the child run. If + True, the KeyboardInterrupt exception is captured. + """ + from _pytest.unraisableexception import gc_collect_iterations_key + + # (maybe a cpython bug?) the importlib cache sometimes isn't updated + # properly between file creation and inline_run (especially if imports + # are interspersed with file creation) + importlib.invalidate_caches() + + plugins = list(plugins) + finalizers = [] + try: + # Any sys.module or sys.path changes done while running pytest + # inline should be reverted after the test run completes to avoid + # clashing with later inline tests run within the same pytest test, + # e.g. just because they use matching test module names. + finalizers.append(self.__take_sys_modules_snapshot().restore) + finalizers.append(SysPathsSnapshot().restore) + + # Important note: + # - our tests should not leave any other references/registrations + # laying around other than possibly loaded test modules + # referenced from sys.modules, as nothing will clean those up + # automatically + + rec = [] + + class PytesterHelperPlugin: + @staticmethod + def pytest_configure(config: Config) -> None: + rec.append(self.make_hook_recorder(config.pluginmanager)) + + # The unraisable plugin GC collect slows down inline + # pytester runs too much. + config.stash[gc_collect_iterations_key] = 0 + + plugins.append(PytesterHelperPlugin()) + ret = main([str(x) for x in args], plugins=plugins) + if len(rec) == 1: + reprec = rec.pop() + else: + + class reprec: # type: ignore + pass + + reprec.ret = ret + + # Typically we reraise keyboard interrupts from the child run + # because it's our user requesting interruption of the testing. + if ret == ExitCode.INTERRUPTED and not no_reraise_ctrlc: + calls = reprec.getcalls("pytest_keyboard_interrupt") + if calls and calls[-1].excinfo.type == KeyboardInterrupt: + raise KeyboardInterrupt() + return reprec + finally: + for finalizer in finalizers: + finalizer() + + def runpytest_inprocess( + self, *args: str | os.PathLike[str], **kwargs: Any + ) -> RunResult: + """Return result of running pytest in-process, providing a similar + interface to what self.runpytest() provides.""" + syspathinsert = kwargs.pop("syspathinsert", False) + + if syspathinsert: + self.syspathinsert() + instant = timing.Instant() + capture = _get_multicapture("sys") + capture.start_capturing() + try: + try: + reprec = self.inline_run(*args, **kwargs) + except SystemExit as e: + ret = e.args[0] + try: + ret = ExitCode(e.args[0]) + except ValueError: + pass + + class reprec: # type: ignore + ret = ret + + except Exception: + traceback.print_exc() + + class reprec: # type: ignore + ret = ExitCode(3) + + finally: + out, err = capture.readouterr() + capture.stop_capturing() + sys.stdout.write(out) + sys.stderr.write(err) + + assert reprec.ret is not None + res = RunResult( + reprec.ret, out.splitlines(), err.splitlines(), instant.elapsed().seconds + ) + res.reprec = reprec # type: ignore + return res + + def runpytest(self, *args: str | os.PathLike[str], **kwargs: Any) -> RunResult: + """Run pytest inline or in a subprocess, depending on the command line + option "--runpytest" and return a :py:class:`~pytest.RunResult`.""" + new_args = self._ensure_basetemp(args) + if self._method == "inprocess": + return self.runpytest_inprocess(*new_args, **kwargs) + elif self._method == "subprocess": + return self.runpytest_subprocess(*new_args, **kwargs) + raise RuntimeError(f"Unrecognized runpytest option: {self._method}") + + def _ensure_basetemp( + self, args: Sequence[str | os.PathLike[str]] + ) -> list[str | os.PathLike[str]]: + new_args = list(args) + for x in new_args: + if str(x).startswith("--basetemp"): + break + else: + new_args.append( + "--basetemp={}".format(self.path.parent.joinpath("basetemp")) + ) + return new_args + + def parseconfig(self, *args: str | os.PathLike[str]) -> Config: + """Return a new pytest :class:`pytest.Config` instance from given + commandline args. + + This invokes the pytest bootstrapping code in _pytest.config to create a + new :py:class:`pytest.PytestPluginManager` and call the + :hook:`pytest_cmdline_parse` hook to create a new :class:`pytest.Config` + instance. + + If :attr:`plugins` has been populated they should be plugin modules + to be registered with the plugin manager. + """ + import _pytest.config + + new_args = [str(x) for x in self._ensure_basetemp(args)] + + config = _pytest.config._prepareconfig(new_args, self.plugins) + # we don't know what the test will do with this half-setup config + # object and thus we make sure it gets unconfigured properly in any + # case (otherwise capturing could still be active, for example) + self._request.addfinalizer(config._ensure_unconfigure) + return config + + def parseconfigure(self, *args: str | os.PathLike[str]) -> Config: + """Return a new pytest configured Config instance. + + Returns a new :py:class:`pytest.Config` instance like + :py:meth:`parseconfig`, but also calls the :hook:`pytest_configure` + hook. + """ + config = self.parseconfig(*args) + config._do_configure() + return config + + def getitem( + self, source: str | os.PathLike[str], funcname: str = "test_func" + ) -> Item: + """Return the test item for a test function. + + Writes the source to a python file and runs pytest's collection on + the resulting module, returning the test item for the requested + function name. + + :param source: + The module source. + :param funcname: + The name of the test function for which to return a test item. + :returns: + The test item. + """ + items = self.getitems(source) + for item in items: + if item.name == funcname: + return item + assert 0, f"{funcname!r} item not found in module:\n{source}\nitems: {items}" + + def getitems(self, source: str | os.PathLike[str]) -> list[Item]: + """Return all test items collected from the module. + + Writes the source to a Python file and runs pytest's collection on + the resulting module, returning all test items contained within. + """ + modcol = self.getmodulecol(source) + return self.genitems([modcol]) + + def getmodulecol( + self, + source: str | os.PathLike[str], + configargs=(), + *, + withinit: bool = False, + ): + """Return the module collection node for ``source``. + + Writes ``source`` to a file using :py:meth:`makepyfile` and then + runs the pytest collection on it, returning the collection node for the + test module. + + :param source: + The source code of the module to collect. + + :param configargs: + Any extra arguments to pass to :py:meth:`parseconfigure`. + + :param withinit: + Whether to also write an ``__init__.py`` file to the same + directory to ensure it is a package. + """ + if isinstance(source, os.PathLike): + path = self.path.joinpath(source) + assert not withinit, "not supported for paths" + else: + kw = {self._name: str(source)} + path = self.makepyfile(**kw) + if withinit: + self.makepyfile(__init__="#") + self.config = config = self.parseconfigure(path, *configargs) + return self.getnode(config, path) + + def collect_by_name(self, modcol: Collector, name: str) -> Item | Collector | None: + """Return the collection node for name from the module collection. + + Searches a module collection node for a collection node matching the + given name. + + :param modcol: A module collection node; see :py:meth:`getmodulecol`. + :param name: The name of the node to return. + """ + if modcol not in self._mod_collections: + self._mod_collections[modcol] = list(modcol.collect()) + for colitem in self._mod_collections[modcol]: + if colitem.name == name: + return colitem + return None + + def popen( + self, + cmdargs: Sequence[str | os.PathLike[str]], + stdout: int | TextIO = subprocess.PIPE, + stderr: int | TextIO = subprocess.PIPE, + stdin: NotSetType | bytes | IO[Any] | int = CLOSE_STDIN, + **kw, + ): + """Invoke :py:class:`subprocess.Popen`. + + Calls :py:class:`subprocess.Popen` making sure the current working + directory is in ``PYTHONPATH``. + + You probably want to use :py:meth:`run` instead. + """ + env = os.environ.copy() + env["PYTHONPATH"] = os.pathsep.join( + filter(None, [os.getcwd(), env.get("PYTHONPATH", "")]) + ) + kw["env"] = env + + if stdin is self.CLOSE_STDIN: + kw["stdin"] = subprocess.PIPE + elif isinstance(stdin, bytes): + kw["stdin"] = subprocess.PIPE + else: + kw["stdin"] = stdin + + popen = subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw) + if stdin is self.CLOSE_STDIN: + assert popen.stdin is not None + popen.stdin.close() + elif isinstance(stdin, bytes): + assert popen.stdin is not None + popen.stdin.write(stdin) + + return popen + + def run( + self, + *cmdargs: str | os.PathLike[str], + timeout: float | None = None, + stdin: NotSetType | bytes | IO[Any] | int = CLOSE_STDIN, + ) -> RunResult: + """Run a command with arguments. + + Run a process using :py:class:`subprocess.Popen` saving the stdout and + stderr. + + :param cmdargs: + The sequence of arguments to pass to :py:class:`subprocess.Popen`, + with path-like objects being converted to :py:class:`str` + automatically. + :param timeout: + The period in seconds after which to timeout and raise + :py:class:`Pytester.TimeoutExpired`. + :param stdin: + Optional standard input. + + - If it is ``CLOSE_STDIN`` (Default), then this method calls + :py:class:`subprocess.Popen` with ``stdin=subprocess.PIPE``, and + the standard input is closed immediately after the new command is + started. + + - If it is of type :py:class:`bytes`, these bytes are sent to the + standard input of the command. + + - Otherwise, it is passed through to :py:class:`subprocess.Popen`. + For further information in this case, consult the document of the + ``stdin`` parameter in :py:class:`subprocess.Popen`. + :type stdin: _pytest.compat.NotSetType | bytes | IO[Any] | int + :returns: + The result. + + """ + __tracebackhide__ = True + + cmdargs = tuple(os.fspath(arg) for arg in cmdargs) + p1 = self.path.joinpath("stdout") + p2 = self.path.joinpath("stderr") + print("running:", *cmdargs) + print(" in:", Path.cwd()) + + with p1.open("w", encoding="utf8") as f1, p2.open("w", encoding="utf8") as f2: + instant = timing.Instant() + popen = self.popen( + cmdargs, + stdin=stdin, + stdout=f1, + stderr=f2, + ) + if popen.stdin is not None: + popen.stdin.close() + + def handle_timeout() -> None: + __tracebackhide__ = True + + timeout_message = f"{timeout} second timeout expired running: {cmdargs}" + + popen.kill() + popen.wait() + raise self.TimeoutExpired(timeout_message) + + if timeout is None: + ret = popen.wait() + else: + try: + ret = popen.wait(timeout) + except subprocess.TimeoutExpired: + handle_timeout() + f1.flush() + f2.flush() + + with p1.open(encoding="utf8") as f1, p2.open(encoding="utf8") as f2: + out = f1.read().splitlines() + err = f2.read().splitlines() + + self._dump_lines(out, sys.stdout) + self._dump_lines(err, sys.stderr) + + with contextlib.suppress(ValueError): + ret = ExitCode(ret) + return RunResult(ret, out, err, instant.elapsed().seconds) + + def _dump_lines(self, lines, fp): + try: + for line in lines: + print(line, file=fp) + except UnicodeEncodeError: + print(f"couldn't print to {fp} because of encoding") + + def _getpytestargs(self) -> tuple[str, ...]: + return sys.executable, "-mpytest" + + def runpython(self, script: os.PathLike[str]) -> RunResult: + """Run a python script using sys.executable as interpreter.""" + return self.run(sys.executable, script) + + def runpython_c(self, command: str) -> RunResult: + """Run ``python -c "command"``.""" + return self.run(sys.executable, "-c", command) + + def runpytest_subprocess( + self, *args: str | os.PathLike[str], timeout: float | None = None + ) -> RunResult: + """Run pytest as a subprocess with given arguments. + + Any plugins added to the :py:attr:`plugins` list will be added using the + ``-p`` command line option. Additionally ``--basetemp`` is used to put + any temporary files and directories in a numbered directory prefixed + with "runpytest-" to not conflict with the normal numbered pytest + location for temporary files and directories. + + :param args: + The sequence of arguments to pass to the pytest subprocess. + :param timeout: + The period in seconds after which to timeout and raise + :py:class:`Pytester.TimeoutExpired`. + :returns: + The result. + """ + __tracebackhide__ = True + p = make_numbered_dir(root=self.path, prefix="runpytest-", mode=0o700) + args = (f"--basetemp={p}", *args) + for plugin in self.plugins: + if not isinstance(plugin, str): + raise ValueError( + f"Specifying plugins as objects is not supported in pytester subprocess mode; " + f"specify by name instead: {plugin}" + ) + args = ("-p", plugin, *args) + args = self._getpytestargs() + args + return self.run(*args, timeout=timeout) + + def spawn_pytest(self, string: str, expect_timeout: float = 10.0) -> pexpect.spawn: + """Run pytest using pexpect. + + This makes sure to use the right pytest and sets up the temporary + directory locations. + + The pexpect child is returned. + """ + basetemp = self.path / "temp-pexpect" + basetemp.mkdir(mode=0o700) + invoke = " ".join(map(str, self._getpytestargs())) + cmd = f"{invoke} --basetemp={basetemp} {string}" + return self.spawn(cmd, expect_timeout=expect_timeout) + + def spawn(self, cmd: str, expect_timeout: float = 10.0) -> pexpect.spawn: + """Run a command using pexpect. + + The pexpect child is returned. + """ + pexpect = importorskip("pexpect", "3.0") + if hasattr(sys, "pypy_version_info") and "64" in platform.machine(): + skip("pypy-64 bit not supported") + if not hasattr(pexpect, "spawn"): + skip("pexpect.spawn not available") + logfile = self.path.joinpath("spawn.out").open("wb") + + child = pexpect.spawn(cmd, logfile=logfile, timeout=expect_timeout) + self._request.addfinalizer(logfile.close) + return child + + +class LineComp: + def __init__(self) -> None: + self.stringio = StringIO() + """:class:`python:io.StringIO()` instance used for input.""" + + def assert_contains_lines(self, lines2: Sequence[str]) -> None: + """Assert that ``lines2`` are contained (linearly) in :attr:`stringio`'s value. + + Lines are matched using :func:`LineMatcher.fnmatch_lines `. + """ + __tracebackhide__ = True + val = self.stringio.getvalue() + self.stringio.truncate(0) + self.stringio.seek(0) + lines1 = val.split("\n") + LineMatcher(lines1).fnmatch_lines(lines2) + + +class LineMatcher: + """Flexible matching of text. + + This is a convenience class to test large texts like the output of + commands. + + The constructor takes a list of lines without their trailing newlines, i.e. + ``text.splitlines()``. + """ + + def __init__(self, lines: list[str]) -> None: + self.lines = lines + self._log_output: list[str] = [] + + def __str__(self) -> str: + """Return the entire original text. + + .. versionadded:: 6.2 + You can use :meth:`str` in older versions. + """ + return "\n".join(self.lines) + + def _getlines(self, lines2: str | Sequence[str] | Source) -> Sequence[str]: + if isinstance(lines2, str): + lines2 = Source(lines2) + if isinstance(lines2, Source): + lines2 = lines2.strip().lines + return lines2 + + def fnmatch_lines_random(self, lines2: Sequence[str]) -> None: + """Check lines exist in the output in any order (using :func:`python:fnmatch.fnmatch`).""" + __tracebackhide__ = True + self._match_lines_random(lines2, fnmatch) + + def re_match_lines_random(self, lines2: Sequence[str]) -> None: + """Check lines exist in the output in any order (using :func:`python:re.match`).""" + __tracebackhide__ = True + self._match_lines_random(lines2, lambda name, pat: bool(re.match(pat, name))) + + def _match_lines_random( + self, lines2: Sequence[str], match_func: Callable[[str, str], bool] + ) -> None: + __tracebackhide__ = True + lines2 = self._getlines(lines2) + for line in lines2: + for x in self.lines: + if line == x or match_func(x, line): + self._log("matched: ", repr(line)) + break + else: + msg = f"line {line!r} not found in output" + self._log(msg) + self._fail(msg) + + def get_lines_after(self, fnline: str) -> Sequence[str]: + """Return all lines following the given line in the text. + + The given line can contain glob wildcards. + """ + for i, line in enumerate(self.lines): + if fnline == line or fnmatch(line, fnline): + return self.lines[i + 1 :] + raise ValueError(f"line {fnline!r} not found in output") + + def _log(self, *args) -> None: + self._log_output.append(" ".join(str(x) for x in args)) + + @property + def _log_text(self) -> str: + return "\n".join(self._log_output) + + def fnmatch_lines( + self, lines2: Sequence[str], *, consecutive: bool = False + ) -> None: + """Check lines exist in the output (using :func:`python:fnmatch.fnmatch`). + + The argument is a list of lines which have to match and can use glob + wildcards. If they do not match a pytest.fail() is called. The + matches and non-matches are also shown as part of the error message. + + :param lines2: String patterns to match. + :param consecutive: Match lines consecutively? + """ + __tracebackhide__ = True + self._match_lines(lines2, fnmatch, "fnmatch", consecutive=consecutive) + + def re_match_lines( + self, lines2: Sequence[str], *, consecutive: bool = False + ) -> None: + """Check lines exist in the output (using :func:`python:re.match`). + + The argument is a list of lines which have to match using ``re.match``. + If they do not match a pytest.fail() is called. + + The matches and non-matches are also shown as part of the error message. + + :param lines2: string patterns to match. + :param consecutive: match lines consecutively? + """ + __tracebackhide__ = True + self._match_lines( + lines2, + lambda name, pat: bool(re.match(pat, name)), + "re.match", + consecutive=consecutive, + ) + + def _match_lines( + self, + lines2: Sequence[str], + match_func: Callable[[str, str], bool], + match_nickname: str, + *, + consecutive: bool = False, + ) -> None: + """Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``. + + :param Sequence[str] lines2: + List of string patterns to match. The actual format depends on + ``match_func``. + :param match_func: + A callable ``match_func(line, pattern)`` where line is the + captured line from stdout/stderr and pattern is the matching + pattern. + :param str match_nickname: + The nickname for the match function that will be logged to stdout + when a match occurs. + :param consecutive: + Match lines consecutively? + """ + if not isinstance(lines2, collections.abc.Sequence): + raise TypeError(f"invalid type for lines2: {type(lines2).__name__}") + lines2 = self._getlines(lines2) + lines1 = self.lines[:] + extralines = [] + __tracebackhide__ = True + wnick = len(match_nickname) + 1 + started = False + for line in lines2: + nomatchprinted = False + while lines1: + nextline = lines1.pop(0) + if line == nextline: + self._log("exact match:", repr(line)) + started = True + break + elif match_func(nextline, line): + self._log(f"{match_nickname}:", repr(line)) + self._log( + "{:>{width}}".format("with:", width=wnick), repr(nextline) + ) + started = True + break + else: + if consecutive and started: + msg = f"no consecutive match: {line!r}" + self._log(msg) + self._log( + "{:>{width}}".format("with:", width=wnick), repr(nextline) + ) + self._fail(msg) + if not nomatchprinted: + self._log( + "{:>{width}}".format("nomatch:", width=wnick), repr(line) + ) + nomatchprinted = True + self._log("{:>{width}}".format("and:", width=wnick), repr(nextline)) + extralines.append(nextline) + else: + msg = f"remains unmatched: {line!r}" + self._log(msg) + self._fail(msg) + self._log_output = [] + + def no_fnmatch_line(self, pat: str) -> None: + """Ensure captured lines do not match the given pattern, using ``fnmatch.fnmatch``. + + :param str pat: The pattern to match lines. + """ + __tracebackhide__ = True + self._no_match_line(pat, fnmatch, "fnmatch") + + def no_re_match_line(self, pat: str) -> None: + """Ensure captured lines do not match the given pattern, using ``re.match``. + + :param str pat: The regular expression to match lines. + """ + __tracebackhide__ = True + self._no_match_line( + pat, lambda name, pat: bool(re.match(pat, name)), "re.match" + ) + + def _no_match_line( + self, pat: str, match_func: Callable[[str, str], bool], match_nickname: str + ) -> None: + """Ensure captured lines does not have a the given pattern, using ``fnmatch.fnmatch``. + + :param str pat: The pattern to match lines. + """ + __tracebackhide__ = True + nomatch_printed = False + wnick = len(match_nickname) + 1 + for line in self.lines: + if match_func(line, pat): + msg = f"{match_nickname}: {pat!r}" + self._log(msg) + self._log("{:>{width}}".format("with:", width=wnick), repr(line)) + self._fail(msg) + else: + if not nomatch_printed: + self._log("{:>{width}}".format("nomatch:", width=wnick), repr(pat)) + nomatch_printed = True + self._log("{:>{width}}".format("and:", width=wnick), repr(line)) + self._log_output = [] + + def _fail(self, msg: str) -> None: + __tracebackhide__ = True + log_text = self._log_text + self._log_output = [] + fail(log_text) + + def str(self) -> str: + """Return the entire original text.""" + return str(self) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/pytester_assertions.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/pytester_assertions.py new file mode 100644 index 0000000..915cc8a --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/pytester_assertions.py @@ -0,0 +1,74 @@ +"""Helper plugin for pytester; should not be loaded on its own.""" + +# This plugin contains assertions used by pytester. pytester cannot +# contain them itself, since it is imported by the `pytest` module, +# hence cannot be subject to assertion rewriting, which requires a +# module to not be already imported. +from __future__ import annotations + +from collections.abc import Sequence + +from _pytest.reports import CollectReport +from _pytest.reports import TestReport + + +def assertoutcome( + outcomes: tuple[ + Sequence[TestReport], + Sequence[CollectReport | TestReport], + Sequence[CollectReport | TestReport], + ], + passed: int = 0, + skipped: int = 0, + failed: int = 0, +) -> None: + __tracebackhide__ = True + + realpassed, realskipped, realfailed = outcomes + obtained = { + "passed": len(realpassed), + "skipped": len(realskipped), + "failed": len(realfailed), + } + expected = {"passed": passed, "skipped": skipped, "failed": failed} + assert obtained == expected, outcomes + + +def assert_outcomes( + outcomes: dict[str, int], + passed: int = 0, + skipped: int = 0, + failed: int = 0, + errors: int = 0, + xpassed: int = 0, + xfailed: int = 0, + warnings: int | None = None, + deselected: int | None = None, +) -> None: + """Assert that the specified outcomes appear with the respective + numbers (0 means it didn't occur) in the text output from a test run.""" + __tracebackhide__ = True + + obtained = { + "passed": outcomes.get("passed", 0), + "skipped": outcomes.get("skipped", 0), + "failed": outcomes.get("failed", 0), + "errors": outcomes.get("errors", 0), + "xpassed": outcomes.get("xpassed", 0), + "xfailed": outcomes.get("xfailed", 0), + } + expected = { + "passed": passed, + "skipped": skipped, + "failed": failed, + "errors": errors, + "xpassed": xpassed, + "xfailed": xfailed, + } + if warnings is not None: + obtained["warnings"] = outcomes.get("warnings", 0) + expected["warnings"] = warnings + if deselected is not None: + obtained["deselected"] = outcomes.get("deselected", 0) + expected["deselected"] = deselected + assert obtained == expected diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/python.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/python.py new file mode 100644 index 0000000..e637518 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/python.py @@ -0,0 +1,1772 @@ +# mypy: allow-untyped-defs +"""Python test discovery, setup and run of test functions.""" + +from __future__ import annotations + +import abc +from collections import Counter +from collections import defaultdict +from collections.abc import Callable +from collections.abc import Generator +from collections.abc import Iterable +from collections.abc import Iterator +from collections.abc import Mapping +from collections.abc import Sequence +import dataclasses +import enum +import fnmatch +from functools import partial +import inspect +import itertools +import os +from pathlib import Path +import re +import textwrap +import types +from typing import Any +from typing import cast +from typing import final +from typing import Literal +from typing import NoReturn +from typing import TYPE_CHECKING +import warnings + +import _pytest +from _pytest import fixtures +from _pytest import nodes +from _pytest._code import filter_traceback +from _pytest._code import getfslineno +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest._code.code import Traceback +from _pytest._io.saferepr import saferepr +from _pytest.compat import ascii_escaped +from _pytest.compat import get_default_arg_names +from _pytest.compat import get_real_func +from _pytest.compat import getimfunc +from _pytest.compat import is_async_function +from _pytest.compat import LEGACY_PATH +from _pytest.compat import NOTSET +from _pytest.compat import safe_getattr +from _pytest.compat import safe_isclass +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import FixtureDef +from _pytest.fixtures import FixtureRequest +from _pytest.fixtures import FuncFixtureInfo +from _pytest.fixtures import get_scope_node +from _pytest.main import Session +from _pytest.mark import ParameterSet +from _pytest.mark.structures import _HiddenParam +from _pytest.mark.structures import get_unpacked_marks +from _pytest.mark.structures import HIDDEN_PARAM +from _pytest.mark.structures import Mark +from _pytest.mark.structures import MarkDecorator +from _pytest.mark.structures import normalize_mark_list +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.pathlib import fnmatch_ex +from _pytest.pathlib import import_path +from _pytest.pathlib import ImportPathMismatchError +from _pytest.pathlib import scandir +from _pytest.scope import _ScopeName +from _pytest.scope import Scope +from _pytest.stash import StashKey +from _pytest.warning_types import PytestCollectionWarning +from _pytest.warning_types import PytestReturnNotNoneWarning + + +if TYPE_CHECKING: + from typing_extensions import Self + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "python_files", + type="args", + # NOTE: default is also used in AssertionRewritingHook. + default=["test_*.py", "*_test.py"], + help="Glob-style file patterns for Python test module discovery", + ) + parser.addini( + "python_classes", + type="args", + default=["Test"], + help="Prefixes or glob names for Python test class discovery", + ) + parser.addini( + "python_functions", + type="args", + default=["test"], + help="Prefixes or glob names for Python test function and method discovery", + ) + parser.addini( + "disable_test_id_escaping_and_forfeit_all_rights_to_community_support", + type="bool", + default=False, + help="Disable string escape non-ASCII characters, might cause unwanted " + "side effects(use at your own risk)", + ) + parser.addini( + "strict_parametrization_ids", + type="bool", + # None => fallback to `strict`. + default=None, + help="Emit an error if non-unique parameter set IDs are detected", + ) + + +def pytest_generate_tests(metafunc: Metafunc) -> None: + for marker in metafunc.definition.iter_markers(name="parametrize"): + metafunc.parametrize(*marker.args, **marker.kwargs, _param_mark=marker) + + +def pytest_configure(config: Config) -> None: + config.addinivalue_line( + "markers", + "parametrize(argnames, argvalues): call a test function multiple " + "times passing in different arguments in turn. argvalues generally " + "needs to be a list of values if argnames specifies only one name " + "or a list of tuples of values if argnames specifies multiple names. " + "Example: @parametrize('arg1', [1,2]) would lead to two calls of the " + "decorated test function, one with arg1=1 and another with arg1=2." + "see https://docs.pytest.org/en/stable/how-to/parametrize.html for more info " + "and examples.", + ) + config.addinivalue_line( + "markers", + "usefixtures(fixturename1, fixturename2, ...): mark tests as needing " + "all of the specified fixtures. see " + "https://docs.pytest.org/en/stable/explanation/fixtures.html#usefixtures ", + ) + + +def async_fail(nodeid: str) -> None: + msg = ( + "async def functions are not natively supported.\n" + "You need to install a suitable plugin for your async framework, for example:\n" + " - anyio\n" + " - pytest-asyncio\n" + " - pytest-tornasync\n" + " - pytest-trio\n" + " - pytest-twisted" + ) + fail(msg, pytrace=False) + + +@hookimpl(trylast=True) +def pytest_pyfunc_call(pyfuncitem: Function) -> object | None: + testfunction = pyfuncitem.obj + if is_async_function(testfunction): + async_fail(pyfuncitem.nodeid) + funcargs = pyfuncitem.funcargs + testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames} + result = testfunction(**testargs) + if hasattr(result, "__await__") or hasattr(result, "__aiter__"): + async_fail(pyfuncitem.nodeid) + elif result is not None: + warnings.warn( + PytestReturnNotNoneWarning( + f"Test functions should return None, but {pyfuncitem.nodeid} returned {type(result)!r}.\n" + "Did you mean to use `assert` instead of `return`?\n" + "See https://docs.pytest.org/en/stable/how-to/assert.html#return-not-none for more information." + ) + ) + return True + + +def pytest_collect_directory( + path: Path, parent: nodes.Collector +) -> nodes.Collector | None: + pkginit = path / "__init__.py" + try: + has_pkginit = pkginit.is_file() + except PermissionError: + # See https://github.com/pytest-dev/pytest/issues/12120#issuecomment-2106349096. + return None + if has_pkginit: + return Package.from_parent(parent, path=path) + return None + + +def pytest_collect_file(file_path: Path, parent: nodes.Collector) -> Module | None: + if file_path.suffix == ".py": + if not parent.session.isinitpath(file_path): + if not path_matches_patterns( + file_path, parent.config.getini("python_files") + ): + return None + ihook = parent.session.gethookproxy(file_path) + module: Module = ihook.pytest_pycollect_makemodule( + module_path=file_path, parent=parent + ) + return module + return None + + +def path_matches_patterns(path: Path, patterns: Iterable[str]) -> bool: + """Return whether path matches any of the patterns in the list of globs given.""" + return any(fnmatch_ex(pattern, path) for pattern in patterns) + + +def pytest_pycollect_makemodule(module_path: Path, parent) -> Module: + return Module.from_parent(parent, path=module_path) + + +@hookimpl(trylast=True) +def pytest_pycollect_makeitem( + collector: Module | Class, name: str, obj: object +) -> None | nodes.Item | nodes.Collector | list[nodes.Item | nodes.Collector]: + assert isinstance(collector, Class | Module), type(collector) + # Nothing was collected elsewhere, let's do it here. + if safe_isclass(obj): + if collector.istestclass(obj, name): + return Class.from_parent(collector, name=name, obj=obj) + elif collector.istestfunction(obj, name): + # mock seems to store unbound methods (issue473), normalize it. + obj = getattr(obj, "__func__", obj) + # We need to try and unwrap the function if it's a functools.partial + # or a functools.wrapped. + # We mustn't if it's been wrapped with mock.patch (python 2 only). + if not (inspect.isfunction(obj) or inspect.isfunction(get_real_func(obj))): + filename, lineno = getfslineno(obj) + warnings.warn_explicit( + message=PytestCollectionWarning( + f"cannot collect {name!r} because it is not a function." + ), + category=None, + filename=str(filename), + lineno=lineno + 1, + ) + elif getattr(obj, "__test__", True): + if inspect.isgeneratorfunction(obj): + fail( + f"'yield' keyword is allowed in fixtures, but not in tests ({name})", + pytrace=False, + ) + return list(collector._genfunctions(name, obj)) + return None + return None + + +class PyobjMixin(nodes.Node): + """this mix-in inherits from Node to carry over the typing information + + as its intended to always mix in before a node + its position in the mro is unaffected""" + + _ALLOW_MARKERS = True + + @property + def module(self): + """Python module object this node was collected from (can be None).""" + node = self.getparent(Module) + return node.obj if node is not None else None + + @property + def cls(self): + """Python class object this node was collected from (can be None).""" + node = self.getparent(Class) + return node.obj if node is not None else None + + @property + def instance(self): + """Python instance object the function is bound to. + + Returns None if not a test method, e.g. for a standalone test function, + a class or a module. + """ + # Overridden by Function. + return None + + @property + def obj(self): + """Underlying Python object.""" + obj = getattr(self, "_obj", None) + if obj is None: + self._obj = obj = self._getobj() + # XXX evil hack + # used to avoid Function marker duplication + if self._ALLOW_MARKERS: + self.own_markers.extend(get_unpacked_marks(self.obj)) + # This assumes that `obj` is called before there is a chance + # to add custom keys to `self.keywords`, so no fear of overriding. + self.keywords.update((mark.name, mark) for mark in self.own_markers) + return obj + + @obj.setter + def obj(self, value): + self._obj = value + + def _getobj(self): + """Get the underlying Python object. May be overwritten by subclasses.""" + # TODO: Improve the type of `parent` such that assert/ignore aren't needed. + assert self.parent is not None + obj = self.parent.obj # type: ignore[attr-defined] + return getattr(obj, self.name) + + def getmodpath(self, stopatmodule: bool = True, includemodule: bool = False) -> str: + """Return Python path relative to the containing module.""" + parts = [] + for node in self.iter_parents(): + name = node.name + if isinstance(node, Module): + name = os.path.splitext(name)[0] + if stopatmodule: + if includemodule: + parts.append(name) + break + parts.append(name) + parts.reverse() + return ".".join(parts) + + def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]: + # XXX caching? + path, lineno = getfslineno(self.obj) + modpath = self.getmodpath() + return path, lineno, modpath + + +# As an optimization, these builtin attribute names are pre-ignored when +# iterating over an object during collection -- the pytest_pycollect_makeitem +# hook is not called for them. +# fmt: off +class _EmptyClass: pass # noqa: E701 +IGNORED_ATTRIBUTES = frozenset.union( + frozenset(), + # Module. + dir(types.ModuleType("empty_module")), + # Some extra module attributes the above doesn't catch. + {"__builtins__", "__file__", "__cached__"}, + # Class. + dir(_EmptyClass), + # Instance. + dir(_EmptyClass()), +) +del _EmptyClass +# fmt: on + + +class PyCollector(PyobjMixin, nodes.Collector, abc.ABC): + def funcnamefilter(self, name: str) -> bool: + return self._matches_prefix_or_glob_option("python_functions", name) + + def isnosetest(self, obj: object) -> bool: + """Look for the __test__ attribute, which is applied by the + @nose.tools.istest decorator. + """ + # We explicitly check for "is True" here to not mistakenly treat + # classes with a custom __getattr__ returning something truthy (like a + # function) as test classes. + return safe_getattr(obj, "__test__", False) is True + + def classnamefilter(self, name: str) -> bool: + return self._matches_prefix_or_glob_option("python_classes", name) + + def istestfunction(self, obj: object, name: str) -> bool: + if self.funcnamefilter(name) or self.isnosetest(obj): + if isinstance(obj, staticmethod | classmethod): + # staticmethods and classmethods need to be unwrapped. + obj = safe_getattr(obj, "__func__", False) + return callable(obj) and fixtures.getfixturemarker(obj) is None + else: + return False + + def istestclass(self, obj: object, name: str) -> bool: + if not (self.classnamefilter(name) or self.isnosetest(obj)): + return False + if inspect.isabstract(obj): + return False + return True + + def _matches_prefix_or_glob_option(self, option_name: str, name: str) -> bool: + """Check if the given name matches the prefix or glob-pattern defined + in configuration.""" + for option in self.config.getini(option_name): + if name.startswith(option): + return True + # Check that name looks like a glob-string before calling fnmatch + # because this is called for every name in each collected module, + # and fnmatch is somewhat expensive to call. + elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch( + name, option + ): + return True + return False + + def collect(self) -> Iterable[nodes.Item | nodes.Collector]: + if not getattr(self.obj, "__test__", True): + return [] + + # Avoid random getattrs and peek in the __dict__ instead. + dicts = [getattr(self.obj, "__dict__", {})] + if isinstance(self.obj, type): + for basecls in self.obj.__mro__: + dicts.append(basecls.__dict__) + + # In each class, nodes should be definition ordered. + # __dict__ is definition ordered. + seen: set[str] = set() + dict_values: list[list[nodes.Item | nodes.Collector]] = [] + collect_imported_tests = self.session.config.getini("collect_imported_tests") + ihook = self.ihook + for dic in dicts: + values: list[nodes.Item | nodes.Collector] = [] + # Note: seems like the dict can change during iteration - + # be careful not to remove the list() without consideration. + for name, obj in list(dic.items()): + if name in IGNORED_ATTRIBUTES: + continue + if name in seen: + continue + seen.add(name) + + if not collect_imported_tests and isinstance(self, Module): + # Do not collect functions and classes from other modules. + if inspect.isfunction(obj) or inspect.isclass(obj): + if obj.__module__ != self._getobj().__name__: + continue + + res = ihook.pytest_pycollect_makeitem( + collector=self, name=name, obj=obj + ) + if res is None: + continue + elif isinstance(res, list): + values.extend(res) + else: + values.append(res) + dict_values.append(values) + + # Between classes in the class hierarchy, reverse-MRO order -- nodes + # inherited from base classes should come before subclasses. + result = [] + for values in reversed(dict_values): + result.extend(values) + return result + + def _genfunctions(self, name: str, funcobj) -> Iterator[Function]: + modulecol = self.getparent(Module) + assert modulecol is not None + module = modulecol.obj + clscol = self.getparent(Class) + cls = (clscol and clscol.obj) or None + + definition = FunctionDefinition.from_parent(self, name=name, callobj=funcobj) + fixtureinfo = definition._fixtureinfo + + # pytest_generate_tests impls call metafunc.parametrize() which fills + # metafunc._calls, the outcome of the hook. + metafunc = Metafunc( + definition=definition, + fixtureinfo=fixtureinfo, + config=self.config, + cls=cls, + module=module, + _ispytest=True, + ) + methods = [] + if hasattr(module, "pytest_generate_tests"): + methods.append(module.pytest_generate_tests) + if cls is not None and hasattr(cls, "pytest_generate_tests"): + methods.append(cls().pytest_generate_tests) + self.ihook.pytest_generate_tests.call_extra(methods, dict(metafunc=metafunc)) + + if not metafunc._calls: + yield Function.from_parent(self, name=name, fixtureinfo=fixtureinfo) + else: + metafunc._recompute_direct_params_indices() + # Direct parametrizations taking place in module/class-specific + # `metafunc.parametrize` calls may have shadowed some fixtures, so make sure + # we update what the function really needs a.k.a its fixture closure. Note that + # direct parametrizations using `@pytest.mark.parametrize` have already been considered + # into making the closure using `ignore_args` arg to `getfixtureclosure`. + fixtureinfo.prune_dependency_tree() + + for callspec in metafunc._calls: + subname = f"{name}[{callspec.id}]" if callspec._idlist else name + yield Function.from_parent( + self, + name=subname, + callspec=callspec, + fixtureinfo=fixtureinfo, + keywords={callspec.id: True}, + originalname=name, + ) + + +def importtestmodule( + path: Path, + config: Config, +): + # We assume we are only called once per module. + importmode = config.getoption("--import-mode") + try: + mod = import_path( + path, + mode=importmode, + root=config.rootpath, + consider_namespace_packages=config.getini("consider_namespace_packages"), + ) + except SyntaxError as e: + raise nodes.Collector.CollectError( + ExceptionInfo.from_current().getrepr(style="short") + ) from e + except ImportPathMismatchError as e: + raise nodes.Collector.CollectError( + "import file mismatch:\n" + "imported module {!r} has this __file__ attribute:\n" + " {}\n" + "which is not the same as the test file we want to collect:\n" + " {}\n" + "HINT: remove __pycache__ / .pyc files and/or use a " + "unique basename for your test file modules".format(*e.args) + ) from e + except ImportError as e: + exc_info = ExceptionInfo.from_current() + if config.get_verbosity() < 2: + exc_info.traceback = exc_info.traceback.filter(filter_traceback) + exc_repr = ( + exc_info.getrepr(style="short") + if exc_info.traceback + else exc_info.exconly() + ) + formatted_tb = str(exc_repr) + raise nodes.Collector.CollectError( + f"ImportError while importing test module '{path}'.\n" + "Hint: make sure your test modules/packages have valid Python names.\n" + "Traceback:\n" + f"{formatted_tb}" + ) from e + except skip.Exception as e: + if e.allow_module_level: + raise + raise nodes.Collector.CollectError( + "Using pytest.skip outside of a test will skip the entire module. " + "If that's your intention, pass `allow_module_level=True`. " + "If you want to skip a specific test or an entire class, " + "use the @pytest.mark.skip or @pytest.mark.skipif decorators." + ) from e + config.pluginmanager.consider_module(mod) + return mod + + +class Module(nodes.File, PyCollector): + """Collector for test classes and functions in a Python module.""" + + def _getobj(self): + return importtestmodule(self.path, self.config) + + def collect(self) -> Iterable[nodes.Item | nodes.Collector]: + self._register_setup_module_fixture() + self._register_setup_function_fixture() + self.session._fixturemanager.parsefactories(self) + return super().collect() + + def _register_setup_module_fixture(self) -> None: + """Register an autouse, module-scoped fixture for the collected module object + that invokes setUpModule/tearDownModule if either or both are available. + + Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with + other fixtures (#517). + """ + setup_module = _get_first_non_fixture_func( + self.obj, ("setUpModule", "setup_module") + ) + teardown_module = _get_first_non_fixture_func( + self.obj, ("tearDownModule", "teardown_module") + ) + + if setup_module is None and teardown_module is None: + return + + def xunit_setup_module_fixture(request) -> Generator[None]: + module = request.module + if setup_module is not None: + _call_with_optional_argument(setup_module, module) + yield + if teardown_module is not None: + _call_with_optional_argument(teardown_module, module) + + self.session._fixturemanager._register_fixture( + # Use a unique name to speed up lookup. + name=f"_xunit_setup_module_fixture_{self.obj.__name__}", + func=xunit_setup_module_fixture, + nodeid=self.nodeid, + scope="module", + autouse=True, + ) + + def _register_setup_function_fixture(self) -> None: + """Register an autouse, function-scoped fixture for the collected module object + that invokes setup_function/teardown_function if either or both are available. + + Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with + other fixtures (#517). + """ + setup_function = _get_first_non_fixture_func(self.obj, ("setup_function",)) + teardown_function = _get_first_non_fixture_func( + self.obj, ("teardown_function",) + ) + if setup_function is None and teardown_function is None: + return + + def xunit_setup_function_fixture(request) -> Generator[None]: + if request.instance is not None: + # in this case we are bound to an instance, so we need to let + # setup_method handle this + yield + return + function = request.function + if setup_function is not None: + _call_with_optional_argument(setup_function, function) + yield + if teardown_function is not None: + _call_with_optional_argument(teardown_function, function) + + self.session._fixturemanager._register_fixture( + # Use a unique name to speed up lookup. + name=f"_xunit_setup_function_fixture_{self.obj.__name__}", + func=xunit_setup_function_fixture, + nodeid=self.nodeid, + scope="function", + autouse=True, + ) + + +class Package(nodes.Directory): + """Collector for files and directories in a Python packages -- directories + with an `__init__.py` file. + + .. note:: + + Directories without an `__init__.py` file are instead collected by + :class:`~pytest.Dir` by default. Both are :class:`~pytest.Directory` + collectors. + + .. versionchanged:: 8.0 + + Now inherits from :class:`~pytest.Directory`. + """ + + def __init__( + self, + fspath: LEGACY_PATH | None, + parent: nodes.Collector, + # NOTE: following args are unused: + config=None, + session=None, + nodeid=None, + path: Path | None = None, + ) -> None: + # NOTE: Could be just the following, but kept as-is for compat. + # super().__init__(self, fspath, parent=parent) + session = parent.session + super().__init__( + fspath=fspath, + path=path, + parent=parent, + config=config, + session=session, + nodeid=nodeid, + ) + + def setup(self) -> None: + init_mod = importtestmodule(self.path / "__init__.py", self.config) + + # Not using fixtures to call setup_module here because autouse fixtures + # from packages are not called automatically (#4085). + setup_module = _get_first_non_fixture_func( + init_mod, ("setUpModule", "setup_module") + ) + if setup_module is not None: + _call_with_optional_argument(setup_module, init_mod) + + teardown_module = _get_first_non_fixture_func( + init_mod, ("tearDownModule", "teardown_module") + ) + if teardown_module is not None: + func = partial(_call_with_optional_argument, teardown_module, init_mod) + self.addfinalizer(func) + + def collect(self) -> Iterable[nodes.Item | nodes.Collector]: + # Always collect __init__.py first. + def sort_key(entry: os.DirEntry[str]) -> object: + return (entry.name != "__init__.py", entry.name) + + config = self.config + col: nodes.Collector | None + cols: Sequence[nodes.Collector] + ihook = self.ihook + for direntry in scandir(self.path, sort_key): + if direntry.is_dir(): + path = Path(direntry.path) + if not self.session.isinitpath(path, with_parents=True): + if ihook.pytest_ignore_collect(collection_path=path, config=config): + continue + col = ihook.pytest_collect_directory(path=path, parent=self) + if col is not None: + yield col + + elif direntry.is_file(): + path = Path(direntry.path) + if not self.session.isinitpath(path): + if ihook.pytest_ignore_collect(collection_path=path, config=config): + continue + cols = ihook.pytest_collect_file(file_path=path, parent=self) + yield from cols + + +def _call_with_optional_argument(func, arg) -> None: + """Call the given function with the given argument if func accepts one argument, otherwise + calls func without arguments.""" + arg_count = func.__code__.co_argcount + if inspect.ismethod(func): + arg_count -= 1 + if arg_count: + func(arg) + else: + func() + + +def _get_first_non_fixture_func(obj: object, names: Iterable[str]) -> object | None: + """Return the attribute from the given object to be used as a setup/teardown + xunit-style function, but only if not marked as a fixture to avoid calling it twice. + """ + for name in names: + meth: object | None = getattr(obj, name, None) + if meth is not None and fixtures.getfixturemarker(meth) is None: + return meth + return None + + +class Class(PyCollector): + """Collector for test methods (and nested classes) in a Python class.""" + + @classmethod + def from_parent(cls, parent, *, name, obj=None, **kw) -> Self: # type: ignore[override] + """The public constructor.""" + return super().from_parent(name=name, parent=parent, **kw) + + def newinstance(self): + return self.obj() + + def collect(self) -> Iterable[nodes.Item | nodes.Collector]: + if not safe_getattr(self.obj, "__test__", True): + return [] + if hasinit(self.obj): + assert self.parent is not None + self.warn( + PytestCollectionWarning( + f"cannot collect test class {self.obj.__name__!r} because it has a " + f"__init__ constructor (from: {self.parent.nodeid})" + ) + ) + return [] + elif hasnew(self.obj): + assert self.parent is not None + self.warn( + PytestCollectionWarning( + f"cannot collect test class {self.obj.__name__!r} because it has a " + f"__new__ constructor (from: {self.parent.nodeid})" + ) + ) + return [] + + self._register_setup_class_fixture() + self._register_setup_method_fixture() + + self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid) + + return super().collect() + + def _register_setup_class_fixture(self) -> None: + """Register an autouse, class scoped fixture into the collected class object + that invokes setup_class/teardown_class if either or both are available. + + Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with + other fixtures (#517). + """ + setup_class = _get_first_non_fixture_func(self.obj, ("setup_class",)) + teardown_class = _get_first_non_fixture_func(self.obj, ("teardown_class",)) + if setup_class is None and teardown_class is None: + return + + def xunit_setup_class_fixture(request) -> Generator[None]: + cls = request.cls + if setup_class is not None: + func = getimfunc(setup_class) + _call_with_optional_argument(func, cls) + yield + if teardown_class is not None: + func = getimfunc(teardown_class) + _call_with_optional_argument(func, cls) + + self.session._fixturemanager._register_fixture( + # Use a unique name to speed up lookup. + name=f"_xunit_setup_class_fixture_{self.obj.__qualname__}", + func=xunit_setup_class_fixture, + nodeid=self.nodeid, + scope="class", + autouse=True, + ) + + def _register_setup_method_fixture(self) -> None: + """Register an autouse, function scoped fixture into the collected class object + that invokes setup_method/teardown_method if either or both are available. + + Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with + other fixtures (#517). + """ + setup_name = "setup_method" + setup_method = _get_first_non_fixture_func(self.obj, (setup_name,)) + teardown_name = "teardown_method" + teardown_method = _get_first_non_fixture_func(self.obj, (teardown_name,)) + if setup_method is None and teardown_method is None: + return + + def xunit_setup_method_fixture(request) -> Generator[None]: + instance = request.instance + method = request.function + if setup_method is not None: + func = getattr(instance, setup_name) + _call_with_optional_argument(func, method) + yield + if teardown_method is not None: + func = getattr(instance, teardown_name) + _call_with_optional_argument(func, method) + + self.session._fixturemanager._register_fixture( + # Use a unique name to speed up lookup. + name=f"_xunit_setup_method_fixture_{self.obj.__qualname__}", + func=xunit_setup_method_fixture, + nodeid=self.nodeid, + scope="function", + autouse=True, + ) + + +def hasinit(obj: object) -> bool: + init: object = getattr(obj, "__init__", None) + if init: + return init != object.__init__ + return False + + +def hasnew(obj: object) -> bool: + new: object = getattr(obj, "__new__", None) + if new: + return new != object.__new__ + return False + + +@final +@dataclasses.dataclass(frozen=True) +class IdMaker: + """Make IDs for a parametrization.""" + + __slots__ = ( + "argnames", + "config", + "func_name", + "idfn", + "ids", + "nodeid", + "parametersets", + ) + + # The argnames of the parametrization. + argnames: Sequence[str] + # The ParameterSets of the parametrization. + parametersets: Sequence[ParameterSet] + # Optionally, a user-provided callable to make IDs for parameters in a + # ParameterSet. + idfn: Callable[[Any], object | None] | None + # Optionally, explicit IDs for ParameterSets by index. + ids: Sequence[object | None] | None + # Optionally, the pytest config. + # Used for controlling ASCII escaping, determining parametrization ID + # strictness, and for calling the :hook:`pytest_make_parametrize_id` hook. + config: Config | None + # Optionally, the ID of the node being parametrized. + # Used only for clearer error messages. + nodeid: str | None + # Optionally, the ID of the function being parametrized. + # Used only for clearer error messages. + func_name: str | None + + def make_unique_parameterset_ids(self) -> list[str | _HiddenParam]: + """Make a unique identifier for each ParameterSet, that may be used to + identify the parametrization in a node ID. + + If strict_parametrization_ids is enabled, and duplicates are detected, + raises CollectError. Otherwise makes the IDs unique as follows: + + Format is -...-[counter], where prm_x_token is + - user-provided id, if given + - else an id derived from the value, applicable for certain types + - else + The counter suffix is appended only in case a string wouldn't be unique + otherwise. + """ + resolved_ids = list(self._resolve_ids()) + # All IDs must be unique! + if len(resolved_ids) != len(set(resolved_ids)): + # Record the number of occurrences of each ID. + id_counts = Counter(resolved_ids) + + if self._strict_parametrization_ids_enabled(): + parameters = ", ".join(self.argnames) + parametersets = ", ".join( + [saferepr(list(param.values)) for param in self.parametersets] + ) + ids = ", ".join( + id if id is not HIDDEN_PARAM else "" for id in resolved_ids + ) + duplicates = ", ".join( + id if id is not HIDDEN_PARAM else "" + for id, count in id_counts.items() + if count > 1 + ) + msg = textwrap.dedent(f""" + Duplicate parametrization IDs detected, but strict_parametrization_ids is set. + + Test name: {self.nodeid} + Parameters: {parameters} + Parameter sets: {parametersets} + IDs: {ids} + Duplicates: {duplicates} + + You can fix this problem using `@pytest.mark.parametrize(..., ids=...)` or `pytest.param(..., id=...)`. + """).strip() # noqa: E501 + raise nodes.Collector.CollectError(msg) + + # Map the ID to its next suffix. + id_suffixes: dict[str, int] = defaultdict(int) + # Suffix non-unique IDs to make them unique. + for index, id in enumerate(resolved_ids): + if id_counts[id] > 1: + if id is HIDDEN_PARAM: + self._complain_multiple_hidden_parameter_sets() + suffix = "" + if id and id[-1].isdigit(): + suffix = "_" + new_id = f"{id}{suffix}{id_suffixes[id]}" + while new_id in set(resolved_ids): + id_suffixes[id] += 1 + new_id = f"{id}{suffix}{id_suffixes[id]}" + resolved_ids[index] = new_id + id_suffixes[id] += 1 + assert len(resolved_ids) == len(set(resolved_ids)), ( + f"Internal error: {resolved_ids=}" + ) + return resolved_ids + + def _strict_parametrization_ids_enabled(self) -> bool: + if self.config is None: + return False + strict_parametrization_ids = self.config.getini("strict_parametrization_ids") + if strict_parametrization_ids is None: + strict_parametrization_ids = self.config.getini("strict") + return cast(bool, strict_parametrization_ids) + + def _resolve_ids(self) -> Iterable[str | _HiddenParam]: + """Resolve IDs for all ParameterSets (may contain duplicates).""" + for idx, parameterset in enumerate(self.parametersets): + if parameterset.id is not None: + # ID provided directly - pytest.param(..., id="...") + if parameterset.id is HIDDEN_PARAM: + yield HIDDEN_PARAM + else: + yield _ascii_escaped_by_config(parameterset.id, self.config) + elif self.ids and idx < len(self.ids) and self.ids[idx] is not None: + # ID provided in the IDs list - parametrize(..., ids=[...]). + if self.ids[idx] is HIDDEN_PARAM: + yield HIDDEN_PARAM + else: + yield self._idval_from_value_required(self.ids[idx], idx) + else: + # ID not provided - generate it. + yield "-".join( + self._idval(val, argname, idx) + for val, argname in zip( + parameterset.values, self.argnames, strict=True + ) + ) + + def _idval(self, val: object, argname: str, idx: int) -> str: + """Make an ID for a parameter in a ParameterSet.""" + idval = self._idval_from_function(val, argname, idx) + if idval is not None: + return idval + idval = self._idval_from_hook(val, argname) + if idval is not None: + return idval + idval = self._idval_from_value(val) + if idval is not None: + return idval + return self._idval_from_argname(argname, idx) + + def _idval_from_function(self, val: object, argname: str, idx: int) -> str | None: + """Try to make an ID for a parameter in a ParameterSet using the + user-provided id callable, if given.""" + if self.idfn is None: + return None + try: + id = self.idfn(val) + except Exception as e: + prefix = f"{self.nodeid}: " if self.nodeid is not None else "" + msg = "error raised while trying to determine id of parameter '{}' at position {}" + msg = prefix + msg.format(argname, idx) + raise ValueError(msg) from e + if id is None: + return None + return self._idval_from_value(id) + + def _idval_from_hook(self, val: object, argname: str) -> str | None: + """Try to make an ID for a parameter in a ParameterSet by calling the + :hook:`pytest_make_parametrize_id` hook.""" + if self.config: + id: str | None = self.config.hook.pytest_make_parametrize_id( + config=self.config, val=val, argname=argname + ) + return id + return None + + def _idval_from_value(self, val: object) -> str | None: + """Try to make an ID for a parameter in a ParameterSet from its value, + if the value type is supported.""" + if isinstance(val, str | bytes): + return _ascii_escaped_by_config(val, self.config) + elif val is None or isinstance(val, float | int | bool | complex): + return str(val) + elif isinstance(val, re.Pattern): + return ascii_escaped(val.pattern) + elif val is NOTSET: + # Fallback to default. Note that NOTSET is an enum.Enum. + pass + elif isinstance(val, enum.Enum): + return str(val) + elif isinstance(getattr(val, "__name__", None), str): + # Name of a class, function, module, etc. + name: str = getattr(val, "__name__") + return name + return None + + def _idval_from_value_required(self, val: object, idx: int) -> str: + """Like _idval_from_value(), but fails if the type is not supported.""" + id = self._idval_from_value(val) + if id is not None: + return id + + # Fail. + prefix = self._make_error_prefix() + msg = ( + f"{prefix}ids contains unsupported value {saferepr(val)} (type: {type(val)!r}) at index {idx}. " + "Supported types are: str, bytes, int, float, complex, bool, enum, regex or anything with a __name__." + ) + fail(msg, pytrace=False) + + @staticmethod + def _idval_from_argname(argname: str, idx: int) -> str: + """Make an ID for a parameter in a ParameterSet from the argument name + and the index of the ParameterSet.""" + return str(argname) + str(idx) + + def _complain_multiple_hidden_parameter_sets(self) -> NoReturn: + fail( + f"{self._make_error_prefix()}multiple instances of HIDDEN_PARAM " + "cannot be used in the same parametrize call, " + "because the tests names need to be unique." + ) + + def _make_error_prefix(self) -> str: + if self.func_name is not None: + return f"In {self.func_name}: " + elif self.nodeid is not None: + return f"In {self.nodeid}: " + else: + return "" + + +@final +@dataclasses.dataclass(frozen=True) +class CallSpec2: + """A planned parameterized invocation of a test function. + + Calculated during collection for a given test function's Metafunc. + Once collection is over, each callspec is turned into a single Item + and stored in item.callspec. + """ + + # arg name -> arg value which will be passed to a fixture or pseudo-fixture + # of the same name. (indirect or direct parametrization respectively) + params: dict[str, object] = dataclasses.field(default_factory=dict) + # arg name -> arg index. + indices: dict[str, int] = dataclasses.field(default_factory=dict) + # arg name -> parameter scope. + # Used for sorting parametrized resources. + _arg2scope: Mapping[str, Scope] = dataclasses.field(default_factory=dict) + # Parts which will be added to the item's name in `[..]` separated by "-". + _idlist: Sequence[str] = dataclasses.field(default_factory=tuple) + # Marks which will be applied to the item. + marks: list[Mark] = dataclasses.field(default_factory=list) + + def setmulti( + self, + *, + argnames: Iterable[str], + valset: Iterable[object], + id: str | _HiddenParam, + marks: Iterable[Mark | MarkDecorator], + scope: Scope, + param_index: int, + nodeid: str, + ) -> CallSpec2: + params = self.params.copy() + indices = self.indices.copy() + arg2scope = dict(self._arg2scope) + for arg, val in zip(argnames, valset, strict=True): + if arg in params: + raise nodes.Collector.CollectError( + f"{nodeid}: duplicate parametrization of {arg!r}" + ) + params[arg] = val + indices[arg] = param_index + arg2scope[arg] = scope + return CallSpec2( + params=params, + indices=indices, + _arg2scope=arg2scope, + _idlist=self._idlist if id is HIDDEN_PARAM else [*self._idlist, id], + marks=[*self.marks, *normalize_mark_list(marks)], + ) + + def getparam(self, name: str) -> object: + try: + return self.params[name] + except KeyError as e: + raise ValueError(name) from e + + @property + def id(self) -> str: + return "-".join(self._idlist) + + +def get_direct_param_fixture_func(request: FixtureRequest) -> Any: + return request.param + + +# Used for storing pseudo fixturedefs for direct parametrization. +name2pseudofixturedef_key = StashKey[dict[str, FixtureDef[Any]]]() + + +@final +class Metafunc: + """Objects passed to the :hook:`pytest_generate_tests` hook. + + They help to inspect a test function and to generate tests according to + test configuration or values specified in the class or module where a + test function is defined. + """ + + def __init__( + self, + definition: FunctionDefinition, + fixtureinfo: fixtures.FuncFixtureInfo, + config: Config, + cls=None, + module=None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + + #: Access to the underlying :class:`_pytest.python.FunctionDefinition`. + self.definition = definition + + #: Access to the :class:`pytest.Config` object for the test session. + self.config = config + + #: The module object where the test function is defined in. + self.module = module + + #: Underlying Python test function. + self.function = definition.obj + + #: Set of fixture names required by the test function. + self.fixturenames = fixtureinfo.names_closure + + #: Class object where the test function is defined in or ``None``. + self.cls = cls + + self._arg2fixturedefs = fixtureinfo.name2fixturedefs + + # Result of parametrize(). + self._calls: list[CallSpec2] = [] + + self._params_directness: dict[str, Literal["indirect", "direct"]] = {} + + def parametrize( + self, + argnames: str | Sequence[str], + argvalues: Iterable[ParameterSet | Sequence[object] | object], + indirect: bool | Sequence[str] = False, + ids: Iterable[object | None] | Callable[[Any], object | None] | None = None, + scope: _ScopeName | None = None, + *, + _param_mark: Mark | None = None, + ) -> None: + """Add new invocations to the underlying test function using the list + of argvalues for the given argnames. Parametrization is performed + during the collection phase. If you need to setup expensive resources + see about setting ``indirect`` to do it at test setup time instead. + + Can be called multiple times per test function (but only on different + argument names), in which case each call parametrizes all previous + parametrizations, e.g. + + :: + + unparametrized: t + parametrize ["x", "y"]: t[x], t[y] + parametrize [1, 2]: t[x-1], t[x-2], t[y-1], t[y-2] + + :param argnames: + A comma-separated string denoting one or more argument names, or + a list/tuple of argument strings. + + :param argvalues: + The list of argvalues determines how often a test is invoked with + different argument values. + + If only one argname was specified argvalues is a list of values. + If N argnames were specified, argvalues must be a list of + N-tuples, where each tuple-element specifies a value for its + respective argname. + + :param indirect: + A list of arguments' names (subset of argnames) or a boolean. + If True the list contains all names from the argnames. Each + argvalue corresponding to an argname in this list will + be passed as request.param to its respective argname fixture + function so that it can perform more expensive setups during the + setup phase of a test rather than at collection time. + + :param ids: + Sequence of (or generator for) ids for ``argvalues``, + or a callable to return part of the id for each argvalue. + + With sequences (and generators like ``itertools.count()``) the + returned ids should be of type ``string``, ``int``, ``float``, + ``bool``, or ``None``. + They are mapped to the corresponding index in ``argvalues``. + ``None`` means to use the auto-generated id. + + .. versionadded:: 8.4 + :ref:`hidden-param` means to hide the parameter set + from the test name. Can only be used at most 1 time, as + test names need to be unique. + + If it is a callable it will be called for each entry in + ``argvalues``, and the return value is used as part of the + auto-generated id for the whole set (where parts are joined with + dashes ("-")). + This is useful to provide more specific ids for certain items, e.g. + dates. Returning ``None`` will use an auto-generated id. + + If no ids are provided they will be generated automatically from + the argvalues. + + :param scope: + If specified it denotes the scope of the parameters. + The scope is used for grouping tests by parameter instances. + It will also override any fixture-function defined scope, allowing + to set a dynamic scope using test context or configuration. + """ + nodeid = self.definition.nodeid + + argnames, parametersets = ParameterSet._for_parametrize( + argnames, + argvalues, + self.function, + self.config, + nodeid=self.definition.nodeid, + ) + del argvalues + + if "request" in argnames: + fail( + f"{nodeid}: 'request' is a reserved name and cannot be used in @pytest.mark.parametrize", + pytrace=False, + ) + + if scope is not None: + scope_ = Scope.from_user( + scope, descr=f"parametrize() call in {self.function.__name__}" + ) + else: + scope_ = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect) + + self._validate_if_using_arg_names(argnames, indirect) + + # Use any already (possibly) generated ids with parametrize Marks. + if _param_mark and _param_mark._param_ids_from: + generated_ids = _param_mark._param_ids_from._param_ids_generated + if generated_ids is not None: + ids = generated_ids + + ids = self._resolve_parameter_set_ids( + argnames, ids, parametersets, nodeid=self.definition.nodeid + ) + + # Store used (possibly generated) ids with parametrize Marks. + if _param_mark and _param_mark._param_ids_from and generated_ids is None: + object.__setattr__(_param_mark._param_ids_from, "_param_ids_generated", ids) + + # Calculate directness. + arg_directness = self._resolve_args_directness(argnames, indirect) + self._params_directness.update(arg_directness) + + # Add direct parametrizations as fixturedefs to arg2fixturedefs by + # registering artificial "pseudo" FixtureDef's such that later at test + # setup time we can rely on FixtureDefs to exist for all argnames. + node = None + # For scopes higher than function, a "pseudo" FixtureDef might have + # already been created for the scope. We thus store and cache the + # FixtureDef on the node related to the scope. + if scope_ is Scope.Function: + name2pseudofixturedef = None + else: + collector = self.definition.parent + assert collector is not None + node = get_scope_node(collector, scope_) + if node is None: + # If used class scope and there is no class, use module-level + # collector (for now). + if scope_ is Scope.Class: + assert isinstance(collector, Module) + node = collector + # If used package scope and there is no package, use session + # (for now). + elif scope_ is Scope.Package: + node = collector.session + else: + assert False, f"Unhandled missing scope: {scope}" + default: dict[str, FixtureDef[Any]] = {} + name2pseudofixturedef = node.stash.setdefault( + name2pseudofixturedef_key, default + ) + for argname in argnames: + if arg_directness[argname] == "indirect": + continue + if name2pseudofixturedef is not None and argname in name2pseudofixturedef: + fixturedef = name2pseudofixturedef[argname] + else: + fixturedef = FixtureDef( + config=self.config, + baseid="", + argname=argname, + func=get_direct_param_fixture_func, + scope=scope_, + params=None, + ids=None, + _ispytest=True, + ) + if name2pseudofixturedef is not None: + name2pseudofixturedef[argname] = fixturedef + self._arg2fixturedefs[argname] = [fixturedef] + + # Create the new calls: if we are parametrize() multiple times (by applying the decorator + # more than once) then we accumulate those calls generating the cartesian product + # of all calls. + newcalls = [] + for callspec in self._calls or [CallSpec2()]: + for param_index, (param_id, param_set) in enumerate( + zip(ids, parametersets, strict=True) + ): + newcallspec = callspec.setmulti( + argnames=argnames, + valset=param_set.values, + id=param_id, + marks=param_set.marks, + scope=scope_, + param_index=param_index, + nodeid=nodeid, + ) + newcalls.append(newcallspec) + self._calls = newcalls + + def _resolve_parameter_set_ids( + self, + argnames: Sequence[str], + ids: Iterable[object | None] | Callable[[Any], object | None] | None, + parametersets: Sequence[ParameterSet], + nodeid: str, + ) -> list[str | _HiddenParam]: + """Resolve the actual ids for the given parameter sets. + + :param argnames: + Argument names passed to ``parametrize()``. + :param ids: + The `ids` parameter of the ``parametrize()`` call (see docs). + :param parametersets: + The parameter sets, each containing a set of values corresponding + to ``argnames``. + :param nodeid str: + The nodeid of the definition item that generated this + parametrization. + :returns: + List with ids for each parameter set given. + """ + if ids is None: + idfn = None + ids_ = None + elif callable(ids): + idfn = ids + ids_ = None + else: + idfn = None + ids_ = self._validate_ids(ids, parametersets, self.function.__name__) + id_maker = IdMaker( + argnames, + parametersets, + idfn, + ids_, + self.config, + nodeid=nodeid, + func_name=self.function.__name__, + ) + return id_maker.make_unique_parameterset_ids() + + def _validate_ids( + self, + ids: Iterable[object | None], + parametersets: Sequence[ParameterSet], + func_name: str, + ) -> list[object | None]: + try: + num_ids = len(ids) # type: ignore[arg-type] + except TypeError: + try: + iter(ids) + except TypeError as e: + raise TypeError("ids must be a callable or an iterable") from e + num_ids = len(parametersets) + + # num_ids == 0 is a special case: https://github.com/pytest-dev/pytest/issues/1849 + if num_ids != len(parametersets) and num_ids != 0: + msg = "In {}: {} parameter sets specified, with different number of ids: {}" + fail(msg.format(func_name, len(parametersets), num_ids), pytrace=False) + + return list(itertools.islice(ids, num_ids)) + + def _resolve_args_directness( + self, + argnames: Sequence[str], + indirect: bool | Sequence[str], + ) -> dict[str, Literal["indirect", "direct"]]: + """Resolve if each parametrized argument must be considered an indirect + parameter to a fixture of the same name, or a direct parameter to the + parametrized function, based on the ``indirect`` parameter of the + parametrized() call. + + :param argnames: + List of argument names passed to ``parametrize()``. + :param indirect: + Same as the ``indirect`` parameter of ``parametrize()``. + :returns + A dict mapping each arg name to either "indirect" or "direct". + """ + arg_directness: dict[str, Literal["indirect", "direct"]] + if isinstance(indirect, bool): + arg_directness = dict.fromkeys( + argnames, "indirect" if indirect else "direct" + ) + elif isinstance(indirect, Sequence): + arg_directness = dict.fromkeys(argnames, "direct") + for arg in indirect: + if arg not in argnames: + fail( + f"In {self.function.__name__}: indirect fixture '{arg}' doesn't exist", + pytrace=False, + ) + arg_directness[arg] = "indirect" + else: + fail( + f"In {self.function.__name__}: expected Sequence or boolean" + f" for indirect, got {type(indirect).__name__}", + pytrace=False, + ) + return arg_directness + + def _validate_if_using_arg_names( + self, + argnames: Sequence[str], + indirect: bool | Sequence[str], + ) -> None: + """Check if all argnames are being used, by default values, or directly/indirectly. + + :param List[str] argnames: List of argument names passed to ``parametrize()``. + :param indirect: Same as the ``indirect`` parameter of ``parametrize()``. + :raises ValueError: If validation fails. + """ + default_arg_names = set(get_default_arg_names(self.function)) + func_name = self.function.__name__ + for arg in argnames: + if arg not in self.fixturenames: + if arg in default_arg_names: + fail( + f"In {func_name}: function already takes an argument '{arg}' with a default value", + pytrace=False, + ) + else: + if isinstance(indirect, Sequence): + name = "fixture" if arg in indirect else "argument" + else: + name = "fixture" if indirect else "argument" + fail( + f"In {func_name}: function uses no {name} '{arg}'", + pytrace=False, + ) + + def _recompute_direct_params_indices(self) -> None: + for argname, param_type in self._params_directness.items(): + if param_type == "direct": + for i, callspec in enumerate(self._calls): + callspec.indices[argname] = i + + +def _find_parametrized_scope( + argnames: Sequence[str], + arg2fixturedefs: Mapping[str, Sequence[fixtures.FixtureDef[object]]], + indirect: bool | Sequence[str], +) -> Scope: + """Find the most appropriate scope for a parametrized call based on its arguments. + + When there's at least one direct argument, always use "function" scope. + + When a test function is parametrized and all its arguments are indirect + (e.g. fixtures), return the most narrow scope based on the fixtures used. + + Related to issue #1832, based on code posted by @Kingdread. + """ + if isinstance(indirect, Sequence): + all_arguments_are_fixtures = len(indirect) == len(argnames) + else: + all_arguments_are_fixtures = bool(indirect) + + if all_arguments_are_fixtures: + fixturedefs = arg2fixturedefs or {} + used_scopes = [ + fixturedef[-1]._scope + for name, fixturedef in fixturedefs.items() + if name in argnames + ] + # Takes the most narrow scope from used fixtures. + return min(used_scopes, default=Scope.Function) + + return Scope.Function + + +def _ascii_escaped_by_config(val: str | bytes, config: Config | None) -> str: + if config is None: + escape_option = False + else: + escape_option = config.getini( + "disable_test_id_escaping_and_forfeit_all_rights_to_community_support" + ) + # TODO: If escaping is turned off and the user passes bytes, + # will return a bytes. For now we ignore this but the + # code *probably* doesn't handle this case. + return val if escape_option else ascii_escaped(val) # type: ignore + + +class Function(PyobjMixin, nodes.Item): + """Item responsible for setting up and executing a Python test function. + + :param name: + The full function name, including any decorations like those + added by parametrization (``my_func[my_param]``). + :param parent: + The parent Node. + :param config: + The pytest Config object. + :param callspec: + If given, this function has been parametrized and the callspec contains + meta information about the parametrization. + :param callobj: + If given, the object which will be called when the Function is invoked, + otherwise the callobj will be obtained from ``parent`` using ``originalname``. + :param keywords: + Keywords bound to the function object for "-k" matching. + :param session: + The pytest Session object. + :param fixtureinfo: + Fixture information already resolved at this fixture node.. + :param originalname: + The attribute name to use for accessing the underlying function object. + Defaults to ``name``. Set this if name is different from the original name, + for example when it contains decorations like those added by parametrization + (``my_func[my_param]``). + """ + + # Disable since functions handle it themselves. + _ALLOW_MARKERS = False + + def __init__( + self, + name: str, + parent, + config: Config | None = None, + callspec: CallSpec2 | None = None, + callobj=NOTSET, + keywords: Mapping[str, Any] | None = None, + session: Session | None = None, + fixtureinfo: FuncFixtureInfo | None = None, + originalname: str | None = None, + ) -> None: + super().__init__(name, parent, config=config, session=session) + + if callobj is not NOTSET: + self._obj = callobj + self._instance = getattr(callobj, "__self__", None) + + #: Original function name, without any decorations (for example + #: parametrization adds a ``"[...]"`` suffix to function names), used to access + #: the underlying function object from ``parent`` (in case ``callobj`` is not given + #: explicitly). + #: + #: .. versionadded:: 3.0 + self.originalname = originalname or name + + # Note: when FunctionDefinition is introduced, we should change ``originalname`` + # to a readonly property that returns FunctionDefinition.name. + + self.own_markers.extend(get_unpacked_marks(self.obj)) + if callspec: + self.callspec = callspec + self.own_markers.extend(callspec.marks) + + # todo: this is a hell of a hack + # https://github.com/pytest-dev/pytest/issues/4569 + # Note: the order of the updates is important here; indicates what + # takes priority (ctor argument over function attributes over markers). + # Take own_markers only; NodeKeywords handles parent traversal on its own. + self.keywords.update((mark.name, mark) for mark in self.own_markers) + self.keywords.update(self.obj.__dict__) + if keywords: + self.keywords.update(keywords) + + if fixtureinfo is None: + fm = self.session._fixturemanager + fixtureinfo = fm.getfixtureinfo(self, self.obj, self.cls) + self._fixtureinfo: FuncFixtureInfo = fixtureinfo + self.fixturenames = fixtureinfo.names_closure + self._initrequest() + + # todo: determine sound type limitations + @classmethod + def from_parent(cls, parent, **kw) -> Self: + """The public constructor.""" + return super().from_parent(parent=parent, **kw) + + def _initrequest(self) -> None: + self.funcargs: dict[str, object] = {} + self._request = fixtures.TopRequest(self, _ispytest=True) + + @property + def function(self): + """Underlying python 'function' object.""" + return getimfunc(self.obj) + + @property + def instance(self): + try: + return self._instance + except AttributeError: + if isinstance(self.parent, Class): + # Each Function gets a fresh class instance. + self._instance = self._getinstance() + else: + self._instance = None + return self._instance + + def _getinstance(self): + if isinstance(self.parent, Class): + # Each Function gets a fresh class instance. + return self.parent.newinstance() + else: + return None + + def _getobj(self): + instance = self.instance + if instance is not None: + parent_obj = instance + else: + assert self.parent is not None + parent_obj = self.parent.obj # type: ignore[attr-defined] + return getattr(parent_obj, self.originalname) + + @property + def _pyfuncitem(self): + """(compatonly) for code expecting pytest-2.2 style request objects.""" + return self + + def runtest(self) -> None: + """Execute the underlying test function.""" + self.ihook.pytest_pyfunc_call(pyfuncitem=self) + + def setup(self) -> None: + self._request._fillfixtures() + + def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback: + if hasattr(self, "_obj") and not self.config.getoption("fulltrace", False): + code = _pytest._code.Code.from_function(get_real_func(self.obj)) + path, firstlineno = code.path, code.firstlineno + traceback = excinfo.traceback + ntraceback = traceback.cut(path=path, firstlineno=firstlineno) + if ntraceback == traceback: + ntraceback = ntraceback.cut(path=path) + if ntraceback == traceback: + ntraceback = ntraceback.filter(filter_traceback) + if not ntraceback: + ntraceback = traceback + ntraceback = ntraceback.filter(excinfo) + + # issue364: mark all but first and last frames to + # only show a single-line message for each frame. + if self.config.getoption("tbstyle", "auto") == "auto": + if len(ntraceback) > 2: + ntraceback = Traceback( + ( + ntraceback[0], + *(t.with_repr_style("short") for t in ntraceback[1:-1]), + ntraceback[-1], + ) + ) + + return ntraceback + return excinfo.traceback + + # TODO: Type ignored -- breaks Liskov Substitution. + def repr_failure( # type: ignore[override] + self, + excinfo: ExceptionInfo[BaseException], + ) -> str | TerminalRepr: + style = self.config.getoption("tbstyle", "auto") + if style == "auto": + style = "long" + return self._repr_failure_py(excinfo, style=style) + + +class FunctionDefinition(Function): + """This class is a stop gap solution until we evolve to have actual function + definition nodes and manage to get rid of ``metafunc``.""" + + def runtest(self) -> None: + raise RuntimeError("function definitions are not supposed to be run as tests") + + setup = runtest diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/python_api.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/python_api.py new file mode 100644 index 0000000..1e389eb --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/python_api.py @@ -0,0 +1,820 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +from collections.abc import Collection +from collections.abc import Mapping +from collections.abc import Sequence +from collections.abc import Sized +from decimal import Decimal +import math +from numbers import Complex +import pprint +import sys +from typing import Any +from typing import TYPE_CHECKING + + +if TYPE_CHECKING: + from numpy import ndarray + + +def _compare_approx( + full_object: object, + message_data: Sequence[tuple[str, str, str]], + number_of_elements: int, + different_ids: Sequence[object], + max_abs_diff: float, + max_rel_diff: float, +) -> list[str]: + message_list = list(message_data) + message_list.insert(0, ("Index", "Obtained", "Expected")) + max_sizes = [0, 0, 0] + for index, obtained, expected in message_list: + max_sizes[0] = max(max_sizes[0], len(index)) + max_sizes[1] = max(max_sizes[1], len(obtained)) + max_sizes[2] = max(max_sizes[2], len(expected)) + explanation = [ + f"comparison failed. Mismatched elements: {len(different_ids)} / {number_of_elements}:", + f"Max absolute difference: {max_abs_diff}", + f"Max relative difference: {max_rel_diff}", + ] + [ + f"{indexes:<{max_sizes[0]}} | {obtained:<{max_sizes[1]}} | {expected:<{max_sizes[2]}}" + for indexes, obtained, expected in message_list + ] + return explanation + + +# builtin pytest.approx helper + + +class ApproxBase: + """Provide shared utilities for making approximate comparisons between + numbers or sequences of numbers.""" + + # Tell numpy to use our `__eq__` operator instead of its. + __array_ufunc__ = None + __array_priority__ = 100 + + def __init__(self, expected, rel=None, abs=None, nan_ok: bool = False) -> None: + __tracebackhide__ = True + self.expected = expected + self.abs = abs + self.rel = rel + self.nan_ok = nan_ok + self._check_type() + + def __repr__(self) -> str: + raise NotImplementedError + + def _repr_compare(self, other_side: Any) -> list[str]: + return [ + "comparison failed", + f"Obtained: {other_side}", + f"Expected: {self}", + ] + + def __eq__(self, actual) -> bool: + return all( + a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual) + ) + + def __bool__(self): + __tracebackhide__ = True + raise AssertionError( + "approx() is not supported in a boolean context.\nDid you mean: `assert a == approx(b)`?" + ) + + # Ignore type because of https://github.com/python/mypy/issues/4266. + __hash__ = None # type: ignore + + def __ne__(self, actual) -> bool: + return not (actual == self) + + def _approx_scalar(self, x) -> ApproxScalar: + if isinstance(x, Decimal): + return ApproxDecimal(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok) + return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok) + + def _yield_comparisons(self, actual): + """Yield all the pairs of numbers to be compared. + + This is used to implement the `__eq__` method. + """ + raise NotImplementedError + + def _check_type(self) -> None: + """Raise a TypeError if the expected value is not a valid type.""" + # This is only a concern if the expected value is a sequence. In every + # other case, the approx() function ensures that the expected value has + # a numeric type. For this reason, the default is to do nothing. The + # classes that deal with sequences should reimplement this method to + # raise if there are any non-numeric elements in the sequence. + + +def _recursive_sequence_map(f, x): + """Recursively map a function over a sequence of arbitrary depth""" + if isinstance(x, list | tuple): + seq_type = type(x) + return seq_type(_recursive_sequence_map(f, xi) for xi in x) + elif _is_sequence_like(x): + return [_recursive_sequence_map(f, xi) for xi in x] + else: + return f(x) + + +class ApproxNumpy(ApproxBase): + """Perform approximate comparisons where the expected value is numpy array.""" + + def __repr__(self) -> str: + list_scalars = _recursive_sequence_map( + self._approx_scalar, self.expected.tolist() + ) + return f"approx({list_scalars!r})" + + def _repr_compare(self, other_side: ndarray | list[Any]) -> list[str]: + import itertools + import math + + def get_value_from_nested_list( + nested_list: list[Any], nd_index: tuple[Any, ...] + ) -> Any: + """ + Helper function to get the value out of a nested list, given an n-dimensional index. + This mimics numpy's indexing, but for raw nested python lists. + """ + value: Any = nested_list + for i in nd_index: + value = value[i] + return value + + np_array_shape = self.expected.shape + approx_side_as_seq = _recursive_sequence_map( + self._approx_scalar, self.expected.tolist() + ) + + # convert other_side to numpy array to ensure shape attribute is available + other_side_as_array = _as_numpy_array(other_side) + assert other_side_as_array is not None + + if np_array_shape != other_side_as_array.shape: + return [ + "Impossible to compare arrays with different shapes.", + f"Shapes: {np_array_shape} and {other_side_as_array.shape}", + ] + + number_of_elements = self.expected.size + max_abs_diff = -math.inf + max_rel_diff = -math.inf + different_ids = [] + for index in itertools.product(*(range(i) for i in np_array_shape)): + approx_value = get_value_from_nested_list(approx_side_as_seq, index) + other_value = get_value_from_nested_list(other_side_as_array, index) + if approx_value != other_value: + abs_diff = abs(approx_value.expected - other_value) + max_abs_diff = max(max_abs_diff, abs_diff) + if other_value == 0.0: + max_rel_diff = math.inf + else: + max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value)) + different_ids.append(index) + + message_data = [ + ( + str(index), + str(get_value_from_nested_list(other_side_as_array, index)), + str(get_value_from_nested_list(approx_side_as_seq, index)), + ) + for index in different_ids + ] + return _compare_approx( + self.expected, + message_data, + number_of_elements, + different_ids, + max_abs_diff, + max_rel_diff, + ) + + def __eq__(self, actual) -> bool: + import numpy as np + + # self.expected is supposed to always be an array here. + + if not np.isscalar(actual): + try: + actual = np.asarray(actual) + except Exception as e: + raise TypeError(f"cannot compare '{actual}' to numpy.ndarray") from e + + if not np.isscalar(actual) and actual.shape != self.expected.shape: + return False + + return super().__eq__(actual) + + def _yield_comparisons(self, actual): + import numpy as np + + # `actual` can either be a numpy array or a scalar, it is treated in + # `__eq__` before being passed to `ApproxBase.__eq__`, which is the + # only method that calls this one. + + if np.isscalar(actual): + for i in np.ndindex(self.expected.shape): + yield actual, self.expected[i].item() + else: + for i in np.ndindex(self.expected.shape): + yield actual[i].item(), self.expected[i].item() + + +class ApproxMapping(ApproxBase): + """Perform approximate comparisons where the expected value is a mapping + with numeric values (the keys can be anything).""" + + def __repr__(self) -> str: + return f"approx({ ({k: self._approx_scalar(v) for k, v in self.expected.items()})!r})" + + def _repr_compare(self, other_side: Mapping[object, float]) -> list[str]: + import math + + if len(self.expected) != len(other_side): + return [ + "Impossible to compare mappings with different sizes.", + f"Lengths: {len(self.expected)} and {len(other_side)}", + ] + + if set(self.expected.keys()) != set(other_side.keys()): + return [ + "comparison failed.", + f"Mappings has different keys: expected {self.expected.keys()} but got {other_side.keys()}", + ] + + approx_side_as_map = { + k: self._approx_scalar(v) for k, v in self.expected.items() + } + + number_of_elements = len(approx_side_as_map) + max_abs_diff = -math.inf + max_rel_diff = -math.inf + different_ids = [] + for (approx_key, approx_value), other_value in zip( + approx_side_as_map.items(), other_side.values(), strict=True + ): + if approx_value != other_value: + if approx_value.expected is not None and other_value is not None: + try: + max_abs_diff = max( + max_abs_diff, abs(approx_value.expected - other_value) + ) + if approx_value.expected == 0.0: + max_rel_diff = math.inf + else: + max_rel_diff = max( + max_rel_diff, + abs( + (approx_value.expected - other_value) + / approx_value.expected + ), + ) + except ZeroDivisionError: + pass + different_ids.append(approx_key) + + message_data = [ + (str(key), str(other_side[key]), str(approx_side_as_map[key])) + for key in different_ids + ] + + return _compare_approx( + self.expected, + message_data, + number_of_elements, + different_ids, + max_abs_diff, + max_rel_diff, + ) + + def __eq__(self, actual) -> bool: + try: + if set(actual.keys()) != set(self.expected.keys()): + return False + except AttributeError: + return False + + return super().__eq__(actual) + + def _yield_comparisons(self, actual): + for k in self.expected.keys(): + yield actual[k], self.expected[k] + + def _check_type(self) -> None: + __tracebackhide__ = True + for key, value in self.expected.items(): + if isinstance(value, type(self.expected)): + msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n full mapping={}" + raise TypeError(msg.format(key, value, pprint.pformat(self.expected))) + + +class ApproxSequenceLike(ApproxBase): + """Perform approximate comparisons where the expected value is a sequence of numbers.""" + + def __repr__(self) -> str: + seq_type = type(self.expected) + if seq_type not in (tuple, list): + seq_type = list + return f"approx({seq_type(self._approx_scalar(x) for x in self.expected)!r})" + + def _repr_compare(self, other_side: Sequence[float]) -> list[str]: + import math + + if len(self.expected) != len(other_side): + return [ + "Impossible to compare lists with different sizes.", + f"Lengths: {len(self.expected)} and {len(other_side)}", + ] + + approx_side_as_map = _recursive_sequence_map(self._approx_scalar, self.expected) + + number_of_elements = len(approx_side_as_map) + max_abs_diff = -math.inf + max_rel_diff = -math.inf + different_ids = [] + for i, (approx_value, other_value) in enumerate( + zip(approx_side_as_map, other_side, strict=True) + ): + if approx_value != other_value: + try: + abs_diff = abs(approx_value.expected - other_value) + max_abs_diff = max(max_abs_diff, abs_diff) + # Ignore non-numbers for the diff calculations (#13012). + except TypeError: + pass + else: + if other_value == 0.0: + max_rel_diff = math.inf + else: + max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value)) + different_ids.append(i) + message_data = [ + (str(i), str(other_side[i]), str(approx_side_as_map[i])) + for i in different_ids + ] + + return _compare_approx( + self.expected, + message_data, + number_of_elements, + different_ids, + max_abs_diff, + max_rel_diff, + ) + + def __eq__(self, actual) -> bool: + try: + if len(actual) != len(self.expected): + return False + except TypeError: + return False + return super().__eq__(actual) + + def _yield_comparisons(self, actual): + return zip(actual, self.expected, strict=True) + + def _check_type(self) -> None: + __tracebackhide__ = True + for index, x in enumerate(self.expected): + if isinstance(x, type(self.expected)): + msg = "pytest.approx() does not support nested data structures: {!r} at index {}\n full sequence: {}" + raise TypeError(msg.format(x, index, pprint.pformat(self.expected))) + + +class ApproxScalar(ApproxBase): + """Perform approximate comparisons where the expected value is a single number.""" + + # Using Real should be better than this Union, but not possible yet: + # https://github.com/python/typeshed/pull/3108 + DEFAULT_ABSOLUTE_TOLERANCE: float | Decimal = 1e-12 + DEFAULT_RELATIVE_TOLERANCE: float | Decimal = 1e-6 + + def __repr__(self) -> str: + """Return a string communicating both the expected value and the + tolerance for the comparison being made. + + For example, ``1.0 ± 1e-6``, ``(3+4j) ± 5e-6 ∠ ±180°``. + """ + # Don't show a tolerance for values that aren't compared using + # tolerances, i.e. non-numerics and infinities. Need to call abs to + # handle complex numbers, e.g. (inf + 1j). + if ( + isinstance(self.expected, bool) + or (not isinstance(self.expected, Complex | Decimal)) + or math.isinf(abs(self.expected) or isinstance(self.expected, bool)) + ): + return str(self.expected) + + # If a sensible tolerance can't be calculated, self.tolerance will + # raise a ValueError. In this case, display '???'. + try: + if 1e-3 <= self.tolerance < 1e3: + vetted_tolerance = f"{self.tolerance:n}" + else: + vetted_tolerance = f"{self.tolerance:.1e}" + + if ( + isinstance(self.expected, Complex) + and self.expected.imag + and not math.isinf(self.tolerance) + ): + vetted_tolerance += " ∠ ±180°" + except ValueError: + vetted_tolerance = "???" + + return f"{self.expected} ± {vetted_tolerance}" + + def __eq__(self, actual) -> bool: + """Return whether the given value is equal to the expected value + within the pre-specified tolerance.""" + + def is_bool(val: Any) -> bool: + # Check if `val` is a native bool or numpy bool. + if isinstance(val, bool): + return True + if np := sys.modules.get("numpy"): + return isinstance(val, np.bool_) + return False + + asarray = _as_numpy_array(actual) + if asarray is not None: + # Call ``__eq__()`` manually to prevent infinite-recursion with + # numpy<1.13. See #3748. + return all(self.__eq__(a) for a in asarray.flat) + + # Short-circuit exact equality, except for bool and np.bool_ + if is_bool(self.expected) and not is_bool(actual): + return False + elif actual == self.expected: + return True + + # If either type is non-numeric, fall back to strict equality. + # NB: we need Complex, rather than just Number, to ensure that __abs__, + # __sub__, and __float__ are defined. Also, consider bool to be + # non-numeric, even though it has the required arithmetic. + if is_bool(self.expected) or not ( + isinstance(self.expected, Complex | Decimal) + and isinstance(actual, Complex | Decimal) + ): + return False + + # Allow the user to control whether NaNs are considered equal to each + # other or not. The abs() calls are for compatibility with complex + # numbers. + if math.isnan(abs(self.expected)): + return self.nan_ok and math.isnan(abs(actual)) + + # Infinity shouldn't be approximately equal to anything but itself, but + # if there's a relative tolerance, it will be infinite and infinity + # will seem approximately equal to everything. The equal-to-itself + # case would have been short circuited above, so here we can just + # return false if the expected value is infinite. The abs() call is + # for compatibility with complex numbers. + if math.isinf(abs(self.expected)): + return False + + # Return true if the two numbers are within the tolerance. + result: bool = abs(self.expected - actual) <= self.tolerance + return result + + __hash__ = None + + @property + def tolerance(self): + """Return the tolerance for the comparison. + + This could be either an absolute tolerance or a relative tolerance, + depending on what the user specified or which would be larger. + """ + + def set_default(x, default): + return x if x is not None else default + + # Figure out what the absolute tolerance should be. ``self.abs`` is + # either None or a value specified by the user. + absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE) + + if absolute_tolerance < 0: + raise ValueError( + f"absolute tolerance can't be negative: {absolute_tolerance}" + ) + if math.isnan(absolute_tolerance): + raise ValueError("absolute tolerance can't be NaN.") + + # If the user specified an absolute tolerance but not a relative one, + # just return the absolute tolerance. + if self.rel is None: + if self.abs is not None: + return absolute_tolerance + + # Figure out what the relative tolerance should be. ``self.rel`` is + # either None or a value specified by the user. This is done after + # we've made sure the user didn't ask for an absolute tolerance only, + # because we don't want to raise errors about the relative tolerance if + # we aren't even going to use it. + relative_tolerance = set_default( + self.rel, self.DEFAULT_RELATIVE_TOLERANCE + ) * abs(self.expected) + + if relative_tolerance < 0: + raise ValueError( + f"relative tolerance can't be negative: {relative_tolerance}" + ) + if math.isnan(relative_tolerance): + raise ValueError("relative tolerance can't be NaN.") + + # Return the larger of the relative and absolute tolerances. + return max(relative_tolerance, absolute_tolerance) + + +class ApproxDecimal(ApproxScalar): + """Perform approximate comparisons where the expected value is a Decimal.""" + + DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12") + DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6") + + def __repr__(self) -> str: + if isinstance(self.rel, float): + rel = Decimal.from_float(self.rel) + else: + rel = self.rel + + if isinstance(self.abs, float): + abs_ = Decimal.from_float(self.abs) + else: + abs_ = self.abs + + tol_str = "???" + if rel is not None and Decimal("1e-3") <= rel <= Decimal("1e3"): + tol_str = f"{rel:.1e}" + elif abs_ is not None: + tol_str = f"{abs_:.1e}" + + return f"{self.expected} ± {tol_str}" + + +def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase: + """Assert that two numbers (or two ordered sequences of numbers) are equal to each other + within some tolerance. + + Due to the :doc:`python:tutorial/floatingpoint`, numbers that we + would intuitively expect to be equal are not always so:: + + >>> 0.1 + 0.2 == 0.3 + False + + This problem is commonly encountered when writing tests, e.g. when making + sure that floating-point values are what you expect them to be. One way to + deal with this problem is to assert that two floating-point numbers are + equal to within some appropriate tolerance:: + + >>> abs((0.1 + 0.2) - 0.3) < 1e-6 + True + + However, comparisons like this are tedious to write and difficult to + understand. Furthermore, absolute comparisons like the one above are + usually discouraged because there's no tolerance that works well for all + situations. ``1e-6`` is good for numbers around ``1``, but too small for + very big numbers and too big for very small ones. It's better to express + the tolerance as a fraction of the expected value, but relative comparisons + like that are even more difficult to write correctly and concisely. + + The ``approx`` class performs floating-point comparisons using a syntax + that's as intuitive as possible:: + + >>> from pytest import approx + >>> 0.1 + 0.2 == approx(0.3) + True + + The same syntax also works for ordered sequences of numbers:: + + >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6)) + True + + ``numpy`` arrays:: + + >>> import numpy as np # doctest: +SKIP + >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP + True + + And for a ``numpy`` array against a scalar:: + + >>> import numpy as np # doctest: +SKIP + >>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP + True + + Only ordered sequences are supported, because ``approx`` needs + to infer the relative position of the sequences without ambiguity. This means + ``sets`` and other unordered sequences are not supported. + + Finally, dictionary *values* can also be compared:: + + >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6}) + True + + The comparison will be true if both mappings have the same keys and their + respective values match the expected tolerances. + + **Tolerances** + + By default, ``approx`` considers numbers within a relative tolerance of + ``1e-6`` (i.e. one part in a million) of its expected value to be equal. + This treatment would lead to surprising results if the expected value was + ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``. + To handle this case less surprisingly, ``approx`` also considers numbers + within an absolute tolerance of ``1e-12`` of its expected value to be + equal. Infinity and NaN are special cases. Infinity is only considered + equal to itself, regardless of the relative tolerance. NaN is not + considered equal to anything by default, but you can make it be equal to + itself by setting the ``nan_ok`` argument to True. (This is meant to + facilitate comparing arrays that use NaN to mean "no data".) + + Both the relative and absolute tolerances can be changed by passing + arguments to the ``approx`` constructor:: + + >>> 1.0001 == approx(1) + False + >>> 1.0001 == approx(1, rel=1e-3) + True + >>> 1.0001 == approx(1, abs=1e-3) + True + + If you specify ``abs`` but not ``rel``, the comparison will not consider + the relative tolerance at all. In other words, two numbers that are within + the default relative tolerance of ``1e-6`` will still be considered unequal + if they exceed the specified absolute tolerance. If you specify both + ``abs`` and ``rel``, the numbers will be considered equal if either + tolerance is met:: + + >>> 1 + 1e-8 == approx(1) + True + >>> 1 + 1e-8 == approx(1, abs=1e-12) + False + >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12) + True + + **Non-numeric types** + + You can also use ``approx`` to compare non-numeric types, or dicts and + sequences containing non-numeric types, in which case it falls back to + strict equality. This can be useful for comparing dicts and sequences that + can contain optional values:: + + >>> {"required": 1.0000005, "optional": None} == approx({"required": 1, "optional": None}) + True + >>> [None, 1.0000005] == approx([None,1]) + True + >>> ["foo", 1.0000005] == approx([None,1]) + False + + If you're thinking about using ``approx``, then you might want to know how + it compares to other good ways of comparing floating-point numbers. All of + these algorithms are based on relative and absolute tolerances and should + agree for the most part, but they do have meaningful differences: + + - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative + tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute + tolerance is met. Because the relative tolerance is calculated w.r.t. + both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor + ``b`` is a "reference value"). You have to specify an absolute tolerance + if you want to compare to ``0.0`` because there is no tolerance by + default. More information: :py:func:`math.isclose`. + + - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference + between ``a`` and ``b`` is less that the sum of the relative tolerance + w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance + is only calculated w.r.t. ``b``, this test is asymmetric and you can + think of ``b`` as the reference value. Support for comparing sequences + is provided by :py:func:`numpy.allclose`. More information: + :std:doc:`numpy:reference/generated/numpy.isclose`. + + - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b`` + are within an absolute tolerance of ``1e-7``. No relative tolerance is + considered , so this function is not appropriate for very large or very + small numbers. Also, it's only available in subclasses of ``unittest.TestCase`` + and it's ugly because it doesn't follow PEP8. More information: + :py:meth:`unittest.TestCase.assertAlmostEqual`. + + - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative + tolerance is met w.r.t. ``b`` or if the absolute tolerance is met. + Because the relative tolerance is only calculated w.r.t. ``b``, this test + is asymmetric and you can think of ``b`` as the reference value. In the + special case that you explicitly specify an absolute tolerance but not a + relative tolerance, only the absolute tolerance is considered. + + .. note:: + + ``approx`` can handle numpy arrays, but we recommend the + specialised test helpers in :std:doc:`numpy:reference/routines.testing` + if you need support for comparisons, NaNs, or ULP-based tolerances. + + To match strings using regex, you can use + `Matches `_ + from the + `re_assert package `_. + + + .. note:: + + Unlike built-in equality, this function considers + booleans unequal to numeric zero or one. For example:: + + >>> 1 == approx(True) + False + + .. warning:: + + .. versionchanged:: 3.2 + + In order to avoid inconsistent behavior, :py:exc:`TypeError` is + raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons. + The example below illustrates the problem:: + + assert approx(0.1) > 0.1 + 1e-10 # calls approx(0.1).__gt__(0.1 + 1e-10) + assert 0.1 + 1e-10 > approx(0.1) # calls approx(0.1).__lt__(0.1 + 1e-10) + + In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)`` + to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used to + comparison. This is because the call hierarchy of rich comparisons + follows a fixed behavior. More information: :py:meth:`object.__ge__` + + .. versionchanged:: 3.7.1 + ``approx`` raises ``TypeError`` when it encounters a dict value or + sequence element of non-numeric type. + + .. versionchanged:: 6.1.0 + ``approx`` falls back to strict equality for non-numeric types instead + of raising ``TypeError``. + """ + # Delegate the comparison to a class that knows how to deal with the type + # of the expected value (e.g. int, float, list, dict, numpy.array, etc). + # + # The primary responsibility of these classes is to implement ``__eq__()`` + # and ``__repr__()``. The former is used to actually check if some + # "actual" value is equivalent to the given expected value within the + # allowed tolerance. The latter is used to show the user the expected + # value and tolerance, in the case that a test failed. + # + # The actual logic for making approximate comparisons can be found in + # ApproxScalar, which is used to compare individual numbers. All of the + # other Approx classes eventually delegate to this class. The ApproxBase + # class provides some convenient methods and overloads, but isn't really + # essential. + + __tracebackhide__ = True + + if isinstance(expected, Decimal): + cls: type[ApproxBase] = ApproxDecimal + elif isinstance(expected, Mapping): + cls = ApproxMapping + elif _is_numpy_array(expected): + expected = _as_numpy_array(expected) + cls = ApproxNumpy + elif _is_sequence_like(expected): + cls = ApproxSequenceLike + elif isinstance(expected, Collection) and not isinstance(expected, str | bytes): + msg = f"pytest.approx() only supports ordered sequences, but got: {expected!r}" + raise TypeError(msg) + else: + cls = ApproxScalar + + return cls(expected, rel, abs, nan_ok) + + +def _is_sequence_like(expected: object) -> bool: + return ( + hasattr(expected, "__getitem__") + and isinstance(expected, Sized) + and not isinstance(expected, str | bytes) + ) + + +def _is_numpy_array(obj: object) -> bool: + """ + Return true if the given object is implicitly convertible to ndarray, + and numpy is already imported. + """ + return _as_numpy_array(obj) is not None + + +def _as_numpy_array(obj: object) -> ndarray | None: + """ + Return an ndarray if the given object is implicitly convertible to ndarray, + and numpy is already imported, otherwise None. + """ + np: Any = sys.modules.get("numpy") + if np is not None: + # avoid infinite recursion on numpy scalars, which have __array__ + if np.isscalar(obj): + return None + elif isinstance(obj, np.ndarray): + return obj + elif hasattr(obj, "__array__") or hasattr("obj", "__array_interface__"): + return np.asarray(obj) + return None diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/raises.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/raises.py new file mode 100644 index 0000000..7c246fd --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/raises.py @@ -0,0 +1,1517 @@ +from __future__ import annotations + +from abc import ABC +from abc import abstractmethod +import re +from re import Pattern +import sys +from textwrap import indent +from typing import Any +from typing import cast +from typing import final +from typing import Generic +from typing import get_args +from typing import get_origin +from typing import Literal +from typing import overload +from typing import TYPE_CHECKING +import warnings + +from _pytest._code import ExceptionInfo +from _pytest._code.code import stringify_exception +from _pytest.outcomes import fail +from _pytest.warning_types import PytestWarning + + +if TYPE_CHECKING: + from collections.abc import Callable + from collections.abc import Sequence + + # for some reason Sphinx does not play well with 'from types import TracebackType' + import types + from typing import TypeGuard + + from typing_extensions import ParamSpec + from typing_extensions import TypeVar + + P = ParamSpec("P") + + # this conditional definition is because we want to allow a TypeVar default + BaseExcT_co_default = TypeVar( + "BaseExcT_co_default", + bound=BaseException, + default=BaseException, + covariant=True, + ) + + # Use short name because it shows up in docs. + E = TypeVar("E", bound=BaseException, default=BaseException) +else: + from typing import TypeVar + + BaseExcT_co_default = TypeVar( + "BaseExcT_co_default", bound=BaseException, covariant=True + ) + +# RaisesGroup doesn't work with a default. +BaseExcT_co = TypeVar("BaseExcT_co", bound=BaseException, covariant=True) +BaseExcT_1 = TypeVar("BaseExcT_1", bound=BaseException) +BaseExcT_2 = TypeVar("BaseExcT_2", bound=BaseException) +ExcT_1 = TypeVar("ExcT_1", bound=Exception) +ExcT_2 = TypeVar("ExcT_2", bound=Exception) + +if sys.version_info < (3, 11): + from exceptiongroup import BaseExceptionGroup + from exceptiongroup import ExceptionGroup + + +# String patterns default to including the unicode flag. +_REGEX_NO_FLAGS = re.compile(r"").flags + + +# pytest.raises helper +@overload +def raises( + expected_exception: type[E] | tuple[type[E], ...], + *, + match: str | re.Pattern[str] | None = ..., + check: Callable[[E], bool] = ..., +) -> RaisesExc[E]: ... + + +@overload +def raises( + *, + match: str | re.Pattern[str], + # If exception_type is not provided, check() must do any typechecks itself. + check: Callable[[BaseException], bool] = ..., +) -> RaisesExc[BaseException]: ... + + +@overload +def raises(*, check: Callable[[BaseException], bool]) -> RaisesExc[BaseException]: ... + + +@overload +def raises( + expected_exception: type[E] | tuple[type[E], ...], + func: Callable[..., Any], + *args: Any, + **kwargs: Any, +) -> ExceptionInfo[E]: ... + + +def raises( + expected_exception: type[E] | tuple[type[E], ...] | None = None, + *args: Any, + **kwargs: Any, +) -> RaisesExc[BaseException] | ExceptionInfo[E]: + r"""Assert that a code block/function call raises an exception type, or one of its subclasses. + + :param expected_exception: + The expected exception type, or a tuple if one of multiple possible + exception types are expected. Note that subclasses of the passed exceptions + will also match. + + This is not a required parameter, you may opt to only use ``match`` and/or + ``check`` for verifying the raised exception. + + :kwparam str | re.Pattern[str] | None match: + If specified, a string containing a regular expression, + or a regular expression object, that is tested against the string + representation of the exception and its :pep:`678` `__notes__` + using :func:`re.search`. + + To match a literal string that may contain :ref:`special characters + `, the pattern can first be escaped with :func:`re.escape`. + + (This is only used when ``pytest.raises`` is used as a context manager, + and passed through to the function otherwise. + When using ``pytest.raises`` as a function, you can use: + ``pytest.raises(Exc, func, match="passed on").match("my pattern")``.) + + :kwparam Callable[[BaseException], bool] check: + + .. versionadded:: 8.4 + + If specified, a callable that will be called with the exception as a parameter + after checking the type and the match regex if specified. + If it returns ``True`` it will be considered a match, if not it will + be considered a failed match. + + + Use ``pytest.raises`` as a context manager, which will capture the exception of the given + type, or any of its subclasses:: + + >>> import pytest + >>> with pytest.raises(ZeroDivisionError): + ... 1/0 + + If the code block does not raise the expected exception (:class:`ZeroDivisionError` in the example + above), or no exception at all, the check will fail instead. + + You can also use the keyword argument ``match`` to assert that the + exception matches a text or regex:: + + >>> with pytest.raises(ValueError, match='must be 0 or None'): + ... raise ValueError("value must be 0 or None") + + >>> with pytest.raises(ValueError, match=r'must be \d+$'): + ... raise ValueError("value must be 42") + + The ``match`` argument searches the formatted exception string, which includes any + `PEP-678 `__ ``__notes__``: + + >>> with pytest.raises(ValueError, match=r"had a note added"): # doctest: +SKIP + ... e = ValueError("value must be 42") + ... e.add_note("had a note added") + ... raise e + + The ``check`` argument, if provided, must return True when passed the raised exception + for the match to be successful, otherwise an :exc:`AssertionError` is raised. + + >>> import errno + >>> with pytest.raises(OSError, check=lambda e: e.errno == errno.EACCES): + ... raise OSError(errno.EACCES, "no permission to view") + + The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the + details of the captured exception:: + + >>> with pytest.raises(ValueError) as exc_info: + ... raise ValueError("value must be 42") + >>> assert exc_info.type is ValueError + >>> assert exc_info.value.args[0] == "value must be 42" + + .. warning:: + + Given that ``pytest.raises`` matches subclasses, be wary of using it to match :class:`Exception` like this:: + + # Careful, this will catch ANY exception raised. + with pytest.raises(Exception): + some_function() + + Because :class:`Exception` is the base class of almost all exceptions, it is easy for this to hide + real bugs, where the user wrote this expecting a specific exception, but some other exception is being + raised due to a bug introduced during a refactoring. + + Avoid using ``pytest.raises`` to catch :class:`Exception` unless certain that you really want to catch + **any** exception raised. + + .. note:: + + When using ``pytest.raises`` as a context manager, it's worthwhile to + note that normal context manager rules apply and that the exception + raised *must* be the final line in the scope of the context manager. + Lines of code after that, within the scope of the context manager will + not be executed. For example:: + + >>> value = 15 + >>> with pytest.raises(ValueError) as exc_info: + ... if value > 10: + ... raise ValueError("value must be <= 10") + ... assert exc_info.type is ValueError # This will not execute. + + Instead, the following approach must be taken (note the difference in + scope):: + + >>> with pytest.raises(ValueError) as exc_info: + ... if value > 10: + ... raise ValueError("value must be <= 10") + ... + >>> assert exc_info.type is ValueError + + **Expecting exception groups** + + When expecting exceptions wrapped in :exc:`BaseExceptionGroup` or + :exc:`ExceptionGroup`, you should instead use :class:`pytest.RaisesGroup`. + + **Using with** ``pytest.mark.parametrize`` + + When using :ref:`pytest.mark.parametrize ref` + it is possible to parametrize tests such that + some runs raise an exception and others do not. + + See :ref:`parametrizing_conditional_raising` for an example. + + .. seealso:: + + :ref:`assertraises` for more examples and detailed discussion. + + **Legacy form** + + It is possible to specify a callable by passing a to-be-called lambda:: + + >>> raises(ZeroDivisionError, lambda: 1/0) + + + or you can specify an arbitrary callable with arguments:: + + >>> def f(x): return 1/x + ... + >>> raises(ZeroDivisionError, f, 0) + + >>> raises(ZeroDivisionError, f, x=0) + + + The form above is fully supported but discouraged for new code because the + context manager form is regarded as more readable and less error-prone. + + .. note:: + Similar to caught exception objects in Python, explicitly clearing + local references to returned ``ExceptionInfo`` objects can + help the Python interpreter speed up its garbage collection. + + Clearing those references breaks a reference cycle + (``ExceptionInfo`` --> caught exception --> frame stack raising + the exception --> current frame stack --> local variables --> + ``ExceptionInfo``) which makes Python keep all objects referenced + from that cycle (including all local variables in the current + frame) alive until the next cyclic garbage collection run. + More detailed information can be found in the official Python + documentation for :ref:`the try statement `. + """ + __tracebackhide__ = True + + if not args: + if set(kwargs) - {"match", "check", "expected_exception"}: + msg = "Unexpected keyword arguments passed to pytest.raises: " + msg += ", ".join(sorted(kwargs)) + msg += "\nUse context-manager form instead?" + raise TypeError(msg) + + if expected_exception is None: + return RaisesExc(**kwargs) + return RaisesExc(expected_exception, **kwargs) + + if not expected_exception: + raise ValueError( + f"Expected an exception type or a tuple of exception types, but got `{expected_exception!r}`. " + f"Raising exceptions is already understood as failing the test, so you don't need " + f"any special code to say 'this should never raise an exception'." + ) + func = args[0] + if not callable(func): + raise TypeError(f"{func!r} object (type: {type(func)}) must be callable") + with RaisesExc(expected_exception) as excinfo: + func(*args[1:], **kwargs) + try: + return excinfo + finally: + del excinfo + + +# note: RaisesExc/RaisesGroup uses fail() internally, so this alias +# indicates (to [internal] plugins?) that `pytest.raises` will +# raise `_pytest.outcomes.Failed`, where +# `outcomes.Failed is outcomes.fail.Exception is raises.Exception` +# note: this is *not* the same as `_pytest.main.Failed` +# note: mypy does not recognize this attribute, and it's not possible +# to use a protocol/decorator like the others in outcomes due to +# https://github.com/python/mypy/issues/18715 +raises.Exception = fail.Exception # type: ignore[attr-defined] + + +def _match_pattern(match: Pattern[str]) -> str | Pattern[str]: + """Helper function to remove redundant `re.compile` calls when printing regex""" + return match.pattern if match.flags == _REGEX_NO_FLAGS else match + + +def repr_callable(fun: Callable[[BaseExcT_1], bool]) -> str: + """Get the repr of a ``check`` parameter. + + Split out so it can be monkeypatched (e.g. by hypothesis) + """ + return repr(fun) + + +def backquote(s: str) -> str: + return "`" + s + "`" + + +def _exception_type_name( + e: type[BaseException] | tuple[type[BaseException], ...], +) -> str: + if isinstance(e, type): + return e.__name__ + if len(e) == 1: + return e[0].__name__ + return "(" + ", ".join(ee.__name__ for ee in e) + ")" + + +def _check_raw_type( + expected_type: type[BaseException] | tuple[type[BaseException], ...] | None, + exception: BaseException, +) -> str | None: + if expected_type is None or expected_type == (): + return None + + if not isinstance( + exception, + expected_type, + ): + actual_type_str = backquote(_exception_type_name(type(exception)) + "()") + expected_type_str = backquote(_exception_type_name(expected_type)) + if ( + isinstance(exception, BaseExceptionGroup) + and isinstance(expected_type, type) + and not issubclass(expected_type, BaseExceptionGroup) + ): + return f"Unexpected nested {actual_type_str}, expected {expected_type_str}" + return f"{actual_type_str} is not an instance of {expected_type_str}" + return None + + +def is_fully_escaped(s: str) -> bool: + # we know we won't compile with re.VERBOSE, so whitespace doesn't need to be escaped + metacharacters = "{}()+.*?^$[]" + return not any( + c in metacharacters and (i == 0 or s[i - 1] != "\\") for (i, c) in enumerate(s) + ) + + +def unescape(s: str) -> str: + return re.sub(r"\\([{}()+-.*?^$\[\]\s\\])", r"\1", s) + + +# These classes conceptually differ from ExceptionInfo in that ExceptionInfo is tied, and +# constructed from, a particular exception - whereas these are constructed with expected +# exceptions, and later allow matching towards particular exceptions. +# But there's overlap in `ExceptionInfo.match` and `AbstractRaises._check_match`, as with +# `AbstractRaises.matches` and `ExceptionInfo.errisinstance`+`ExceptionInfo.group_contains`. +# The interaction between these classes should perhaps be improved. +class AbstractRaises(ABC, Generic[BaseExcT_co]): + """ABC with common functionality shared between RaisesExc and RaisesGroup""" + + def __init__( + self, + *, + match: str | Pattern[str] | None, + check: Callable[[BaseExcT_co], bool] | None, + ) -> None: + if isinstance(match, str): + # juggle error in order to avoid context to fail (necessary?) + re_error = None + try: + self.match: Pattern[str] | None = re.compile(match) + except re.error as e: + re_error = e + if re_error is not None: + fail(f"Invalid regex pattern provided to 'match': {re_error}") + if match == "": + warnings.warn( + PytestWarning( + "matching against an empty string will *always* pass. If you want " + "to check for an empty message you need to pass '^$'. If you don't " + "want to match you should pass `None` or leave out the parameter." + ), + stacklevel=2, + ) + else: + self.match = match + + # check if this is a fully escaped regex and has ^$ to match fully + # in which case we can do a proper diff on error + self.rawmatch: str | None = None + if isinstance(match, str) or ( + isinstance(match, Pattern) and match.flags == _REGEX_NO_FLAGS + ): + if isinstance(match, Pattern): + match = match.pattern + if ( + match + and match[0] == "^" + and match[-1] == "$" + and is_fully_escaped(match[1:-1]) + ): + self.rawmatch = unescape(match[1:-1]) + + self.check = check + self._fail_reason: str | None = None + + # used to suppress repeated printing of `repr(self.check)` + self._nested: bool = False + + # set in self._parse_exc + self.is_baseexception = False + + def _parse_exc( + self, exc: type[BaseExcT_1] | types.GenericAlias, expected: str + ) -> type[BaseExcT_1]: + if isinstance(exc, type) and issubclass(exc, BaseException): + if not issubclass(exc, Exception): + self.is_baseexception = True + return exc + # because RaisesGroup does not support variable number of exceptions there's + # still a use for RaisesExc(ExceptionGroup[Exception]). + origin_exc: type[BaseException] | None = get_origin(exc) + if origin_exc and issubclass(origin_exc, BaseExceptionGroup): + exc_type = get_args(exc)[0] + if ( + issubclass(origin_exc, ExceptionGroup) and exc_type in (Exception, Any) + ) or ( + issubclass(origin_exc, BaseExceptionGroup) + and exc_type in (BaseException, Any) + ): + if not issubclass(origin_exc, ExceptionGroup): + self.is_baseexception = True + return cast(type[BaseExcT_1], origin_exc) + else: + raise ValueError( + f"Only `ExceptionGroup[Exception]` or `BaseExceptionGroup[BaseException]` " + f"are accepted as generic types but got `{exc}`. " + f"As `raises` will catch all instances of the specified group regardless of the " + f"generic argument specific nested exceptions has to be checked " + f"with `RaisesGroup`." + ) + # unclear if the Type/ValueError distinction is even helpful here + msg = f"Expected {expected}, but got " + if isinstance(exc, type): # type: ignore[unreachable] + raise ValueError(msg + f"{exc.__name__!r}") + if isinstance(exc, BaseException): # type: ignore[unreachable] + raise TypeError(msg + f"an exception instance: {type(exc).__name__}") + raise TypeError(msg + repr(type(exc).__name__)) + + @property + def fail_reason(self) -> str | None: + """Set after a call to :meth:`matches` to give a human-readable reason for why the match failed. + When used as a context manager the string will be printed as the reason for the + test failing.""" + return self._fail_reason + + def _check_check( + self: AbstractRaises[BaseExcT_1], + exception: BaseExcT_1, + ) -> bool: + if self.check is None: + return True + + if self.check(exception): + return True + + check_repr = "" if self._nested else " " + repr_callable(self.check) + self._fail_reason = f"check{check_repr} did not return True" + return False + + # TODO: harmonize with ExceptionInfo.match + def _check_match(self, e: BaseException) -> bool: + if self.match is None or re.search( + self.match, + stringified_exception := stringify_exception( + e, include_subexception_msg=False + ), + ): + return True + + # if we're matching a group, make sure we're explicit to reduce confusion + # if they're trying to match an exception contained within the group + maybe_specify_type = ( + f" the `{_exception_type_name(type(e))}()`" + if isinstance(e, BaseExceptionGroup) + else "" + ) + if isinstance(self.rawmatch, str): + # TODO: it instructs to use `-v` to print leading text, but that doesn't work + # I also don't know if this is the proper entry point, or tool to use at all + from _pytest.assertion.util import _diff_text + from _pytest.assertion.util import dummy_highlighter + + diff = _diff_text(self.rawmatch, stringified_exception, dummy_highlighter) + self._fail_reason = ("\n" if diff[0][0] == "-" else "") + "\n".join(diff) + return False + + self._fail_reason = ( + f"Regex pattern did not match{maybe_specify_type}.\n" + f" Expected regex: {_match_pattern(self.match)!r}\n" + f" Actual message: {stringified_exception!r}" + ) + if _match_pattern(self.match) == stringified_exception: + self._fail_reason += "\n Did you mean to `re.escape()` the regex?" + return False + + @abstractmethod + def matches( + self: AbstractRaises[BaseExcT_1], exception: BaseException + ) -> TypeGuard[BaseExcT_1]: + """Check if an exception matches the requirements of this AbstractRaises. + If it fails, :meth:`AbstractRaises.fail_reason` should be set. + """ + + +@final +class RaisesExc(AbstractRaises[BaseExcT_co_default]): + """ + .. versionadded:: 8.4 + + + This is the class constructed when calling :func:`pytest.raises`, but may be used + directly as a helper class with :class:`RaisesGroup` when you want to specify + requirements on sub-exceptions. + + You don't need this if you only want to specify the type, since :class:`RaisesGroup` + accepts ``type[BaseException]``. + + :param type[BaseException] | tuple[type[BaseException]] | None expected_exception: + The expected type, or one of several possible types. + May be ``None`` in order to only make use of ``match`` and/or ``check`` + + The type is checked with :func:`isinstance`, and does not need to be an exact match. + If that is wanted you can use the ``check`` parameter. + + :kwparam str | Pattern[str] match: + A regex to match. + + :kwparam Callable[[BaseException], bool] check: + If specified, a callable that will be called with the exception as a parameter + after checking the type and the match regex if specified. + If it returns ``True`` it will be considered a match, if not it will + be considered a failed match. + + :meth:`RaisesExc.matches` can also be used standalone to check individual exceptions. + + Examples:: + + with RaisesGroup(RaisesExc(ValueError, match="string")) + ... + with RaisesGroup(RaisesExc(check=lambda x: x.args == (3, "hello"))): + ... + with RaisesGroup(RaisesExc(check=lambda x: type(x) is ValueError)): + ... + """ + + # Trio bundled hypothesis monkeypatching, we will probably instead assume that + # hypothesis will handle that in their pytest plugin by the time this is released. + # Alternatively we could add a version of get_pretty_function_description ourselves + # https://github.com/HypothesisWorks/hypothesis/blob/8ced2f59f5c7bea3344e35d2d53e1f8f8eb9fcd8/hypothesis-python/src/hypothesis/internal/reflection.py#L439 + + # At least one of the three parameters must be passed. + @overload + def __init__( + self, + expected_exception: ( + type[BaseExcT_co_default] | tuple[type[BaseExcT_co_default], ...] + ), + /, + *, + match: str | Pattern[str] | None = ..., + check: Callable[[BaseExcT_co_default], bool] | None = ..., + ) -> None: ... + + @overload + def __init__( + self: RaisesExc[BaseException], # Give E a value. + /, + *, + match: str | Pattern[str] | None, + # If exception_type is not provided, check() must do any typechecks itself. + check: Callable[[BaseException], bool] | None = ..., + ) -> None: ... + + @overload + def __init__(self, /, *, check: Callable[[BaseException], bool]) -> None: ... + + def __init__( + self, + expected_exception: ( + type[BaseExcT_co_default] | tuple[type[BaseExcT_co_default], ...] | None + ) = None, + /, + *, + match: str | Pattern[str] | None = None, + check: Callable[[BaseExcT_co_default], bool] | None = None, + ): + super().__init__(match=match, check=check) + if isinstance(expected_exception, tuple): + expected_exceptions = expected_exception + elif expected_exception is None: + expected_exceptions = () + else: + expected_exceptions = (expected_exception,) + + if (expected_exceptions == ()) and match is None and check is None: + raise ValueError("You must specify at least one parameter to match on.") + + self.expected_exceptions = tuple( + self._parse_exc(e, expected="a BaseException type") + for e in expected_exceptions + ) + + self._just_propagate = False + + def matches( + self, + exception: BaseException | None, + ) -> TypeGuard[BaseExcT_co_default]: + """Check if an exception matches the requirements of this :class:`RaisesExc`. + If it fails, :attr:`RaisesExc.fail_reason` will be set. + + Examples:: + + assert RaisesExc(ValueError).matches(my_exception): + # is equivalent to + assert isinstance(my_exception, ValueError) + + # this can be useful when checking e.g. the ``__cause__`` of an exception. + with pytest.raises(ValueError) as excinfo: + ... + assert RaisesExc(SyntaxError, match="foo").matches(excinfo.value.__cause__) + # above line is equivalent to + assert isinstance(excinfo.value.__cause__, SyntaxError) + assert re.search("foo", str(excinfo.value.__cause__) + + """ + self._just_propagate = False + if exception is None: + self._fail_reason = "exception is None" + return False + if not self._check_type(exception): + self._just_propagate = True + return False + + if not self._check_match(exception): + return False + + return self._check_check(exception) + + def __repr__(self) -> str: + parameters = [] + if self.expected_exceptions: + parameters.append(_exception_type_name(self.expected_exceptions)) + if self.match is not None: + # If no flags were specified, discard the redundant re.compile() here. + parameters.append( + f"match={_match_pattern(self.match)!r}", + ) + if self.check is not None: + parameters.append(f"check={repr_callable(self.check)}") + return f"RaisesExc({', '.join(parameters)})" + + def _check_type(self, exception: BaseException) -> TypeGuard[BaseExcT_co_default]: + self._fail_reason = _check_raw_type(self.expected_exceptions, exception) + return self._fail_reason is None + + def __enter__(self) -> ExceptionInfo[BaseExcT_co_default]: + self.excinfo: ExceptionInfo[BaseExcT_co_default] = ExceptionInfo.for_later() + return self.excinfo + + # TODO: move common code into superclass + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: types.TracebackType | None, + ) -> bool: + __tracebackhide__ = True + if exc_type is None: + if not self.expected_exceptions: + fail("DID NOT RAISE any exception") + if len(self.expected_exceptions) > 1: + fail(f"DID NOT RAISE any of {self.expected_exceptions!r}") + + fail(f"DID NOT RAISE {self.expected_exceptions[0]!r}") + + assert self.excinfo is not None, ( + "Internal error - should have been constructed in __enter__" + ) + + if not self.matches(exc_val): + if self._just_propagate: + return False + raise AssertionError(self._fail_reason) + + # Cast to narrow the exception type now that it's verified.... + # even though the TypeGuard in self.matches should be narrowing + exc_info = cast( + "tuple[type[BaseExcT_co_default], BaseExcT_co_default, types.TracebackType]", + (exc_type, exc_val, exc_tb), + ) + self.excinfo.fill_unfilled(exc_info) + return True + + +@final +class RaisesGroup(AbstractRaises[BaseExceptionGroup[BaseExcT_co]]): + """ + .. versionadded:: 8.4 + + Contextmanager for checking for an expected :exc:`ExceptionGroup`. + This works similar to :func:`pytest.raises`, but allows for specifying the structure of an :exc:`ExceptionGroup`. + :meth:`ExceptionInfo.group_contains` also tries to handle exception groups, + but it is very bad at checking that you *didn't* get unexpected exceptions. + + The catching behaviour differs from :ref:`except* `, being much + stricter about the structure by default. + By using ``allow_unwrapped=True`` and ``flatten_subgroups=True`` you can match + :ref:`except* ` fully when expecting a single exception. + + :param args: + Any number of exception types, :class:`RaisesGroup` or :class:`RaisesExc` + to specify the exceptions contained in this exception. + All specified exceptions must be present in the raised group, *and no others*. + + If you expect a variable number of exceptions you need to use + :func:`pytest.raises(ExceptionGroup) ` and manually check + the contained exceptions. Consider making use of :meth:`RaisesExc.matches`. + + It does not care about the order of the exceptions, so + ``RaisesGroup(ValueError, TypeError)`` + is equivalent to + ``RaisesGroup(TypeError, ValueError)``. + :kwparam str | re.Pattern[str] | None match: + If specified, a string containing a regular expression, + or a regular expression object, that is tested against the string + representation of the exception group and its :pep:`678` `__notes__` + using :func:`re.search`. + + To match a literal string that may contain :ref:`special characters + `, the pattern can first be escaped with :func:`re.escape`. + + Note that " (5 subgroups)" will be stripped from the ``repr`` before matching. + :kwparam Callable[[E], bool] check: + If specified, a callable that will be called with the group as a parameter + after successfully matching the expected exceptions. If it returns ``True`` + it will be considered a match, if not it will be considered a failed match. + :kwparam bool allow_unwrapped: + If expecting a single exception or :class:`RaisesExc` it will match even + if the exception is not inside an exceptiongroup. + + Using this together with ``match``, ``check`` or expecting multiple exceptions + will raise an error. + :kwparam bool flatten_subgroups: + "flatten" any groups inside the raised exception group, extracting all exceptions + inside any nested groups, before matching. Without this it expects you to + fully specify the nesting structure by passing :class:`RaisesGroup` as expected + parameter. + + Examples:: + + with RaisesGroup(ValueError): + raise ExceptionGroup("", (ValueError(),)) + # match + with RaisesGroup( + ValueError, + ValueError, + RaisesExc(TypeError, match="^expected int$"), + match="^my group$", + ): + raise ExceptionGroup( + "my group", + [ + ValueError(), + TypeError("expected int"), + ValueError(), + ], + ) + # check + with RaisesGroup( + KeyboardInterrupt, + match="^hello$", + check=lambda x: isinstance(x.__cause__, ValueError), + ): + raise BaseExceptionGroup("hello", [KeyboardInterrupt()]) from ValueError + # nested groups + with RaisesGroup(RaisesGroup(ValueError)): + raise ExceptionGroup("", (ExceptionGroup("", (ValueError(),)),)) + + # flatten_subgroups + with RaisesGroup(ValueError, flatten_subgroups=True): + raise ExceptionGroup("", (ExceptionGroup("", (ValueError(),)),)) + + # allow_unwrapped + with RaisesGroup(ValueError, allow_unwrapped=True): + raise ValueError + + + :meth:`RaisesGroup.matches` can also be used directly to check a standalone exception group. + + + The matching algorithm is greedy, which means cases such as this may fail:: + + with RaisesGroup(ValueError, RaisesExc(ValueError, match="hello")): + raise ExceptionGroup("", (ValueError("hello"), ValueError("goodbye"))) + + even though it generally does not care about the order of the exceptions in the group. + To avoid the above you should specify the first :exc:`ValueError` with a :class:`RaisesExc` as well. + + .. note:: + When raised exceptions don't match the expected ones, you'll get a detailed error + message explaining why. This includes ``repr(check)`` if set, which in Python can be + overly verbose, showing memory locations etc etc. + + If installed and imported (in e.g. ``conftest.py``), the ``hypothesis`` library will + monkeypatch this output to provide shorter & more readable repr's. + """ + + # allow_unwrapped=True requires: singular exception, exception not being + # RaisesGroup instance, match is None, check is None + @overload + def __init__( + self, + expected_exception: type[BaseExcT_co] | RaisesExc[BaseExcT_co], + /, + *, + allow_unwrapped: Literal[True], + flatten_subgroups: bool = False, + ) -> None: ... + + # flatten_subgroups = True also requires no nested RaisesGroup + @overload + def __init__( + self, + expected_exception: type[BaseExcT_co] | RaisesExc[BaseExcT_co], + /, + *other_exceptions: type[BaseExcT_co] | RaisesExc[BaseExcT_co], + flatten_subgroups: Literal[True], + match: str | Pattern[str] | None = None, + check: Callable[[BaseExceptionGroup[BaseExcT_co]], bool] | None = None, + ) -> None: ... + + # simplify the typevars if possible (the following 3 are equivalent but go simpler->complicated) + # ... the first handles RaisesGroup[ValueError], the second RaisesGroup[ExceptionGroup[ValueError]], + # the third RaisesGroup[ValueError | ExceptionGroup[ValueError]]. + # ... otherwise, we will get results like RaisesGroup[ValueError | ExceptionGroup[Never]] (I think) + # (technically correct but misleading) + @overload + def __init__( + self: RaisesGroup[ExcT_1], + expected_exception: type[ExcT_1] | RaisesExc[ExcT_1], + /, + *other_exceptions: type[ExcT_1] | RaisesExc[ExcT_1], + match: str | Pattern[str] | None = None, + check: Callable[[ExceptionGroup[ExcT_1]], bool] | None = None, + ) -> None: ... + + @overload + def __init__( + self: RaisesGroup[ExceptionGroup[ExcT_2]], + expected_exception: RaisesGroup[ExcT_2], + /, + *other_exceptions: RaisesGroup[ExcT_2], + match: str | Pattern[str] | None = None, + check: Callable[[ExceptionGroup[ExceptionGroup[ExcT_2]]], bool] | None = None, + ) -> None: ... + + @overload + def __init__( + self: RaisesGroup[ExcT_1 | ExceptionGroup[ExcT_2]], + expected_exception: type[ExcT_1] | RaisesExc[ExcT_1] | RaisesGroup[ExcT_2], + /, + *other_exceptions: type[ExcT_1] | RaisesExc[ExcT_1] | RaisesGroup[ExcT_2], + match: str | Pattern[str] | None = None, + check: ( + Callable[[ExceptionGroup[ExcT_1 | ExceptionGroup[ExcT_2]]], bool] | None + ) = None, + ) -> None: ... + + # same as the above 3 but handling BaseException + @overload + def __init__( + self: RaisesGroup[BaseExcT_1], + expected_exception: type[BaseExcT_1] | RaisesExc[BaseExcT_1], + /, + *other_exceptions: type[BaseExcT_1] | RaisesExc[BaseExcT_1], + match: str | Pattern[str] | None = None, + check: Callable[[BaseExceptionGroup[BaseExcT_1]], bool] | None = None, + ) -> None: ... + + @overload + def __init__( + self: RaisesGroup[BaseExceptionGroup[BaseExcT_2]], + expected_exception: RaisesGroup[BaseExcT_2], + /, + *other_exceptions: RaisesGroup[BaseExcT_2], + match: str | Pattern[str] | None = None, + check: ( + Callable[[BaseExceptionGroup[BaseExceptionGroup[BaseExcT_2]]], bool] | None + ) = None, + ) -> None: ... + + @overload + def __init__( + self: RaisesGroup[BaseExcT_1 | BaseExceptionGroup[BaseExcT_2]], + expected_exception: type[BaseExcT_1] + | RaisesExc[BaseExcT_1] + | RaisesGroup[BaseExcT_2], + /, + *other_exceptions: type[BaseExcT_1] + | RaisesExc[BaseExcT_1] + | RaisesGroup[BaseExcT_2], + match: str | Pattern[str] | None = None, + check: ( + Callable[ + [BaseExceptionGroup[BaseExcT_1 | BaseExceptionGroup[BaseExcT_2]]], + bool, + ] + | None + ) = None, + ) -> None: ... + + def __init__( + self: RaisesGroup[ExcT_1 | BaseExcT_1 | BaseExceptionGroup[BaseExcT_2]], + expected_exception: type[BaseExcT_1] + | RaisesExc[BaseExcT_1] + | RaisesGroup[BaseExcT_2], + /, + *other_exceptions: type[BaseExcT_1] + | RaisesExc[BaseExcT_1] + | RaisesGroup[BaseExcT_2], + allow_unwrapped: bool = False, + flatten_subgroups: bool = False, + match: str | Pattern[str] | None = None, + check: ( + Callable[[BaseExceptionGroup[BaseExcT_1]], bool] + | Callable[[ExceptionGroup[ExcT_1]], bool] + | None + ) = None, + ): + # The type hint on the `self` and `check` parameters uses different formats + # that are *very* hard to reconcile while adhering to the overloads, so we cast + # it to avoid an error when passing it to super().__init__ + check = cast( + "Callable[[BaseExceptionGroup[ExcT_1|BaseExcT_1|BaseExceptionGroup[BaseExcT_2]]], bool]", + check, + ) + super().__init__(match=match, check=check) + self.allow_unwrapped = allow_unwrapped + self.flatten_subgroups: bool = flatten_subgroups + self.is_baseexception = False + + if allow_unwrapped and other_exceptions: + raise ValueError( + "You cannot specify multiple exceptions with `allow_unwrapped=True.`" + " If you want to match one of multiple possible exceptions you should" + " use a `RaisesExc`." + " E.g. `RaisesExc(check=lambda e: isinstance(e, (...)))`", + ) + if allow_unwrapped and isinstance(expected_exception, RaisesGroup): + raise ValueError( + "`allow_unwrapped=True` has no effect when expecting a `RaisesGroup`." + " You might want it in the expected `RaisesGroup`, or" + " `flatten_subgroups=True` if you don't care about the structure.", + ) + if allow_unwrapped and (match is not None or check is not None): + raise ValueError( + "`allow_unwrapped=True` bypasses the `match` and `check` parameters" + " if the exception is unwrapped. If you intended to match/check the" + " exception you should use a `RaisesExc` object. If you want to match/check" + " the exceptiongroup when the exception *is* wrapped you need to" + " do e.g. `if isinstance(exc.value, ExceptionGroup):" + " assert RaisesGroup(...).matches(exc.value)` afterwards.", + ) + + self.expected_exceptions: tuple[ + type[BaseExcT_co] | RaisesExc[BaseExcT_co] | RaisesGroup[BaseException], ... + ] = tuple( + self._parse_excgroup(e, "a BaseException type, RaisesExc, or RaisesGroup") + for e in ( + expected_exception, + *other_exceptions, + ) + ) + + def _parse_excgroup( + self, + exc: ( + type[BaseExcT_co] + | types.GenericAlias + | RaisesExc[BaseExcT_1] + | RaisesGroup[BaseExcT_2] + ), + expected: str, + ) -> type[BaseExcT_co] | RaisesExc[BaseExcT_1] | RaisesGroup[BaseExcT_2]: + # verify exception type and set `self.is_baseexception` + if isinstance(exc, RaisesGroup): + if self.flatten_subgroups: + raise ValueError( + "You cannot specify a nested structure inside a RaisesGroup with" + " `flatten_subgroups=True`. The parameter will flatten subgroups" + " in the raised exceptiongroup before matching, which would never" + " match a nested structure.", + ) + self.is_baseexception |= exc.is_baseexception + exc._nested = True + return exc + elif isinstance(exc, RaisesExc): + self.is_baseexception |= exc.is_baseexception + exc._nested = True + return exc + elif isinstance(exc, tuple): + raise TypeError( + f"Expected {expected}, but got {type(exc).__name__!r}.\n" + "RaisesGroup does not support tuples of exception types when expecting one of " + "several possible exception types like RaisesExc.\n" + "If you meant to expect a group with multiple exceptions, list them as separate arguments." + ) + else: + return super()._parse_exc(exc, expected) + + @overload + def __enter__( + self: RaisesGroup[ExcT_1], + ) -> ExceptionInfo[ExceptionGroup[ExcT_1]]: ... + @overload + def __enter__( + self: RaisesGroup[BaseExcT_1], + ) -> ExceptionInfo[BaseExceptionGroup[BaseExcT_1]]: ... + + def __enter__(self) -> ExceptionInfo[BaseExceptionGroup[BaseException]]: + self.excinfo: ExceptionInfo[BaseExceptionGroup[BaseExcT_co]] = ( + ExceptionInfo.for_later() + ) + return self.excinfo + + def __repr__(self) -> str: + reqs = [ + e.__name__ if isinstance(e, type) else repr(e) + for e in self.expected_exceptions + ] + if self.allow_unwrapped: + reqs.append(f"allow_unwrapped={self.allow_unwrapped}") + if self.flatten_subgroups: + reqs.append(f"flatten_subgroups={self.flatten_subgroups}") + if self.match is not None: + # If no flags were specified, discard the redundant re.compile() here. + reqs.append(f"match={_match_pattern(self.match)!r}") + if self.check is not None: + reqs.append(f"check={repr_callable(self.check)}") + return f"RaisesGroup({', '.join(reqs)})" + + def _unroll_exceptions( + self, + exceptions: Sequence[BaseException], + ) -> Sequence[BaseException]: + """Used if `flatten_subgroups=True`.""" + res: list[BaseException] = [] + for exc in exceptions: + if isinstance(exc, BaseExceptionGroup): + res.extend(self._unroll_exceptions(exc.exceptions)) + + else: + res.append(exc) + return res + + @overload + def matches( + self: RaisesGroup[ExcT_1], + exception: BaseException | None, + ) -> TypeGuard[ExceptionGroup[ExcT_1]]: ... + @overload + def matches( + self: RaisesGroup[BaseExcT_1], + exception: BaseException | None, + ) -> TypeGuard[BaseExceptionGroup[BaseExcT_1]]: ... + + def matches( + self, + exception: BaseException | None, + ) -> bool: + """Check if an exception matches the requirements of this RaisesGroup. + If it fails, `RaisesGroup.fail_reason` will be set. + + Example:: + + with pytest.raises(TypeError) as excinfo: + ... + assert RaisesGroup(ValueError).matches(excinfo.value.__cause__) + # the above line is equivalent to + myexc = excinfo.value.__cause + assert isinstance(myexc, BaseExceptionGroup) + assert len(myexc.exceptions) == 1 + assert isinstance(myexc.exceptions[0], ValueError) + """ + self._fail_reason = None + if exception is None: + self._fail_reason = "exception is None" + return False + if not isinstance(exception, BaseExceptionGroup): + # we opt to only print type of the exception here, as the repr would + # likely be quite long + not_group_msg = f"`{type(exception).__name__}()` is not an exception group" + if len(self.expected_exceptions) > 1: + self._fail_reason = not_group_msg + return False + # if we have 1 expected exception, check if it would work even if + # allow_unwrapped is not set + res = self._check_expected(self.expected_exceptions[0], exception) + if res is None and self.allow_unwrapped: + return True + + if res is None: + self._fail_reason = ( + f"{not_group_msg}, but would match with `allow_unwrapped=True`" + ) + elif self.allow_unwrapped: + self._fail_reason = res + else: + self._fail_reason = not_group_msg + return False + + actual_exceptions: Sequence[BaseException] = exception.exceptions + if self.flatten_subgroups: + actual_exceptions = self._unroll_exceptions(actual_exceptions) + + if not self._check_match(exception): + self._fail_reason = cast(str, self._fail_reason) + old_reason = self._fail_reason + if ( + len(actual_exceptions) == len(self.expected_exceptions) == 1 + and isinstance(expected := self.expected_exceptions[0], type) + and isinstance(actual := actual_exceptions[0], expected) + and self._check_match(actual) + ): + assert self.match is not None, "can't be None if _check_match failed" + assert self._fail_reason is old_reason is not None + self._fail_reason += ( + f"\n" + f" but matched the expected `{self._repr_expected(expected)}`.\n" + f" You might want " + f"`RaisesGroup(RaisesExc({expected.__name__}, match={_match_pattern(self.match)!r}))`" + ) + else: + self._fail_reason = old_reason + return False + + # do the full check on expected exceptions + if not self._check_exceptions( + exception, + actual_exceptions, + ): + self._fail_reason = cast(str, self._fail_reason) + assert self._fail_reason is not None + old_reason = self._fail_reason + # if we're not expecting a nested structure, and there is one, do a second + # pass where we try flattening it + if ( + not self.flatten_subgroups + and not any( + isinstance(e, RaisesGroup) for e in self.expected_exceptions + ) + and any(isinstance(e, BaseExceptionGroup) for e in actual_exceptions) + and self._check_exceptions( + exception, + self._unroll_exceptions(exception.exceptions), + ) + ): + # only indent if it's a single-line reason. In a multi-line there's already + # indented lines that this does not belong to. + indent = " " if "\n" not in self._fail_reason else "" + self._fail_reason = ( + old_reason + + f"\n{indent}Did you mean to use `flatten_subgroups=True`?" + ) + else: + self._fail_reason = old_reason + return False + + # Only run `self.check` once we know `exception` is of the correct type. + if not self._check_check(exception): + reason = ( + cast(str, self._fail_reason) + f" on the {type(exception).__name__}" + ) + if ( + len(actual_exceptions) == len(self.expected_exceptions) == 1 + and isinstance(expected := self.expected_exceptions[0], type) + # we explicitly break typing here :) + and self._check_check(actual_exceptions[0]) # type: ignore[arg-type] + ): + self._fail_reason = reason + ( + f", but did return True for the expected {self._repr_expected(expected)}." + f" You might want RaisesGroup(RaisesExc({expected.__name__}, check=<...>))" + ) + else: + self._fail_reason = reason + return False + + return True + + @staticmethod + def _check_expected( + expected_type: ( + type[BaseException] | RaisesExc[BaseException] | RaisesGroup[BaseException] + ), + exception: BaseException, + ) -> str | None: + """Helper method for `RaisesGroup.matches` and `RaisesGroup._check_exceptions` + to check one of potentially several expected exceptions.""" + if isinstance(expected_type, type): + return _check_raw_type(expected_type, exception) + res = expected_type.matches(exception) + if res: + return None + assert expected_type.fail_reason is not None + if expected_type.fail_reason.startswith("\n"): + return f"\n{expected_type!r}: {indent(expected_type.fail_reason, ' ')}" + return f"{expected_type!r}: {expected_type.fail_reason}" + + @staticmethod + def _repr_expected(e: type[BaseException] | AbstractRaises[BaseException]) -> str: + """Get the repr of an expected type/RaisesExc/RaisesGroup, but we only want + the name if it's a type""" + if isinstance(e, type): + return _exception_type_name(e) + return repr(e) + + @overload + def _check_exceptions( + self: RaisesGroup[ExcT_1], + _exception: Exception, + actual_exceptions: Sequence[Exception], + ) -> TypeGuard[ExceptionGroup[ExcT_1]]: ... + @overload + def _check_exceptions( + self: RaisesGroup[BaseExcT_1], + _exception: BaseException, + actual_exceptions: Sequence[BaseException], + ) -> TypeGuard[BaseExceptionGroup[BaseExcT_1]]: ... + + def _check_exceptions( + self, + _exception: BaseException, + actual_exceptions: Sequence[BaseException], + ) -> bool: + """Helper method for RaisesGroup.matches that attempts to pair up expected and actual exceptions""" + # The _exception parameter is not used, but necessary for the TypeGuard + + # full table with all results + results = ResultHolder(self.expected_exceptions, actual_exceptions) + + # (indexes of) raised exceptions that haven't (yet) found an expected + remaining_actual = list(range(len(actual_exceptions))) + # (indexes of) expected exceptions that haven't found a matching raised + failed_expected: list[int] = [] + # successful greedy matches + matches: dict[int, int] = {} + + # loop over expected exceptions first to get a more predictable result + for i_exp, expected in enumerate(self.expected_exceptions): + for i_rem in remaining_actual: + res = self._check_expected(expected, actual_exceptions[i_rem]) + results.set_result(i_exp, i_rem, res) + if res is None: + remaining_actual.remove(i_rem) + matches[i_exp] = i_rem + break + else: + failed_expected.append(i_exp) + + # All exceptions matched up successfully + if not remaining_actual and not failed_expected: + return True + + # in case of a single expected and single raised we simplify the output + if 1 == len(actual_exceptions) == len(self.expected_exceptions): + assert not matches + self._fail_reason = res + return False + + # The test case is failing, so we can do a slow and exhaustive check to find + # duplicate matches etc that will be helpful in debugging + for i_exp, expected in enumerate(self.expected_exceptions): + for i_actual, actual in enumerate(actual_exceptions): + if results.has_result(i_exp, i_actual): + continue + results.set_result( + i_exp, i_actual, self._check_expected(expected, actual) + ) + + successful_str = ( + f"{len(matches)} matched exception{'s' if len(matches) > 1 else ''}. " + if matches + else "" + ) + + # all expected were found + if not failed_expected and results.no_match_for_actual(remaining_actual): + self._fail_reason = ( + f"{successful_str}Unexpected exception(s):" + f" {[actual_exceptions[i] for i in remaining_actual]!r}" + ) + return False + # all raised exceptions were expected + if not remaining_actual and results.no_match_for_expected(failed_expected): + no_match_for_str = ", ".join( + self._repr_expected(self.expected_exceptions[i]) + for i in failed_expected + ) + self._fail_reason = f"{successful_str}Too few exceptions raised, found no match for: [{no_match_for_str}]" + return False + + # if there's only one remaining and one failed, and the unmatched didn't match anything else, + # we elect to only print why the remaining and the failed didn't match. + if ( + 1 == len(remaining_actual) == len(failed_expected) + and results.no_match_for_actual(remaining_actual) + and results.no_match_for_expected(failed_expected) + ): + self._fail_reason = f"{successful_str}{results.get_result(failed_expected[0], remaining_actual[0])}" + return False + + # there's both expected and raised exceptions without matches + s = "" + if matches: + s += f"\n{successful_str}" + indent_1 = " " * 2 + indent_2 = " " * 4 + + if not remaining_actual: + s += "\nToo few exceptions raised!" + elif not failed_expected: + s += "\nUnexpected exception(s)!" + + if failed_expected: + s += "\nThe following expected exceptions did not find a match:" + rev_matches = {v: k for k, v in matches.items()} + for i_failed in failed_expected: + s += ( + f"\n{indent_1}{self._repr_expected(self.expected_exceptions[i_failed])}" + ) + for i_actual, actual in enumerate(actual_exceptions): + if results.get_result(i_exp, i_actual) is None: + # we print full repr of match target + s += ( + f"\n{indent_2}It matches {backquote(repr(actual))} which was paired with " + + backquote( + self._repr_expected( + self.expected_exceptions[rev_matches[i_actual]] + ) + ) + ) + + if remaining_actual: + s += "\nThe following raised exceptions did not find a match" + for i_actual in remaining_actual: + s += f"\n{indent_1}{actual_exceptions[i_actual]!r}:" + for i_exp, expected in enumerate(self.expected_exceptions): + res = results.get_result(i_exp, i_actual) + if i_exp in failed_expected: + assert res is not None + if res[0] != "\n": + s += "\n" + s += indent(res, indent_2) + if res is None: + # we print full repr of match target + s += ( + f"\n{indent_2}It matches {backquote(self._repr_expected(expected))} " + f"which was paired with {backquote(repr(actual_exceptions[matches[i_exp]]))}" + ) + + if len(self.expected_exceptions) == len(actual_exceptions) and possible_match( + results + ): + s += ( + "\nThere exist a possible match when attempting an exhaustive check," + " but RaisesGroup uses a greedy algorithm. " + "Please make your expected exceptions more stringent with `RaisesExc` etc" + " so the greedy algorithm can function." + ) + self._fail_reason = s + return False + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: types.TracebackType | None, + ) -> bool: + __tracebackhide__ = True + if exc_type is None: + fail(f"DID NOT RAISE any exception, expected `{self.expected_type()}`") + + assert self.excinfo is not None, ( + "Internal error - should have been constructed in __enter__" + ) + + # group_str is the only thing that differs between RaisesExc and RaisesGroup... + # I might just scrap it? Or make it part of fail_reason + group_str = ( + "(group)" + if self.allow_unwrapped and not issubclass(exc_type, BaseExceptionGroup) + else "group" + ) + + if not self.matches(exc_val): + fail(f"Raised exception {group_str} did not match: {self._fail_reason}") + + # Cast to narrow the exception type now that it's verified.... + # even though the TypeGuard in self.matches should be narrowing + exc_info = cast( + "tuple[type[BaseExceptionGroup[BaseExcT_co]], BaseExceptionGroup[BaseExcT_co], types.TracebackType]", + (exc_type, exc_val, exc_tb), + ) + self.excinfo.fill_unfilled(exc_info) + return True + + def expected_type(self) -> str: + subexcs = [] + for e in self.expected_exceptions: + if isinstance(e, RaisesExc): + subexcs.append(repr(e)) + elif isinstance(e, RaisesGroup): + subexcs.append(e.expected_type()) + elif isinstance(e, type): + subexcs.append(e.__name__) + else: # pragma: no cover + raise AssertionError("unknown type") + group_type = "Base" if self.is_baseexception else "" + return f"{group_type}ExceptionGroup({', '.join(subexcs)})" + + +@final +class NotChecked: + """Singleton for unchecked values in ResultHolder""" + + +class ResultHolder: + """Container for results of checking exceptions. + Used in RaisesGroup._check_exceptions and possible_match. + """ + + def __init__( + self, + expected_exceptions: tuple[ + type[BaseException] | AbstractRaises[BaseException], ... + ], + actual_exceptions: Sequence[BaseException], + ) -> None: + self.results: list[list[str | type[NotChecked] | None]] = [ + [NotChecked for _ in expected_exceptions] for _ in actual_exceptions + ] + + def set_result(self, expected: int, actual: int, result: str | None) -> None: + self.results[actual][expected] = result + + def get_result(self, expected: int, actual: int) -> str | None: + res = self.results[actual][expected] + assert res is not NotChecked + # mypy doesn't support identity checking against anything but None + return res # type: ignore[return-value] + + def has_result(self, expected: int, actual: int) -> bool: + return self.results[actual][expected] is not NotChecked + + def no_match_for_expected(self, expected: list[int]) -> bool: + for i in expected: + for actual_results in self.results: + assert actual_results[i] is not NotChecked + if actual_results[i] is None: + return False + return True + + def no_match_for_actual(self, actual: list[int]) -> bool: + for i in actual: + for res in self.results[i]: + assert res is not NotChecked + if res is None: + return False + return True + + +def possible_match(results: ResultHolder, used: set[int] | None = None) -> bool: + if used is None: + used = set() + curr_row = len(used) + if curr_row == len(results.results): + return True + return any( + val is None and i not in used and possible_match(results, used | {i}) + for (i, val) in enumerate(results.results[curr_row]) + ) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/recwarn.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/recwarn.py new file mode 100644 index 0000000..e3db717 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/recwarn.py @@ -0,0 +1,367 @@ +# mypy: allow-untyped-defs +"""Record warnings during test function execution.""" + +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Generator +from collections.abc import Iterator +from pprint import pformat +import re +from types import TracebackType +from typing import Any +from typing import final +from typing import overload +from typing import TYPE_CHECKING +from typing import TypeVar + + +if TYPE_CHECKING: + from typing_extensions import Self + +import warnings + +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.outcomes import Exit +from _pytest.outcomes import fail + + +T = TypeVar("T") + + +@fixture +def recwarn() -> Generator[WarningsRecorder]: + """Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. + + See :ref:`warnings` for information on warning categories. + """ + wrec = WarningsRecorder(_ispytest=True) + with wrec: + warnings.simplefilter("default") + yield wrec + + +@overload +def deprecated_call( + *, match: str | re.Pattern[str] | None = ... +) -> WarningsRecorder: ... + + +@overload +def deprecated_call(func: Callable[..., T], *args: Any, **kwargs: Any) -> T: ... + + +def deprecated_call( + func: Callable[..., Any] | None = None, *args: Any, **kwargs: Any +) -> WarningsRecorder | Any: + """Assert that code produces a ``DeprecationWarning`` or ``PendingDeprecationWarning`` or ``FutureWarning``. + + This function can be used as a context manager:: + + >>> import warnings + >>> def api_call_v2(): + ... warnings.warn('use v3 of this api', DeprecationWarning) + ... return 200 + + >>> import pytest + >>> with pytest.deprecated_call(): + ... assert api_call_v2() == 200 + + It can also be used by passing a function and ``*args`` and ``**kwargs``, + in which case it will ensure calling ``func(*args, **kwargs)`` produces one of + the warnings types above. The return value is the return value of the function. + + In the context manager form you may use the keyword argument ``match`` to assert + that the warning matches a text or regex. + + The context manager produces a list of :class:`warnings.WarningMessage` objects, + one for each warning raised. + """ + __tracebackhide__ = True + if func is not None: + args = (func, *args) + return warns( + (DeprecationWarning, PendingDeprecationWarning, FutureWarning), *args, **kwargs + ) + + +@overload +def warns( + expected_warning: type[Warning] | tuple[type[Warning], ...] = ..., + *, + match: str | re.Pattern[str] | None = ..., +) -> WarningsChecker: ... + + +@overload +def warns( + expected_warning: type[Warning] | tuple[type[Warning], ...], + func: Callable[..., T], + *args: Any, + **kwargs: Any, +) -> T: ... + + +def warns( + expected_warning: type[Warning] | tuple[type[Warning], ...] = Warning, + *args: Any, + match: str | re.Pattern[str] | None = None, + **kwargs: Any, +) -> WarningsChecker | Any: + r"""Assert that code raises a particular class of warning. + + Specifically, the parameter ``expected_warning`` can be a warning class or tuple + of warning classes, and the code inside the ``with`` block must issue at least one + warning of that class or classes. + + This helper produces a list of :class:`warnings.WarningMessage` objects, one for + each warning emitted (regardless of whether it is an ``expected_warning`` or not). + Since pytest 8.0, unmatched warnings are also re-emitted when the context closes. + + This function can be used as a context manager:: + + >>> import pytest + >>> with pytest.warns(RuntimeWarning): + ... warnings.warn("my warning", RuntimeWarning) + + In the context manager form you may use the keyword argument ``match`` to assert + that the warning matches a text or regex:: + + >>> with pytest.warns(UserWarning, match='must be 0 or None'): + ... warnings.warn("value must be 0 or None", UserWarning) + + >>> with pytest.warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("value must be 42", UserWarning) + + >>> with pytest.warns(UserWarning): # catch re-emitted warning + ... with pytest.warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("this is not here", UserWarning) + Traceback (most recent call last): + ... + Failed: DID NOT WARN. No warnings of type ...UserWarning... were emitted... + + **Using with** ``pytest.mark.parametrize`` + + When using :ref:`pytest.mark.parametrize ref` it is possible to parametrize tests + such that some runs raise a warning and others do not. + + This could be achieved in the same way as with exceptions, see + :ref:`parametrizing_conditional_raising` for an example. + + """ + __tracebackhide__ = True + if not args: + if kwargs: + argnames = ", ".join(sorted(kwargs)) + raise TypeError( + f"Unexpected keyword arguments passed to pytest.warns: {argnames}" + "\nUse context-manager form instead?" + ) + return WarningsChecker(expected_warning, match_expr=match, _ispytest=True) + else: + func = args[0] + if not callable(func): + raise TypeError(f"{func!r} object (type: {type(func)}) must be callable") + with WarningsChecker(expected_warning, _ispytest=True): + return func(*args[1:], **kwargs) + + +class WarningsRecorder(warnings.catch_warnings): + """A context manager to record raised warnings. + + Each recorded warning is an instance of :class:`warnings.WarningMessage`. + + Adapted from `warnings.catch_warnings`. + + .. note:: + ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated + differently; see :ref:`ensuring_function_triggers`. + + """ + + def __init__(self, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + super().__init__(record=True) + self._entered = False + self._list: list[warnings.WarningMessage] = [] + + @property + def list(self) -> list[warnings.WarningMessage]: + """The list of recorded warnings.""" + return self._list + + def __getitem__(self, i: int) -> warnings.WarningMessage: + """Get a recorded warning by index.""" + return self._list[i] + + def __iter__(self) -> Iterator[warnings.WarningMessage]: + """Iterate through the recorded warnings.""" + return iter(self._list) + + def __len__(self) -> int: + """The number of recorded warnings.""" + return len(self._list) + + def pop(self, cls: type[Warning] = Warning) -> warnings.WarningMessage: + """Pop the first recorded warning which is an instance of ``cls``, + but not an instance of a child class of any other match. + Raises ``AssertionError`` if there is no match. + """ + best_idx: int | None = None + for i, w in enumerate(self._list): + if w.category == cls: + return self._list.pop(i) # exact match, stop looking + if issubclass(w.category, cls) and ( + best_idx is None + or not issubclass(w.category, self._list[best_idx].category) + ): + best_idx = i + if best_idx is not None: + return self._list.pop(best_idx) + __tracebackhide__ = True + raise AssertionError(f"{cls!r} not found in warning list") + + def clear(self) -> None: + """Clear the list of recorded warnings.""" + self._list[:] = [] + + # Type ignored because we basically want the `catch_warnings` generic type + # parameter to be ourselves but that is not possible(?). + def __enter__(self) -> Self: # type: ignore[override] + if self._entered: + __tracebackhide__ = True + raise RuntimeError(f"Cannot enter {self!r} twice") + _list = super().__enter__() + # record=True means it's None. + assert _list is not None + self._list = _list + warnings.simplefilter("always") + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + if not self._entered: + __tracebackhide__ = True + raise RuntimeError(f"Cannot exit {self!r} without entering first") + + super().__exit__(exc_type, exc_val, exc_tb) + + # Built-in catch_warnings does not reset entered state so we do it + # manually here for this context manager to become reusable. + self._entered = False + + +@final +class WarningsChecker(WarningsRecorder): + def __init__( + self, + expected_warning: type[Warning] | tuple[type[Warning], ...] = Warning, + match_expr: str | re.Pattern[str] | None = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + super().__init__(_ispytest=True) + + msg = "exceptions must be derived from Warning, not %s" + if isinstance(expected_warning, tuple): + for exc in expected_warning: + if not issubclass(exc, Warning): + raise TypeError(msg % type(exc)) + expected_warning_tup = expected_warning + elif isinstance(expected_warning, type) and issubclass( + expected_warning, Warning + ): + expected_warning_tup = (expected_warning,) + else: + raise TypeError(msg % type(expected_warning)) + + self.expected_warning = expected_warning_tup + self.match_expr = match_expr + + def matches(self, warning: warnings.WarningMessage) -> bool: + assert self.expected_warning is not None + return issubclass(warning.category, self.expected_warning) and bool( + self.match_expr is None or re.search(self.match_expr, str(warning.message)) + ) + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + super().__exit__(exc_type, exc_val, exc_tb) + + __tracebackhide__ = True + + # BaseExceptions like pytest.{skip,fail,xfail,exit} or Ctrl-C within + # pytest.warns should *not* trigger "DID NOT WARN" and get suppressed + # when the warning doesn't happen. Control-flow exceptions should always + # propagate. + if exc_val is not None and ( + not isinstance(exc_val, Exception) + # Exit is an Exception, not a BaseException, for some reason. + or isinstance(exc_val, Exit) + ): + return + + def found_str() -> str: + return pformat([record.message for record in self], indent=2) + + try: + if not any(issubclass(w.category, self.expected_warning) for w in self): + fail( + f"DID NOT WARN. No warnings of type {self.expected_warning} were emitted.\n" + f" Emitted warnings: {found_str()}." + ) + elif not any(self.matches(w) for w in self): + fail( + f"DID NOT WARN. No warnings of type {self.expected_warning} matching the regex were emitted.\n" + f" Regex: {self.match_expr}\n" + f" Emitted warnings: {found_str()}." + ) + finally: + # Whether or not any warnings matched, we want to re-emit all unmatched warnings. + for w in self: + if not self.matches(w): + warnings.warn_explicit( + message=w.message, + category=w.category, + filename=w.filename, + lineno=w.lineno, + module=w.__module__, + source=w.source, + ) + + # Currently in Python it is possible to pass other types than an + # `str` message when creating `Warning` instances, however this + # causes an exception when :func:`warnings.filterwarnings` is used + # to filter those warnings. See + # https://github.com/python/cpython/issues/103577 for a discussion. + # While this can be considered a bug in CPython, we put guards in + # pytest as the error message produced without this check in place + # is confusing (#10865). + for w in self: + if type(w.message) is not UserWarning: + # If the warning was of an incorrect type then `warnings.warn()` + # creates a UserWarning. Any other warning must have been specified + # explicitly. + continue + if not w.message.args: + # UserWarning() without arguments must have been specified explicitly. + continue + msg = w.message.args[0] + if isinstance(msg, str): + continue + # It's possible that UserWarning was explicitly specified, and + # its first argument was not a string. But that case can't be + # distinguished from an invalid type. + raise TypeError( + f"Warning must be str or Warning, got {msg!r} (type {type(msg).__name__})" + ) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/reports.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/reports.py new file mode 100644 index 0000000..011a69d --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/reports.py @@ -0,0 +1,694 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +from collections.abc import Iterable +from collections.abc import Iterator +from collections.abc import Mapping +from collections.abc import Sequence +import dataclasses +from io import StringIO +import os +from pprint import pprint +import sys +from typing import Any +from typing import cast +from typing import final +from typing import Literal +from typing import NoReturn +from typing import TYPE_CHECKING + +from _pytest._code.code import ExceptionChainRepr +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import ExceptionRepr +from _pytest._code.code import ReprEntry +from _pytest._code.code import ReprEntryNative +from _pytest._code.code import ReprExceptionInfo +from _pytest._code.code import ReprFileLocation +from _pytest._code.code import ReprFuncArgs +from _pytest._code.code import ReprLocals +from _pytest._code.code import ReprTraceback +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest.config import Config +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import fail +from _pytest.outcomes import skip + + +if sys.version_info < (3, 11): + from exceptiongroup import BaseExceptionGroup + + +if TYPE_CHECKING: + from typing_extensions import Self + + from _pytest.runner import CallInfo + + +def getworkerinfoline(node): + try: + return node._workerinfocache + except AttributeError: + d = node.workerinfo + ver = "{}.{}.{}".format(*d["version_info"][:3]) + node._workerinfocache = s = "[{}] {} -- Python {} {}".format( + d["id"], d["sysplatform"], ver, d["executable"] + ) + return s + + +class BaseReport: + when: str | None + location: tuple[str, int | None, str] | None + longrepr: ( + None | ExceptionInfo[BaseException] | tuple[str, int, str] | str | TerminalRepr + ) + sections: list[tuple[str, str]] + nodeid: str + outcome: Literal["passed", "failed", "skipped"] + + def __init__(self, **kw: Any) -> None: + self.__dict__.update(kw) + + if TYPE_CHECKING: + # Can have arbitrary fields given to __init__(). + def __getattr__(self, key: str) -> Any: ... + + def toterminal(self, out: TerminalWriter) -> None: + if hasattr(self, "node"): + worker_info = getworkerinfoline(self.node) + if worker_info: + out.line(worker_info) + + longrepr = self.longrepr + if longrepr is None: + return + + if hasattr(longrepr, "toterminal"): + longrepr_terminal = cast(TerminalRepr, longrepr) + longrepr_terminal.toterminal(out) + else: + try: + s = str(longrepr) + except UnicodeEncodeError: + s = "" + out.line(s) + + def get_sections(self, prefix: str) -> Iterator[tuple[str, str]]: + for name, content in self.sections: + if name.startswith(prefix): + yield prefix, content + + @property + def longreprtext(self) -> str: + """Read-only property that returns the full string representation of + ``longrepr``. + + .. versionadded:: 3.0 + """ + file = StringIO() + tw = TerminalWriter(file) + tw.hasmarkup = False + self.toterminal(tw) + exc = file.getvalue() + return exc.strip() + + @property + def caplog(self) -> str: + """Return captured log lines, if log capturing is enabled. + + .. versionadded:: 3.5 + """ + return "\n".join( + content for (prefix, content) in self.get_sections("Captured log") + ) + + @property + def capstdout(self) -> str: + """Return captured text from stdout, if capturing is enabled. + + .. versionadded:: 3.0 + """ + return "".join( + content for (prefix, content) in self.get_sections("Captured stdout") + ) + + @property + def capstderr(self) -> str: + """Return captured text from stderr, if capturing is enabled. + + .. versionadded:: 3.0 + """ + return "".join( + content for (prefix, content) in self.get_sections("Captured stderr") + ) + + @property + def passed(self) -> bool: + """Whether the outcome is passed.""" + return self.outcome == "passed" + + @property + def failed(self) -> bool: + """Whether the outcome is failed.""" + return self.outcome == "failed" + + @property + def skipped(self) -> bool: + """Whether the outcome is skipped.""" + return self.outcome == "skipped" + + @property + def fspath(self) -> str: + """The path portion of the reported node, as a string.""" + return self.nodeid.split("::")[0] + + @property + def count_towards_summary(self) -> bool: + """**Experimental** Whether this report should be counted towards the + totals shown at the end of the test session: "1 passed, 1 failure, etc". + + .. note:: + + This function is considered **experimental**, so beware that it is subject to changes + even in patch releases. + """ + return True + + @property + def head_line(self) -> str | None: + """**Experimental** The head line shown with longrepr output for this + report, more commonly during traceback representation during + failures:: + + ________ Test.foo ________ + + + In the example above, the head_line is "Test.foo". + + .. note:: + + This function is considered **experimental**, so beware that it is subject to changes + even in patch releases. + """ + if self.location is not None: + _fspath, _lineno, domain = self.location + return domain + return None + + def _get_verbose_word_with_markup( + self, config: Config, default_markup: Mapping[str, bool] + ) -> tuple[str, Mapping[str, bool]]: + _category, _short, verbose = config.hook.pytest_report_teststatus( + report=self, config=config + ) + + if isinstance(verbose, str): + return verbose, default_markup + + if isinstance(verbose, Sequence) and len(verbose) == 2: + word, markup = verbose + if isinstance(word, str) and isinstance(markup, Mapping): + return word, markup + + fail( # pragma: no cover + "pytest_report_teststatus() hook (from a plugin) returned " + f"an invalid verbose value: {verbose!r}.\nExpected either a string " + "or a tuple of (word, markup)." + ) + + def _to_json(self) -> dict[str, Any]: + """Return the contents of this report as a dict of builtin entries, + suitable for serialization. + + This was originally the serialize_report() function from xdist (ca03269). + + Experimental method. + """ + return _report_to_json(self) + + @classmethod + def _from_json(cls, reportdict: dict[str, object]) -> Self: + """Create either a TestReport or CollectReport, depending on the calling class. + + It is the callers responsibility to know which class to pass here. + + This was originally the serialize_report() function from xdist (ca03269). + + Experimental method. + """ + kwargs = _report_kwargs_from_json(reportdict) + return cls(**kwargs) + + +def _report_unserialization_failure( + type_name: str, report_class: type[BaseReport], reportdict +) -> NoReturn: + url = "https://github.com/pytest-dev/pytest/issues" + stream = StringIO() + pprint("-" * 100, stream=stream) + pprint(f"INTERNALERROR: Unknown entry type returned: {type_name}", stream=stream) + pprint(f"report_name: {report_class}", stream=stream) + pprint(reportdict, stream=stream) + pprint(f"Please report this bug at {url}", stream=stream) + pprint("-" * 100, stream=stream) + raise RuntimeError(stream.getvalue()) + + +def _format_failed_longrepr( + item: Item, call: CallInfo[None], excinfo: ExceptionInfo[BaseException] +): + if call.when == "call": + longrepr = item.repr_failure(excinfo) + else: + # Exception in setup or teardown. + longrepr = item._repr_failure_py( + excinfo, style=item.config.getoption("tbstyle", "auto") + ) + return longrepr + + +def _format_exception_group_all_skipped_longrepr( + item: Item, + excinfo: ExceptionInfo[BaseExceptionGroup[BaseException | BaseExceptionGroup]], +) -> tuple[str, int, str]: + r = excinfo._getreprcrash() + assert r is not None, ( + "There should always be a traceback entry for skipping a test." + ) + if all( + getattr(skip, "_use_item_location", False) for skip in excinfo.value.exceptions + ): + path, line = item.reportinfo()[:2] + assert line is not None + loc = (os.fspath(path), line + 1) + default_msg = "skipped" + else: + loc = (str(r.path), r.lineno) + default_msg = r.message + + # Get all unique skip messages. + msgs: list[str] = [] + for exception in excinfo.value.exceptions: + m = getattr(exception, "msg", None) or ( + exception.args[0] if exception.args else None + ) + if m and m not in msgs: + msgs.append(m) + + reason = "; ".join(msgs) if msgs else default_msg + longrepr = (*loc, reason) + return longrepr + + +class TestReport(BaseReport): + """Basic test report object (also used for setup and teardown calls if + they fail). + + Reports can contain arbitrary extra attributes. + """ + + __test__ = False + + # Defined by skipping plugin. + # xfail reason if xfailed, otherwise not defined. Use hasattr to distinguish. + wasxfail: str + + def __init__( + self, + nodeid: str, + location: tuple[str, int | None, str], + keywords: Mapping[str, Any], + outcome: Literal["passed", "failed", "skipped"], + longrepr: None + | ExceptionInfo[BaseException] + | tuple[str, int, str] + | str + | TerminalRepr, + when: Literal["setup", "call", "teardown"], + sections: Iterable[tuple[str, str]] = (), + duration: float = 0, + start: float = 0, + stop: float = 0, + user_properties: Iterable[tuple[str, object]] | None = None, + **extra, + ) -> None: + #: Normalized collection nodeid. + self.nodeid = nodeid + + #: A (filesystempath, lineno, domaininfo) tuple indicating the + #: actual location of a test item - it might be different from the + #: collected one e.g. if a method is inherited from a different module. + #: The filesystempath may be relative to ``config.rootdir``. + #: The line number is 0-based. + self.location: tuple[str, int | None, str] = location + + #: A name -> value dictionary containing all keywords and + #: markers associated with a test invocation. + self.keywords: Mapping[str, Any] = keywords + + #: Test outcome, always one of "passed", "failed", "skipped". + self.outcome = outcome + + #: None or a failure representation. + self.longrepr = longrepr + + #: One of 'setup', 'call', 'teardown' to indicate runtest phase. + self.when: Literal["setup", "call", "teardown"] = when + + #: User properties is a list of tuples (name, value) that holds user + #: defined properties of the test. + self.user_properties = list(user_properties or []) + + #: Tuples of str ``(heading, content)`` with extra information + #: for the test report. Used by pytest to add text captured + #: from ``stdout``, ``stderr``, and intercepted logging events. May + #: be used by other plugins to add arbitrary information to reports. + self.sections = list(sections) + + #: Time it took to run just the test. + self.duration: float = duration + + #: The system time when the call started, in seconds since the epoch. + self.start: float = start + #: The system time when the call ended, in seconds since the epoch. + self.stop: float = stop + + self.__dict__.update(extra) + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.nodeid!r} when={self.when!r} outcome={self.outcome!r}>" + + @classmethod + def from_item_and_call(cls, item: Item, call: CallInfo[None]) -> TestReport: + """Create and fill a TestReport with standard item and call info. + + :param item: The item. + :param call: The call info. + """ + when = call.when + # Remove "collect" from the Literal type -- only for collection calls. + assert when != "collect" + duration = call.duration + start = call.start + stop = call.stop + keywords = {x: 1 for x in item.keywords} + excinfo = call.excinfo + sections = [] + if not call.excinfo: + outcome: Literal["passed", "failed", "skipped"] = "passed" + longrepr: ( + None + | ExceptionInfo[BaseException] + | tuple[str, int, str] + | str + | TerminalRepr + ) = None + else: + if not isinstance(excinfo, ExceptionInfo): + outcome = "failed" + longrepr = excinfo + elif isinstance(excinfo.value, skip.Exception): + outcome = "skipped" + r = excinfo._getreprcrash() + assert r is not None, ( + "There should always be a traceback entry for skipping a test." + ) + if excinfo.value._use_item_location: + path, line = item.reportinfo()[:2] + assert line is not None + longrepr = (os.fspath(path), line + 1, r.message) + else: + longrepr = (str(r.path), r.lineno, r.message) + elif isinstance(excinfo.value, BaseExceptionGroup) and ( + excinfo.value.split(skip.Exception)[1] is None + ): + # All exceptions in the group are skip exceptions. + outcome = "skipped" + excinfo = cast( + ExceptionInfo[ + BaseExceptionGroup[BaseException | BaseExceptionGroup] + ], + excinfo, + ) + longrepr = _format_exception_group_all_skipped_longrepr(item, excinfo) + else: + outcome = "failed" + longrepr = _format_failed_longrepr(item, call, excinfo) + for rwhen, key, content in item._report_sections: + sections.append((f"Captured {key} {rwhen}", content)) + return cls( + item.nodeid, + item.location, + keywords, + outcome, + longrepr, + when, + sections, + duration, + start, + stop, + user_properties=item.user_properties, + ) + + +@final +class CollectReport(BaseReport): + """Collection report object. + + Reports can contain arbitrary extra attributes. + """ + + when = "collect" + + def __init__( + self, + nodeid: str, + outcome: Literal["passed", "failed", "skipped"], + longrepr: None + | ExceptionInfo[BaseException] + | tuple[str, int, str] + | str + | TerminalRepr, + result: list[Item | Collector] | None, + sections: Iterable[tuple[str, str]] = (), + **extra, + ) -> None: + #: Normalized collection nodeid. + self.nodeid = nodeid + + #: Test outcome, always one of "passed", "failed", "skipped". + self.outcome = outcome + + #: None or a failure representation. + self.longrepr = longrepr + + #: The collected items and collection nodes. + self.result = result or [] + + #: Tuples of str ``(heading, content)`` with extra information + #: for the test report. Used by pytest to add text captured + #: from ``stdout``, ``stderr``, and intercepted logging events. May + #: be used by other plugins to add arbitrary information to reports. + self.sections = list(sections) + + self.__dict__.update(extra) + + @property + def location( # type:ignore[override] + self, + ) -> tuple[str, int | None, str] | None: + return (self.fspath, None, self.fspath) + + def __repr__(self) -> str: + return f"" + + +class CollectErrorRepr(TerminalRepr): + def __init__(self, msg: str) -> None: + self.longrepr = msg + + def toterminal(self, out: TerminalWriter) -> None: + out.line(self.longrepr, red=True) + + +def pytest_report_to_serializable( + report: CollectReport | TestReport, +) -> dict[str, Any] | None: + if isinstance(report, TestReport | CollectReport): + data = report._to_json() + data["$report_type"] = report.__class__.__name__ + return data + # TODO: Check if this is actually reachable. + return None # type: ignore[unreachable] + + +def pytest_report_from_serializable( + data: dict[str, Any], +) -> CollectReport | TestReport | None: + if "$report_type" in data: + if data["$report_type"] == "TestReport": + return TestReport._from_json(data) + elif data["$report_type"] == "CollectReport": + return CollectReport._from_json(data) + assert False, "Unknown report_type unserialize data: {}".format( + data["$report_type"] + ) + return None + + +def _report_to_json(report: BaseReport) -> dict[str, Any]: + """Return the contents of this report as a dict of builtin entries, + suitable for serialization. + + This was originally the serialize_report() function from xdist (ca03269). + """ + + def serialize_repr_entry( + entry: ReprEntry | ReprEntryNative, + ) -> dict[str, Any]: + data = dataclasses.asdict(entry) + for key, value in data.items(): + if hasattr(value, "__dict__"): + data[key] = dataclasses.asdict(value) + entry_data = {"type": type(entry).__name__, "data": data} + return entry_data + + def serialize_repr_traceback(reprtraceback: ReprTraceback) -> dict[str, Any]: + result = dataclasses.asdict(reprtraceback) + result["reprentries"] = [ + serialize_repr_entry(x) for x in reprtraceback.reprentries + ] + return result + + def serialize_repr_crash( + reprcrash: ReprFileLocation | None, + ) -> dict[str, Any] | None: + if reprcrash is not None: + return dataclasses.asdict(reprcrash) + else: + return None + + def serialize_exception_longrepr(rep: BaseReport) -> dict[str, Any]: + assert rep.longrepr is not None + # TODO: Investigate whether the duck typing is really necessary here. + longrepr = cast(ExceptionRepr, rep.longrepr) + result: dict[str, Any] = { + "reprcrash": serialize_repr_crash(longrepr.reprcrash), + "reprtraceback": serialize_repr_traceback(longrepr.reprtraceback), + "sections": longrepr.sections, + } + if isinstance(longrepr, ExceptionChainRepr): + result["chain"] = [] + for repr_traceback, repr_crash, description in longrepr.chain: + result["chain"].append( + ( + serialize_repr_traceback(repr_traceback), + serialize_repr_crash(repr_crash), + description, + ) + ) + else: + result["chain"] = None + return result + + d = report.__dict__.copy() + if hasattr(report.longrepr, "toterminal"): + if hasattr(report.longrepr, "reprtraceback") and hasattr( + report.longrepr, "reprcrash" + ): + d["longrepr"] = serialize_exception_longrepr(report) + else: + d["longrepr"] = str(report.longrepr) + else: + d["longrepr"] = report.longrepr + for name in d: + if isinstance(d[name], os.PathLike): + d[name] = os.fspath(d[name]) + elif name == "result": + d[name] = None # for now + return d + + +def _report_kwargs_from_json(reportdict: dict[str, Any]) -> dict[str, Any]: + """Return **kwargs that can be used to construct a TestReport or + CollectReport instance. + + This was originally the serialize_report() function from xdist (ca03269). + """ + + def deserialize_repr_entry(entry_data): + data = entry_data["data"] + entry_type = entry_data["type"] + if entry_type == "ReprEntry": + reprfuncargs = None + reprfileloc = None + reprlocals = None + if data["reprfuncargs"]: + reprfuncargs = ReprFuncArgs(**data["reprfuncargs"]) + if data["reprfileloc"]: + reprfileloc = ReprFileLocation(**data["reprfileloc"]) + if data["reprlocals"]: + reprlocals = ReprLocals(data["reprlocals"]["lines"]) + + reprentry: ReprEntry | ReprEntryNative = ReprEntry( + lines=data["lines"], + reprfuncargs=reprfuncargs, + reprlocals=reprlocals, + reprfileloc=reprfileloc, + style=data["style"], + ) + elif entry_type == "ReprEntryNative": + reprentry = ReprEntryNative(data["lines"]) + else: + _report_unserialization_failure(entry_type, TestReport, reportdict) + return reprentry + + def deserialize_repr_traceback(repr_traceback_dict): + repr_traceback_dict["reprentries"] = [ + deserialize_repr_entry(x) for x in repr_traceback_dict["reprentries"] + ] + return ReprTraceback(**repr_traceback_dict) + + def deserialize_repr_crash(repr_crash_dict: dict[str, Any] | None): + if repr_crash_dict is not None: + return ReprFileLocation(**repr_crash_dict) + else: + return None + + if ( + reportdict["longrepr"] + and "reprcrash" in reportdict["longrepr"] + and "reprtraceback" in reportdict["longrepr"] + ): + reprtraceback = deserialize_repr_traceback( + reportdict["longrepr"]["reprtraceback"] + ) + reprcrash = deserialize_repr_crash(reportdict["longrepr"]["reprcrash"]) + if reportdict["longrepr"]["chain"]: + chain = [] + for repr_traceback_data, repr_crash_data, description in reportdict[ + "longrepr" + ]["chain"]: + chain.append( + ( + deserialize_repr_traceback(repr_traceback_data), + deserialize_repr_crash(repr_crash_data), + description, + ) + ) + exception_info: ExceptionChainRepr | ReprExceptionInfo = ExceptionChainRepr( + chain + ) + else: + exception_info = ReprExceptionInfo( + reprtraceback=reprtraceback, + reprcrash=reprcrash, + ) + + for section in reportdict["longrepr"]["sections"]: + exception_info.addsection(*section) + reportdict["longrepr"] = exception_info + + return reportdict diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/runner.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/runner.py new file mode 100644 index 0000000..9c20ff9 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/runner.py @@ -0,0 +1,580 @@ +# mypy: allow-untyped-defs +"""Basic collect and runtest protocol implementations.""" + +from __future__ import annotations + +import bdb +from collections.abc import Callable +import dataclasses +import os +import sys +import types +from typing import cast +from typing import final +from typing import Generic +from typing import Literal +from typing import TYPE_CHECKING +from typing import TypeVar + +from .config import Config +from .reports import BaseReport +from .reports import CollectErrorRepr +from .reports import CollectReport +from .reports import TestReport +from _pytest import timing +from _pytest._code.code import ExceptionChainRepr +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.nodes import Collector +from _pytest.nodes import Directory +from _pytest.nodes import Item +from _pytest.nodes import Node +from _pytest.outcomes import Exit +from _pytest.outcomes import OutcomeException +from _pytest.outcomes import Skipped +from _pytest.outcomes import TEST_OUTCOME + + +if sys.version_info < (3, 11): + from exceptiongroup import BaseExceptionGroup + +if TYPE_CHECKING: + from _pytest.main import Session + from _pytest.terminal import TerminalReporter + +# +# pytest plugin hooks. + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting", "Reporting", after="general") + group.addoption( + "--durations", + action="store", + type=int, + default=None, + metavar="N", + help="Show N slowest setup/test durations (N=0 for all)", + ) + group.addoption( + "--durations-min", + action="store", + type=float, + default=None, + metavar="N", + help="Minimal duration in seconds for inclusion in slowest list. " + "Default: 0.005 (or 0.0 if -vv is given).", + ) + + +def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None: + durations = terminalreporter.config.option.durations + durations_min = terminalreporter.config.option.durations_min + verbose = terminalreporter.config.get_verbosity() + if durations is None: + return + if durations_min is None: + durations_min = 0.005 if verbose < 2 else 0.0 + tr = terminalreporter + dlist = [] + for replist in tr.stats.values(): + for rep in replist: + if hasattr(rep, "duration"): + dlist.append(rep) + if not dlist: + return + dlist.sort(key=lambda x: x.duration, reverse=True) + if not durations: + tr.write_sep("=", "slowest durations") + else: + tr.write_sep("=", f"slowest {durations} durations") + dlist = dlist[:durations] + + for i, rep in enumerate(dlist): + if rep.duration < durations_min: + tr.write_line("") + message = f"({len(dlist) - i} durations < {durations_min:g}s hidden." + if terminalreporter.config.option.durations_min is None: + message += " Use -vv to show these durations." + message += ")" + tr.write_line(message) + break + tr.write_line(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}") + + +def pytest_sessionstart(session: Session) -> None: + session._setupstate = SetupState() + + +def pytest_sessionfinish(session: Session) -> None: + session._setupstate.teardown_exact(None) + + +def pytest_runtest_protocol(item: Item, nextitem: Item | None) -> bool: + ihook = item.ihook + ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location) + runtestprotocol(item, nextitem=nextitem) + ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location) + return True + + +def runtestprotocol( + item: Item, log: bool = True, nextitem: Item | None = None +) -> list[TestReport]: + hasrequest = hasattr(item, "_request") + if hasrequest and not item._request: # type: ignore[attr-defined] + # This only happens if the item is re-run, as is done by + # pytest-rerunfailures. + item._initrequest() # type: ignore[attr-defined] + rep = call_and_report(item, "setup", log) + reports = [rep] + if rep.passed: + if item.config.getoption("setupshow", False): + show_test_item(item) + if not item.config.getoption("setuponly", False): + reports.append(call_and_report(item, "call", log)) + # If the session is about to fail or stop, teardown everything - this is + # necessary to correctly report fixture teardown errors (see #11706) + if item.session.shouldfail or item.session.shouldstop: + nextitem = None + reports.append(call_and_report(item, "teardown", log, nextitem=nextitem)) + # After all teardown hooks have been called + # want funcargs and request info to go away. + if hasrequest: + item._request = False # type: ignore[attr-defined] + item.funcargs = None # type: ignore[attr-defined] + return reports + + +def show_test_item(item: Item) -> None: + """Show test function, parameters and the fixtures of the test item.""" + tw = item.config.get_terminal_writer() + tw.line() + tw.write(" " * 8) + tw.write(item.nodeid) + used_fixtures = sorted(getattr(item, "fixturenames", [])) + if used_fixtures: + tw.write(" (fixtures used: {})".format(", ".join(used_fixtures))) + tw.flush() + + +def pytest_runtest_setup(item: Item) -> None: + _update_current_test_var(item, "setup") + item.session._setupstate.setup(item) + + +def pytest_runtest_call(item: Item) -> None: + _update_current_test_var(item, "call") + try: + del sys.last_type + del sys.last_value + del sys.last_traceback + if sys.version_info >= (3, 12, 0): + del sys.last_exc # type:ignore[attr-defined] + except AttributeError: + pass + try: + item.runtest() + except Exception as e: + # Store trace info to allow postmortem debugging + sys.last_type = type(e) + sys.last_value = e + if sys.version_info >= (3, 12, 0): + sys.last_exc = e # type:ignore[attr-defined] + assert e.__traceback__ is not None + # Skip *this* frame + sys.last_traceback = e.__traceback__.tb_next + raise + + +def pytest_runtest_teardown(item: Item, nextitem: Item | None) -> None: + _update_current_test_var(item, "teardown") + item.session._setupstate.teardown_exact(nextitem) + _update_current_test_var(item, None) + + +def _update_current_test_var( + item: Item, when: Literal["setup", "call", "teardown"] | None +) -> None: + """Update :envvar:`PYTEST_CURRENT_TEST` to reflect the current item and stage. + + If ``when`` is None, delete ``PYTEST_CURRENT_TEST`` from the environment. + """ + var_name = "PYTEST_CURRENT_TEST" + if when: + value = f"{item.nodeid} ({when})" + # don't allow null bytes on environment variables (see #2644, #2957) + value = value.replace("\x00", "(null)") + os.environ[var_name] = value + else: + os.environ.pop(var_name) + + +def pytest_report_teststatus(report: BaseReport) -> tuple[str, str, str] | None: + if report.when in ("setup", "teardown"): + if report.failed: + # category, shortletter, verbose-word + return "error", "E", "ERROR" + elif report.skipped: + return "skipped", "s", "SKIPPED" + else: + return "", "", "" + return None + + +# +# Implementation + + +def call_and_report( + item: Item, when: Literal["setup", "call", "teardown"], log: bool = True, **kwds +) -> TestReport: + ihook = item.ihook + if when == "setup": + runtest_hook: Callable[..., None] = ihook.pytest_runtest_setup + elif when == "call": + runtest_hook = ihook.pytest_runtest_call + elif when == "teardown": + runtest_hook = ihook.pytest_runtest_teardown + else: + assert False, f"Unhandled runtest hook case: {when}" + + call = CallInfo.from_call( + lambda: runtest_hook(item=item, **kwds), + when=when, + reraise=get_reraise_exceptions(item.config), + ) + report: TestReport = ihook.pytest_runtest_makereport(item=item, call=call) + if log: + ihook.pytest_runtest_logreport(report=report) + if check_interactive_exception(call, report): + ihook.pytest_exception_interact(node=item, call=call, report=report) + return report + + +def get_reraise_exceptions(config: Config) -> tuple[type[BaseException], ...]: + """Return exception types that should not be suppressed in general.""" + reraise: tuple[type[BaseException], ...] = (Exit,) + if not config.getoption("usepdb", False): + reraise += (KeyboardInterrupt,) + return reraise + + +def check_interactive_exception(call: CallInfo[object], report: BaseReport) -> bool: + """Check whether the call raised an exception that should be reported as + interactive.""" + if call.excinfo is None: + # Didn't raise. + return False + if hasattr(report, "wasxfail"): + # Exception was expected. + return False + if isinstance(call.excinfo.value, Skipped | bdb.BdbQuit): + # Special control flow exception. + return False + return True + + +TResult = TypeVar("TResult", covariant=True) + + +@final +@dataclasses.dataclass +class CallInfo(Generic[TResult]): + """Result/Exception info of a function invocation.""" + + _result: TResult | None + #: The captured exception of the call, if it raised. + excinfo: ExceptionInfo[BaseException] | None + #: The system time when the call started, in seconds since the epoch. + start: float + #: The system time when the call ended, in seconds since the epoch. + stop: float + #: The call duration, in seconds. + duration: float + #: The context of invocation: "collect", "setup", "call" or "teardown". + when: Literal["collect", "setup", "call", "teardown"] + + def __init__( + self, + result: TResult | None, + excinfo: ExceptionInfo[BaseException] | None, + start: float, + stop: float, + duration: float, + when: Literal["collect", "setup", "call", "teardown"], + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._result = result + self.excinfo = excinfo + self.start = start + self.stop = stop + self.duration = duration + self.when = when + + @property + def result(self) -> TResult: + """The return value of the call, if it didn't raise. + + Can only be accessed if excinfo is None. + """ + if self.excinfo is not None: + raise AttributeError(f"{self!r} has no valid result") + # The cast is safe because an exception wasn't raised, hence + # _result has the expected function return type (which may be + # None, that's why a cast and not an assert). + return cast(TResult, self._result) + + @classmethod + def from_call( + cls, + func: Callable[[], TResult], + when: Literal["collect", "setup", "call", "teardown"], + reraise: type[BaseException] | tuple[type[BaseException], ...] | None = None, + ) -> CallInfo[TResult]: + """Call func, wrapping the result in a CallInfo. + + :param func: + The function to call. Called without arguments. + :type func: Callable[[], _pytest.runner.TResult] + :param when: + The phase in which the function is called. + :param reraise: + Exception or exceptions that shall propagate if raised by the + function, instead of being wrapped in the CallInfo. + """ + excinfo = None + instant = timing.Instant() + try: + result: TResult | None = func() + except BaseException: + excinfo = ExceptionInfo.from_current() + if reraise is not None and isinstance(excinfo.value, reraise): + raise + result = None + duration = instant.elapsed() + return cls( + start=duration.start.time, + stop=duration.stop.time, + duration=duration.seconds, + when=when, + result=result, + excinfo=excinfo, + _ispytest=True, + ) + + def __repr__(self) -> str: + if self.excinfo is None: + return f"" + return f"" + + +def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> TestReport: + return TestReport.from_item_and_call(item, call) + + +def pytest_make_collect_report(collector: Collector) -> CollectReport: + def collect() -> list[Item | Collector]: + # Before collecting, if this is a Directory, load the conftests. + # If a conftest import fails to load, it is considered a collection + # error of the Directory collector. This is why it's done inside of the + # CallInfo wrapper. + # + # Note: initial conftests are loaded early, not here. + if isinstance(collector, Directory): + collector.config.pluginmanager._loadconftestmodules( + collector.path, + collector.config.getoption("importmode"), + rootpath=collector.config.rootpath, + consider_namespace_packages=collector.config.getini( + "consider_namespace_packages" + ), + ) + + return list(collector.collect()) + + call = CallInfo.from_call( + collect, "collect", reraise=(KeyboardInterrupt, SystemExit) + ) + longrepr: None | tuple[str, int, str] | str | TerminalRepr = None + if not call.excinfo: + outcome: Literal["passed", "skipped", "failed"] = "passed" + else: + skip_exceptions = [Skipped] + unittest = sys.modules.get("unittest") + if unittest is not None: + skip_exceptions.append(unittest.SkipTest) + if isinstance(call.excinfo.value, tuple(skip_exceptions)): + outcome = "skipped" + r_ = collector._repr_failure_py(call.excinfo, "line") + assert isinstance(r_, ExceptionChainRepr), repr(r_) + r = r_.reprcrash + assert r + longrepr = (str(r.path), r.lineno, r.message) + else: + outcome = "failed" + errorinfo = collector.repr_failure(call.excinfo) + if not hasattr(errorinfo, "toterminal"): + assert isinstance(errorinfo, str) + errorinfo = CollectErrorRepr(errorinfo) + longrepr = errorinfo + result = call.result if not call.excinfo else None + rep = CollectReport(collector.nodeid, outcome, longrepr, result) + rep.call = call # type: ignore # see collect_one_node + return rep + + +class SetupState: + """Shared state for setting up/tearing down test items or collectors + in a session. + + Suppose we have a collection tree as follows: + + + + + + + + The SetupState maintains a stack. The stack starts out empty: + + [] + + During the setup phase of item1, setup(item1) is called. What it does + is: + + push session to stack, run session.setup() + push mod1 to stack, run mod1.setup() + push item1 to stack, run item1.setup() + + The stack is: + + [session, mod1, item1] + + While the stack is in this shape, it is allowed to add finalizers to + each of session, mod1, item1 using addfinalizer(). + + During the teardown phase of item1, teardown_exact(item2) is called, + where item2 is the next item to item1. What it does is: + + pop item1 from stack, run its teardowns + pop mod1 from stack, run its teardowns + + mod1 was popped because it ended its purpose with item1. The stack is: + + [session] + + During the setup phase of item2, setup(item2) is called. What it does + is: + + push mod2 to stack, run mod2.setup() + push item2 to stack, run item2.setup() + + Stack: + + [session, mod2, item2] + + During the teardown phase of item2, teardown_exact(None) is called, + because item2 is the last item. What it does is: + + pop item2 from stack, run its teardowns + pop mod2 from stack, run its teardowns + pop session from stack, run its teardowns + + Stack: + + [] + + The end! + """ + + def __init__(self) -> None: + # The stack is in the dict insertion order. + self.stack: dict[ + Node, + tuple[ + # Node's finalizers. + list[Callable[[], object]], + # Node's exception and original traceback, if its setup raised. + tuple[OutcomeException | Exception, types.TracebackType | None] | None, + ], + ] = {} + + def setup(self, item: Item) -> None: + """Setup objects along the collector chain to the item.""" + needed_collectors = item.listchain() + + # If a collector fails its setup, fail its entire subtree of items. + # The setup is not retried for each item - the same exception is used. + for col, (finalizers, exc) in self.stack.items(): + assert col in needed_collectors, "previous item was not torn down properly" + if exc: + raise exc[0].with_traceback(exc[1]) + + for col in needed_collectors[len(self.stack) :]: + assert col not in self.stack + # Push onto the stack. + self.stack[col] = ([col.teardown], None) + try: + col.setup() + except TEST_OUTCOME as exc: + self.stack[col] = (self.stack[col][0], (exc, exc.__traceback__)) + raise + + def addfinalizer(self, finalizer: Callable[[], object], node: Node) -> None: + """Attach a finalizer to the given node. + + The node must be currently active in the stack. + """ + assert node and not isinstance(node, tuple) + assert callable(finalizer) + assert node in self.stack, (node, self.stack) + self.stack[node][0].append(finalizer) + + def teardown_exact(self, nextitem: Item | None) -> None: + """Teardown the current stack up until reaching nodes that nextitem + also descends from. + + When nextitem is None (meaning we're at the last item), the entire + stack is torn down. + """ + needed_collectors = (nextitem and nextitem.listchain()) or [] + exceptions: list[BaseException] = [] + while self.stack: + if list(self.stack.keys()) == needed_collectors[: len(self.stack)]: + break + node, (finalizers, _) = self.stack.popitem() + these_exceptions = [] + while finalizers: + fin = finalizers.pop() + try: + fin() + except TEST_OUTCOME as e: + these_exceptions.append(e) + + if len(these_exceptions) == 1: + exceptions.extend(these_exceptions) + elif these_exceptions: + msg = f"errors while tearing down {node!r}" + exceptions.append(BaseExceptionGroup(msg, these_exceptions[::-1])) + + if len(exceptions) == 1: + raise exceptions[0] + elif exceptions: + raise BaseExceptionGroup("errors during test teardown", exceptions[::-1]) + if nextitem is None: + assert not self.stack + + +def collect_one_node(collector: Collector) -> CollectReport: + ihook = collector.ihook + ihook.pytest_collectstart(collector=collector) + rep: CollectReport = ihook.pytest_make_collect_report(collector=collector) + call = rep.__dict__.pop("call", None) + if call and check_interactive_exception(call, rep): + ihook.pytest_exception_interact(node=collector, call=call, report=rep) + return rep diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/scope.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/scope.py new file mode 100644 index 0000000..2b007e8 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/scope.py @@ -0,0 +1,91 @@ +""" +Scope definition and related utilities. + +Those are defined here, instead of in the 'fixtures' module because +their use is spread across many other pytest modules, and centralizing it in 'fixtures' +would cause circular references. + +Also this makes the module light to import, as it should. +""" + +from __future__ import annotations + +from enum import Enum +from functools import total_ordering +from typing import Literal + + +_ScopeName = Literal["session", "package", "module", "class", "function"] + + +@total_ordering +class Scope(Enum): + """ + Represents one of the possible fixture scopes in pytest. + + Scopes are ordered from lower to higher, that is: + + ->>> higher ->>> + + Function < Class < Module < Package < Session + + <<<- lower <<<- + """ + + # Scopes need to be listed from lower to higher. + Function = "function" + Class = "class" + Module = "module" + Package = "package" + Session = "session" + + def next_lower(self) -> Scope: + """Return the next lower scope.""" + index = _SCOPE_INDICES[self] + if index == 0: + raise ValueError(f"{self} is the lower-most scope") + return _ALL_SCOPES[index - 1] + + def next_higher(self) -> Scope: + """Return the next higher scope.""" + index = _SCOPE_INDICES[self] + if index == len(_SCOPE_INDICES) - 1: + raise ValueError(f"{self} is the upper-most scope") + return _ALL_SCOPES[index + 1] + + def __lt__(self, other: Scope) -> bool: + self_index = _SCOPE_INDICES[self] + other_index = _SCOPE_INDICES[other] + return self_index < other_index + + @classmethod + def from_user( + cls, scope_name: _ScopeName, descr: str, where: str | None = None + ) -> Scope: + """ + Given a scope name from the user, return the equivalent Scope enum. Should be used + whenever we want to convert a user provided scope name to its enum object. + + If the scope name is invalid, construct a user friendly message and call pytest.fail. + """ + from _pytest.outcomes import fail + + try: + # Holding this reference is necessary for mypy at the moment. + scope = Scope(scope_name) + except ValueError: + fail( + "{} {}got an unexpected scope value '{}'".format( + descr, f"from {where} " if where else "", scope_name + ), + pytrace=False, + ) + return scope + + +_ALL_SCOPES = list(Scope) +_SCOPE_INDICES = {scope: index for index, scope in enumerate(_ALL_SCOPES)} + + +# Ordered list of scopes which can contain many tests (in practice all except Function). +HIGH_SCOPES = [x for x in Scope if x is not Scope.Function] diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/setuponly.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/setuponly.py new file mode 100644 index 0000000..7e6b46b --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/setuponly.py @@ -0,0 +1,98 @@ +from __future__ import annotations + +from collections.abc import Generator + +from _pytest._io.saferepr import saferepr +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureDef +from _pytest.fixtures import SubRequest +from _pytest.scope import Scope +import pytest + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--setuponly", + "--setup-only", + action="store_true", + help="Only setup fixtures, do not execute tests", + ) + group.addoption( + "--setupshow", + "--setup-show", + action="store_true", + help="Show setup of fixtures while executing tests", + ) + + +@pytest.hookimpl(wrapper=True) +def pytest_fixture_setup( + fixturedef: FixtureDef[object], request: SubRequest +) -> Generator[None, object, object]: + try: + return (yield) + finally: + if request.config.option.setupshow: + if hasattr(request, "param"): + # Save the fixture parameter so ._show_fixture_action() can + # display it now and during the teardown (in .finish()). + if fixturedef.ids: + if callable(fixturedef.ids): + param = fixturedef.ids(request.param) + else: + param = fixturedef.ids[request.param_index] + else: + param = request.param + fixturedef.cached_param = param # type: ignore[attr-defined] + _show_fixture_action(fixturedef, request.config, "SETUP") + + +def pytest_fixture_post_finalizer( + fixturedef: FixtureDef[object], request: SubRequest +) -> None: + if fixturedef.cached_result is not None: + config = request.config + if config.option.setupshow: + _show_fixture_action(fixturedef, request.config, "TEARDOWN") + if hasattr(fixturedef, "cached_param"): + del fixturedef.cached_param + + +def _show_fixture_action( + fixturedef: FixtureDef[object], config: Config, msg: str +) -> None: + capman = config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture() + + tw = config.get_terminal_writer() + tw.line() + # Use smaller indentation the higher the scope: Session = 0, Package = 1, etc. + scope_indent = list(reversed(Scope)).index(fixturedef._scope) + tw.write(" " * 2 * scope_indent) + + scopename = fixturedef.scope[0].upper() + tw.write(f"{msg:<8} {scopename} {fixturedef.argname}") + + if msg == "SETUP": + deps = sorted(arg for arg in fixturedef.argnames if arg != "request") + if deps: + tw.write(" (fixtures used: {})".format(", ".join(deps))) + + if hasattr(fixturedef, "cached_param"): + tw.write(f"[{saferepr(fixturedef.cached_param, maxsize=42)}]") + + tw.flush() + + if capman: + capman.resume_global_capture() + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + if config.option.setuponly: + config.option.setupshow = True + return None diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/setupplan.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/setupplan.py new file mode 100644 index 0000000..4e124cc --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/setupplan.py @@ -0,0 +1,39 @@ +from __future__ import annotations + +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureDef +from _pytest.fixtures import SubRequest +import pytest + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--setupplan", + "--setup-plan", + action="store_true", + help="Show what fixtures and tests would be executed but " + "don't execute anything", + ) + + +@pytest.hookimpl(tryfirst=True) +def pytest_fixture_setup( + fixturedef: FixtureDef[object], request: SubRequest +) -> object | None: + # Will return a dummy fixture if the setuponly option is provided. + if request.config.option.setupplan: + my_cache_key = fixturedef.cache_key(request) + fixturedef.cached_result = (None, my_cache_key, None) + return fixturedef.cached_result + return None + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + if config.option.setupplan: + config.option.setuponly = True + config.option.setupshow = True + return None diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/skipping.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/skipping.py new file mode 100644 index 0000000..3b06762 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/skipping.py @@ -0,0 +1,321 @@ +# mypy: allow-untyped-defs +"""Support for skip/xfail functions and markers.""" + +from __future__ import annotations + +from collections.abc import Generator +from collections.abc import Mapping +import dataclasses +import os +import platform +import sys +import traceback + +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.mark.structures import Mark +from _pytest.nodes import Item +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.outcomes import xfail +from _pytest.raises import AbstractRaises +from _pytest.reports import BaseReport +from _pytest.reports import TestReport +from _pytest.runner import CallInfo +from _pytest.stash import StashKey + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--runxfail", + action="store_true", + dest="runxfail", + default=False, + help="Report the results of xfail tests as if they were not marked", + ) + + parser.addini( + "strict_xfail", + "Default for the strict parameter of xfail " + "markers when not given explicitly (default: False) (alias: xfail_strict)", + type="bool", + # None => fallback to `strict`. + default=None, + aliases=["xfail_strict"], + ) + + +def pytest_configure(config: Config) -> None: + if config.option.runxfail: + # yay a hack + import pytest + + old = pytest.xfail + config.add_cleanup(lambda: setattr(pytest, "xfail", old)) + + def nop(*args, **kwargs): + pass + + nop.Exception = xfail.Exception # type: ignore[attr-defined] + setattr(pytest, "xfail", nop) + + config.addinivalue_line( + "markers", + "skip(reason=None): skip the given test function with an optional reason. " + 'Example: skip(reason="no way of currently testing this") skips the ' + "test.", + ) + config.addinivalue_line( + "markers", + "skipif(condition, ..., *, reason=...): " + "skip the given test function if any of the conditions evaluate to True. " + "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. " + "See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-skipif", + ) + config.addinivalue_line( + "markers", + "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=strict_xfail): " + "mark the test function as an expected failure if any of the conditions " + "evaluate to True. Optionally specify a reason for better reporting " + "and run=False if you don't even want to execute the test function. " + "If only specific exception(s) are expected, you can list them in " + "raises, and if the test fails in other ways, it will be reported as " + "a true failure. See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-xfail", + ) + + +def evaluate_condition(item: Item, mark: Mark, condition: object) -> tuple[bool, str]: + """Evaluate a single skipif/xfail condition. + + If an old-style string condition is given, it is eval()'d, otherwise the + condition is bool()'d. If this fails, an appropriately formatted pytest.fail + is raised. + + Returns (result, reason). The reason is only relevant if the result is True. + """ + # String condition. + if isinstance(condition, str): + globals_ = { + "os": os, + "sys": sys, + "platform": platform, + "config": item.config, + } + for dictionary in reversed( + item.ihook.pytest_markeval_namespace(config=item.config) + ): + if not isinstance(dictionary, Mapping): + raise ValueError( + f"pytest_markeval_namespace() needs to return a dict, got {dictionary!r}" + ) + globals_.update(dictionary) + if hasattr(item, "obj"): + globals_.update(item.obj.__globals__) + try: + filename = f"<{mark.name} condition>" + condition_code = compile(condition, filename, "eval") + result = eval(condition_code, globals_) + except SyntaxError as exc: + msglines = [ + f"Error evaluating {mark.name!r} condition", + " " + condition, + " " + " " * (exc.offset or 0) + "^", + "SyntaxError: invalid syntax", + ] + fail("\n".join(msglines), pytrace=False) + except Exception as exc: + msglines = [ + f"Error evaluating {mark.name!r} condition", + " " + condition, + *traceback.format_exception_only(type(exc), exc), + ] + fail("\n".join(msglines), pytrace=False) + + # Boolean condition. + else: + try: + result = bool(condition) + except Exception as exc: + msglines = [ + f"Error evaluating {mark.name!r} condition as a boolean", + *traceback.format_exception_only(type(exc), exc), + ] + fail("\n".join(msglines), pytrace=False) + + reason = mark.kwargs.get("reason", None) + if reason is None: + if isinstance(condition, str): + reason = "condition: " + condition + else: + # XXX better be checked at collection time + msg = ( + f"Error evaluating {mark.name!r}: " + + "you need to specify reason=STRING when using booleans as conditions." + ) + fail(msg, pytrace=False) + + return result, reason + + +@dataclasses.dataclass(frozen=True) +class Skip: + """The result of evaluate_skip_marks().""" + + reason: str = "unconditional skip" + + +def evaluate_skip_marks(item: Item) -> Skip | None: + """Evaluate skip and skipif marks on item, returning Skip if triggered.""" + for mark in item.iter_markers(name="skipif"): + if "condition" not in mark.kwargs: + conditions = mark.args + else: + conditions = (mark.kwargs["condition"],) + + # Unconditional. + if not conditions: + reason = mark.kwargs.get("reason", "") + return Skip(reason) + + # If any of the conditions are true. + for condition in conditions: + result, reason = evaluate_condition(item, mark, condition) + if result: + return Skip(reason) + + for mark in item.iter_markers(name="skip"): + try: + return Skip(*mark.args, **mark.kwargs) + except TypeError as e: + raise TypeError(str(e) + " - maybe you meant pytest.mark.skipif?") from None + + return None + + +@dataclasses.dataclass(frozen=True) +class Xfail: + """The result of evaluate_xfail_marks().""" + + __slots__ = ("raises", "reason", "run", "strict") + + reason: str + run: bool + strict: bool + raises: ( + type[BaseException] + | tuple[type[BaseException], ...] + | AbstractRaises[BaseException] + | None + ) + + +def evaluate_xfail_marks(item: Item) -> Xfail | None: + """Evaluate xfail marks on item, returning Xfail if triggered.""" + for mark in item.iter_markers(name="xfail"): + run = mark.kwargs.get("run", True) + strict = mark.kwargs.get("strict") + if strict is None: + strict = item.config.getini("strict_xfail") + if strict is None: + strict = item.config.getini("strict") + raises = mark.kwargs.get("raises", None) + if "condition" not in mark.kwargs: + conditions = mark.args + else: + conditions = (mark.kwargs["condition"],) + + # Unconditional. + if not conditions: + reason = mark.kwargs.get("reason", "") + return Xfail(reason, run, strict, raises) + + # If any of the conditions are true. + for condition in conditions: + result, reason = evaluate_condition(item, mark, condition) + if result: + return Xfail(reason, run, strict, raises) + + return None + + +# Saves the xfail mark evaluation. Can be refreshed during call if None. +xfailed_key = StashKey[Xfail | None]() + + +@hookimpl(tryfirst=True) +def pytest_runtest_setup(item: Item) -> None: + skipped = evaluate_skip_marks(item) + if skipped: + raise skip.Exception(skipped.reason, _use_item_location=True) + + item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item) + if xfailed and not item.config.option.runxfail and not xfailed.run: + xfail("[NOTRUN] " + xfailed.reason) + + +@hookimpl(wrapper=True) +def pytest_runtest_call(item: Item) -> Generator[None]: + xfailed = item.stash.get(xfailed_key, None) + if xfailed is None: + item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item) + + if xfailed and not item.config.option.runxfail and not xfailed.run: + xfail("[NOTRUN] " + xfailed.reason) + + try: + return (yield) + finally: + # The test run may have added an xfail mark dynamically. + xfailed = item.stash.get(xfailed_key, None) + if xfailed is None: + item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item) + + +@hookimpl(wrapper=True) +def pytest_runtest_makereport( + item: Item, call: CallInfo[None] +) -> Generator[None, TestReport, TestReport]: + rep = yield + xfailed = item.stash.get(xfailed_key, None) + if item.config.option.runxfail: + pass # don't interfere + elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception): + assert call.excinfo.value.msg is not None + rep.wasxfail = call.excinfo.value.msg + rep.outcome = "skipped" + elif not rep.skipped and xfailed: + if call.excinfo: + raises = xfailed.raises + if raises is None or ( + ( + isinstance(raises, type | tuple) + and isinstance(call.excinfo.value, raises) + ) + or ( + isinstance(raises, AbstractRaises) + and raises.matches(call.excinfo.value) + ) + ): + rep.outcome = "skipped" + rep.wasxfail = xfailed.reason + else: + rep.outcome = "failed" + elif call.when == "call": + if xfailed.strict: + rep.outcome = "failed" + rep.longrepr = "[XPASS(strict)] " + xfailed.reason + else: + rep.outcome = "passed" + rep.wasxfail = xfailed.reason + return rep + + +def pytest_report_teststatus(report: BaseReport) -> tuple[str, str, str] | None: + if hasattr(report, "wasxfail"): + if report.skipped: + return "xfailed", "x", "XFAIL" + elif report.passed: + return "xpassed", "X", "XPASS" + return None diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/stash.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/stash.py new file mode 100644 index 0000000..6a9ff88 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/stash.py @@ -0,0 +1,116 @@ +from __future__ import annotations + +from typing import Any +from typing import cast +from typing import Generic +from typing import TypeVar + + +__all__ = ["Stash", "StashKey"] + + +T = TypeVar("T") +D = TypeVar("D") + + +class StashKey(Generic[T]): + """``StashKey`` is an object used as a key to a :class:`Stash`. + + A ``StashKey`` is associated with the type ``T`` of the value of the key. + + A ``StashKey`` is unique and cannot conflict with another key. + + .. versionadded:: 7.0 + """ + + __slots__ = () + + +class Stash: + r"""``Stash`` is a type-safe heterogeneous mutable mapping that + allows keys and value types to be defined separately from + where it (the ``Stash``) is created. + + Usually you will be given an object which has a ``Stash``, for example + :class:`~pytest.Config` or a :class:`~_pytest.nodes.Node`: + + .. code-block:: python + + stash: Stash = some_object.stash + + If a module or plugin wants to store data in this ``Stash``, it creates + :class:`StashKey`\s for its keys (at the module level): + + .. code-block:: python + + # At the top-level of the module + some_str_key = StashKey[str]() + some_bool_key = StashKey[bool]() + + To store information: + + .. code-block:: python + + # Value type must match the key. + stash[some_str_key] = "value" + stash[some_bool_key] = True + + To retrieve the information: + + .. code-block:: python + + # The static type of some_str is str. + some_str = stash[some_str_key] + # The static type of some_bool is bool. + some_bool = stash[some_bool_key] + + .. versionadded:: 7.0 + """ + + __slots__ = ("_storage",) + + def __init__(self) -> None: + self._storage: dict[StashKey[Any], object] = {} + + def __setitem__(self, key: StashKey[T], value: T) -> None: + """Set a value for key.""" + self._storage[key] = value + + def __getitem__(self, key: StashKey[T]) -> T: + """Get the value for key. + + Raises ``KeyError`` if the key wasn't set before. + """ + return cast(T, self._storage[key]) + + def get(self, key: StashKey[T], default: D) -> T | D: + """Get the value for key, or return default if the key wasn't set + before.""" + try: + return self[key] + except KeyError: + return default + + def setdefault(self, key: StashKey[T], default: T) -> T: + """Return the value of key if already set, otherwise set the value + of key to default and return default.""" + try: + return self[key] + except KeyError: + self[key] = default + return default + + def __delitem__(self, key: StashKey[T]) -> None: + """Delete the value for key. + + Raises ``KeyError`` if the key wasn't set before. + """ + del self._storage[key] + + def __contains__(self, key: StashKey[T]) -> bool: + """Return whether key was set.""" + return key in self._storage + + def __len__(self) -> int: + """Return how many items exist in the stash.""" + return len(self._storage) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/stepwise.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/stepwise.py new file mode 100644 index 0000000..8901540 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/stepwise.py @@ -0,0 +1,209 @@ +from __future__ import annotations + +import dataclasses +from datetime import datetime +from datetime import timedelta +from typing import Any +from typing import TYPE_CHECKING + +from _pytest import nodes +from _pytest.cacheprovider import Cache +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.main import Session +from _pytest.reports import TestReport + + +if TYPE_CHECKING: + from typing_extensions import Self + +STEPWISE_CACHE_DIR = "cache/stepwise" + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--sw", + "--stepwise", + action="store_true", + default=False, + dest="stepwise", + help="Exit on test failure and continue from last failing test next time", + ) + group.addoption( + "--sw-skip", + "--stepwise-skip", + action="store_true", + default=False, + dest="stepwise_skip", + help="Ignore the first failing test but stop on the next failing test. " + "Implicitly enables --stepwise.", + ) + group.addoption( + "--sw-reset", + "--stepwise-reset", + action="store_true", + default=False, + dest="stepwise_reset", + help="Resets stepwise state, restarting the stepwise workflow. " + "Implicitly enables --stepwise.", + ) + + +def pytest_configure(config: Config) -> None: + # --stepwise-skip/--stepwise-reset implies stepwise. + if config.option.stepwise_skip or config.option.stepwise_reset: + config.option.stepwise = True + if config.getoption("stepwise"): + config.pluginmanager.register(StepwisePlugin(config), "stepwiseplugin") + + +def pytest_sessionfinish(session: Session) -> None: + if not session.config.getoption("stepwise"): + assert session.config.cache is not None + if hasattr(session.config, "workerinput"): + # Do not update cache if this process is a xdist worker to prevent + # race conditions (#10641). + return + + +@dataclasses.dataclass +class StepwiseCacheInfo: + # The nodeid of the last failed test. + last_failed: str | None + + # The number of tests in the last time --stepwise was run. + # We use this information as a simple way to invalidate the cache information, avoiding + # confusing behavior in case the cache is stale. + last_test_count: int | None + + # The date when the cache was last updated, for information purposes only. + last_cache_date_str: str + + @property + def last_cache_date(self) -> datetime: + return datetime.fromisoformat(self.last_cache_date_str) + + @classmethod + def empty(cls) -> Self: + return cls( + last_failed=None, + last_test_count=None, + last_cache_date_str=datetime.now().isoformat(), + ) + + def update_date_to_now(self) -> None: + self.last_cache_date_str = datetime.now().isoformat() + + +class StepwisePlugin: + def __init__(self, config: Config) -> None: + self.config = config + self.session: Session | None = None + self.report_status: list[str] = [] + assert config.cache is not None + self.cache: Cache = config.cache + self.skip: bool = config.getoption("stepwise_skip") + self.reset: bool = config.getoption("stepwise_reset") + self.cached_info = self._load_cached_info() + + def _load_cached_info(self) -> StepwiseCacheInfo: + cached_dict: dict[str, Any] | None = self.cache.get(STEPWISE_CACHE_DIR, None) + if cached_dict: + try: + return StepwiseCacheInfo( + cached_dict["last_failed"], + cached_dict["last_test_count"], + cached_dict["last_cache_date_str"], + ) + except (KeyError, TypeError) as e: + error = f"{type(e).__name__}: {e}" + self.report_status.append(f"error reading cache, discarding ({error})") + + # Cache not found or error during load, return a new cache. + return StepwiseCacheInfo.empty() + + def pytest_sessionstart(self, session: Session) -> None: + self.session = session + + def pytest_collection_modifyitems( + self, config: Config, items: list[nodes.Item] + ) -> None: + last_test_count = self.cached_info.last_test_count + self.cached_info.last_test_count = len(items) + + if self.reset: + self.report_status.append("resetting state, not skipping.") + self.cached_info.last_failed = None + return + + if not self.cached_info.last_failed: + self.report_status.append("no previously failed tests, not skipping.") + return + + if last_test_count is not None and last_test_count != len(items): + self.report_status.append( + f"test count changed, not skipping (now {len(items)} tests, previously {last_test_count})." + ) + self.cached_info.last_failed = None + return + + # Check all item nodes until we find a match on last failed. + failed_index = None + for index, item in enumerate(items): + if item.nodeid == self.cached_info.last_failed: + failed_index = index + break + + # If the previously failed test was not found among the test items, + # do not skip any tests. + if failed_index is None: + self.report_status.append("previously failed test not found, not skipping.") + else: + cache_age = datetime.now() - self.cached_info.last_cache_date + # Round up to avoid showing microseconds. + cache_age = timedelta(seconds=int(cache_age.total_seconds())) + self.report_status.append( + f"skipping {failed_index} already passed items (cache from {cache_age} ago," + f" use --sw-reset to discard)." + ) + deselected = items[:failed_index] + del items[:failed_index] + config.hook.pytest_deselected(items=deselected) + + def pytest_runtest_logreport(self, report: TestReport) -> None: + if report.failed: + if self.skip: + # Remove test from the failed ones (if it exists) and unset the skip option + # to make sure the following tests will not be skipped. + if report.nodeid == self.cached_info.last_failed: + self.cached_info.last_failed = None + + self.skip = False + else: + # Mark test as the last failing and interrupt the test session. + self.cached_info.last_failed = report.nodeid + assert self.session is not None + self.session.shouldstop = ( + "Test failed, continuing from this test next run." + ) + + else: + # If the test was actually run and did pass. + if report.when == "call": + # Remove test from the failed ones, if exists. + if report.nodeid == self.cached_info.last_failed: + self.cached_info.last_failed = None + + def pytest_report_collectionfinish(self) -> list[str] | None: + if self.config.get_verbosity() >= 0 and self.report_status: + return [f"stepwise: {x}" for x in self.report_status] + return None + + def pytest_sessionfinish(self) -> None: + if hasattr(self.config, "workerinput"): + # Do not update cache if this process is a xdist worker to prevent + # race conditions (#10641). + return + self.cached_info.update_date_to_now() + self.cache.set(STEPWISE_CACHE_DIR, dataclasses.asdict(self.cached_info)) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/subtests.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/subtests.py new file mode 100644 index 0000000..e0ceb27 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/subtests.py @@ -0,0 +1,411 @@ +"""Builtin plugin that adds subtests support.""" + +from __future__ import annotations + +from collections import defaultdict +from collections.abc import Callable +from collections.abc import Iterator +from collections.abc import Mapping +from contextlib import AbstractContextManager +from contextlib import contextmanager +from contextlib import ExitStack +from contextlib import nullcontext +import dataclasses +import time +from types import TracebackType +from typing import Any +from typing import TYPE_CHECKING + +import pluggy + +from _pytest._code import ExceptionInfo +from _pytest._io.saferepr import saferepr +from _pytest.capture import CaptureFixture +from _pytest.capture import FDCapture +from _pytest.capture import SysCapture +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import SubRequest +from _pytest.logging import catching_logs +from _pytest.logging import LogCaptureHandler +from _pytest.logging import LoggingPlugin +from _pytest.reports import TestReport +from _pytest.runner import CallInfo +from _pytest.runner import check_interactive_exception +from _pytest.runner import get_reraise_exceptions +from _pytest.stash import StashKey + + +if TYPE_CHECKING: + from typing_extensions import Self + + +def pytest_addoption(parser: Parser) -> None: + Config._add_verbosity_ini( + parser, + Config.VERBOSITY_SUBTESTS, + help=( + "Specify verbosity level for subtests. " + "Higher levels will generate output for passed subtests. Failed subtests are always reported." + ), + ) + + +@dataclasses.dataclass(frozen=True, slots=True, kw_only=True) +class SubtestContext: + """The values passed to Subtests.test() that are included in the test report.""" + + msg: str | None + kwargs: Mapping[str, Any] + + def _to_json(self) -> dict[str, Any]: + return dataclasses.asdict(self) + + @classmethod + def _from_json(cls, d: dict[str, Any]) -> Self: + return cls(msg=d["msg"], kwargs=d["kwargs"]) + + +@dataclasses.dataclass(init=False) +class SubtestReport(TestReport): + context: SubtestContext + + @property + def head_line(self) -> str: + _, _, domain = self.location + return f"{domain} {self._sub_test_description()}" + + def _sub_test_description(self) -> str: + parts = [] + if self.context.msg is not None: + parts.append(f"[{self.context.msg}]") + if self.context.kwargs: + params_desc = ", ".join( + f"{k}={saferepr(v)}" for (k, v) in self.context.kwargs.items() + ) + parts.append(f"({params_desc})") + return " ".join(parts) or "()" + + def _to_json(self) -> dict[str, Any]: + data = super()._to_json() + del data["context"] + data["_report_type"] = "SubTestReport" + data["_subtest.context"] = self.context._to_json() + return data + + @classmethod + def _from_json(cls, reportdict: dict[str, Any]) -> SubtestReport: + report = super()._from_json(reportdict) + report.context = SubtestContext._from_json(reportdict["_subtest.context"]) + return report + + @classmethod + def _new( + cls, + test_report: TestReport, + context: SubtestContext, + captured_output: Captured | None, + captured_logs: CapturedLogs | None, + ) -> Self: + result = super()._from_json(test_report._to_json()) + result.context = context + + if captured_output: + if captured_output.out: + result.sections.append(("Captured stdout call", captured_output.out)) + if captured_output.err: + result.sections.append(("Captured stderr call", captured_output.err)) + + if captured_logs and (log := captured_logs.handler.stream.getvalue()): + result.sections.append(("Captured log call", log)) + + return result + + +@fixture +def subtests(request: SubRequest) -> Subtests: + """Provides subtests functionality.""" + capmam = request.node.config.pluginmanager.get_plugin("capturemanager") + suspend_capture_ctx = ( + capmam.global_and_fixture_disabled if capmam is not None else nullcontext + ) + return Subtests(request.node.ihook, suspend_capture_ctx, request, _ispytest=True) + + +class Subtests: + """Subtests fixture, enables declaring subtests inside test functions via the :meth:`test` method.""" + + def __init__( + self, + ihook: pluggy.HookRelay, + suspend_capture_ctx: Callable[[], AbstractContextManager[None]], + request: SubRequest, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._ihook = ihook + self._suspend_capture_ctx = suspend_capture_ctx + self._request = request + + def test( + self, + msg: str | None = None, + **kwargs: Any, + ) -> _SubTestContextManager: + """ + Context manager for subtests, capturing exceptions raised inside the subtest scope and + reporting assertion failures and errors individually. + + Usage + ----- + + .. code-block:: python + + def test(subtests): + for i in range(5): + with subtests.test("custom message", i=i): + assert i % 2 == 0 + + :param msg: + If given, the message will be shown in the test report in case of subtest failure. + + :param kwargs: + Arbitrary values that are also added to the subtest report. + """ + return _SubTestContextManager( + self._ihook, + msg, + kwargs, + request=self._request, + suspend_capture_ctx=self._suspend_capture_ctx, + config=self._request.config, + ) + + +@dataclasses.dataclass +class _SubTestContextManager: + """ + Context manager for subtests, capturing exceptions raised inside the subtest scope and handling + them through the pytest machinery. + """ + + # Note: initially the logic for this context manager was implemented directly + # in Subtests.test() as a @contextmanager, however, it is not possible to control the output fully when + # exiting from it due to an exception when in `--exitfirst` mode, so this was refactored into an + # explicit context manager class (pytest-dev/pytest-subtests#134). + + ihook: pluggy.HookRelay + msg: str | None + kwargs: dict[str, Any] + suspend_capture_ctx: Callable[[], AbstractContextManager[None]] + request: SubRequest + config: Config + + def __enter__(self) -> None: + __tracebackhide__ = True + + self._start = time.time() + self._precise_start = time.perf_counter() + self._exc_info = None + + self._exit_stack = ExitStack() + self._captured_output = self._exit_stack.enter_context( + capturing_output(self.request) + ) + self._captured_logs = self._exit_stack.enter_context( + capturing_logs(self.request) + ) + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool: + __tracebackhide__ = True + if exc_val is not None: + exc_info = ExceptionInfo.from_exception(exc_val) + else: + exc_info = None + + self._exit_stack.close() + + precise_stop = time.perf_counter() + duration = precise_stop - self._precise_start + stop = time.time() + + call_info = CallInfo[None]( + None, + exc_info, + start=self._start, + stop=stop, + duration=duration, + when="call", + _ispytest=True, + ) + report = self.ihook.pytest_runtest_makereport( + item=self.request.node, call=call_info + ) + sub_report = SubtestReport._new( + report, + SubtestContext(msg=self.msg, kwargs=self.kwargs), + captured_output=self._captured_output, + captured_logs=self._captured_logs, + ) + + if sub_report.failed: + failed_subtests = self.config.stash[failed_subtests_key] + failed_subtests[self.request.node.nodeid] += 1 + + with self.suspend_capture_ctx(): + self.ihook.pytest_runtest_logreport(report=sub_report) + + if check_interactive_exception(call_info, sub_report): + self.ihook.pytest_exception_interact( + node=self.request.node, call=call_info, report=sub_report + ) + + if exc_val is not None: + if isinstance(exc_val, get_reraise_exceptions(self.config)): + return False + if self.request.session.shouldfail: + return False + return True + + +@contextmanager +def capturing_output(request: SubRequest) -> Iterator[Captured]: + option = request.config.getoption("capture", None) + + capman = request.config.pluginmanager.getplugin("capturemanager") + if getattr(capman, "_capture_fixture", None): + # capsys or capfd are active, subtest should not capture. + fixture = None + elif option == "sys": + fixture = CaptureFixture(SysCapture, request, _ispytest=True) + elif option == "fd": + fixture = CaptureFixture(FDCapture, request, _ispytest=True) + else: + fixture = None + + if fixture is not None: + fixture._start() + + captured = Captured() + try: + yield captured + finally: + if fixture is not None: + out, err = fixture.readouterr() + fixture.close() + captured.out = out + captured.err = err + + +@contextmanager +def capturing_logs( + request: SubRequest, +) -> Iterator[CapturedLogs | None]: + logging_plugin: LoggingPlugin | None = request.config.pluginmanager.getplugin( + "logging-plugin" + ) + if logging_plugin is None: + yield None + else: + handler = LogCaptureHandler() + handler.setFormatter(logging_plugin.formatter) + + captured_logs = CapturedLogs(handler) + with catching_logs(handler, level=logging_plugin.log_level): + yield captured_logs + + +@dataclasses.dataclass +class Captured: + out: str = "" + err: str = "" + + +@dataclasses.dataclass +class CapturedLogs: + handler: LogCaptureHandler + + +def pytest_report_to_serializable(report: TestReport) -> dict[str, Any] | None: + if isinstance(report, SubtestReport): + return report._to_json() + return None + + +def pytest_report_from_serializable(data: dict[str, Any]) -> SubtestReport | None: + if data.get("_report_type") == "SubTestReport": + return SubtestReport._from_json(data) + return None + + +# Dict of nodeid -> number of failed subtests. +# Used to fail top-level tests that passed but contain failed subtests. +failed_subtests_key = StashKey[defaultdict[str, int]]() + + +def pytest_configure(config: Config) -> None: + config.stash[failed_subtests_key] = defaultdict(lambda: 0) + + +@hookimpl(tryfirst=True) +def pytest_report_teststatus( + report: TestReport, + config: Config, +) -> tuple[str, str, str | Mapping[str, bool]] | None: + if report.when != "call": + return None + + quiet = config.get_verbosity(Config.VERBOSITY_SUBTESTS) == 0 + if isinstance(report, SubtestReport): + outcome = report.outcome + description = report._sub_test_description() + + if hasattr(report, "wasxfail"): + if quiet: + return "", "", "" + elif outcome == "skipped": + category = "xfailed" + short = "y" # x letter is used for regular xfail, y for subtest xfail + status = "SUBXFAIL" + # outcome == "passed" in an xfail is only possible via a @pytest.mark.xfail mark, which + # is not applicable to a subtest, which only handles pytest.xfail(). + else: # pragma: no cover + # This should not normally happen, unless some plugin is setting wasxfail without + # the correct outcome. Pytest expects the call outcome to be either skipped or + # passed in case of xfail. + # Let's pass this report to the next hook. + return None + return category, short, f"{status}{description}" + + if report.failed: + return outcome, "u", f"SUBFAILED{description}" + else: + if report.passed: + if quiet: + return "", "", "" + else: + return f"subtests {outcome}", "u", f"SUBPASSED{description}" + elif report.skipped: + if quiet: + return "", "", "" + else: + return outcome, "-", f"SUBSKIPPED{description}" + + else: + failed_subtests_count = config.stash[failed_subtests_key][report.nodeid] + # Top-level test, fail if it contains failed subtests and it has passed. + if report.passed and failed_subtests_count > 0: + report.outcome = "failed" + suffix = "s" if failed_subtests_count > 1 else "" + report.longrepr = f"contains {failed_subtests_count} failed subtest{suffix}" + + return None diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/terminal.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/terminal.py new file mode 100644 index 0000000..158558b --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/terminal.py @@ -0,0 +1,1762 @@ +# mypy: allow-untyped-defs +"""Terminal reporting of the full testing process. + +This is a good source for looking at the various reporting hooks. +""" + +from __future__ import annotations + +import argparse +from collections import Counter +from collections.abc import Callable +from collections.abc import Generator +from collections.abc import Mapping +from collections.abc import Sequence +import dataclasses +import datetime +from functools import partial +import inspect +from pathlib import Path +import platform +import sys +import textwrap +from typing import Any +from typing import ClassVar +from typing import final +from typing import Literal +from typing import NamedTuple +from typing import TextIO +from typing import TYPE_CHECKING +import warnings + +import pluggy + +from _pytest import compat +from _pytest import nodes +from _pytest import timing +from _pytest._code import ExceptionInfo +from _pytest._code.code import ExceptionRepr +from _pytest._io import TerminalWriter +from _pytest._io.wcwidth import wcswidth +import _pytest._version +from _pytest.compat import running_on_ci +from _pytest.config import _PluggyPlugin +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.nodes import Item +from _pytest.nodes import Node +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.reports import BaseReport +from _pytest.reports import CollectReport +from _pytest.reports import TestReport + + +if TYPE_CHECKING: + from _pytest.main import Session + + +REPORT_COLLECTING_RESOLUTION = 0.5 + +KNOWN_TYPES = ( + "failed", + "passed", + "skipped", + "deselected", + "xfailed", + "xpassed", + "warnings", + "error", + "subtests passed", + "subtests failed", + "subtests skipped", +) + +_REPORTCHARS_DEFAULT = "fE" + + +class MoreQuietAction(argparse.Action): + """A modified copy of the argparse count action which counts down and updates + the legacy quiet attribute at the same time. + + Used to unify verbosity handling. + """ + + def __init__( + self, + option_strings: Sequence[str], + dest: str, + default: object = None, + required: bool = False, + help: str | None = None, + ) -> None: + super().__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + default=default, + required=required, + help=help, + ) + + def __call__( + self, + parser: argparse.ArgumentParser, + namespace: argparse.Namespace, + values: str | Sequence[object] | None, + option_string: str | None = None, + ) -> None: + new_count = getattr(namespace, self.dest, 0) - 1 + setattr(namespace, self.dest, new_count) + # todo Deprecate config.quiet + namespace.quiet = getattr(namespace, "quiet", 0) + 1 + + +class TestShortLogReport(NamedTuple): + """Used to store the test status result category, shortletter and verbose word. + For example ``"rerun", "R", ("RERUN", {"yellow": True})``. + + :ivar category: + The class of result, for example ``“passed”``, ``“skipped”``, ``“error”``, or the empty string. + + :ivar letter: + The short letter shown as testing progresses, for example ``"."``, ``"s"``, ``"E"``, or the empty string. + + :ivar word: + Verbose word is shown as testing progresses in verbose mode, for example ``"PASSED"``, ``"SKIPPED"``, + ``"ERROR"``, or the empty string. + """ + + category: str + letter: str + word: str | tuple[str, Mapping[str, bool]] + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting", "Reporting", after="general") + group._addoption( # private to use reserved lower-case short option + "-v", + "--verbose", + action="count", + default=0, + dest="verbose", + help="Increase verbosity", + ) + group.addoption( + "--no-header", + action="store_true", + default=False, + dest="no_header", + help="Disable header", + ) + group.addoption( + "--no-summary", + action="store_true", + default=False, + dest="no_summary", + help="Disable summary", + ) + group.addoption( + "--no-fold-skipped", + action="store_false", + dest="fold_skipped", + default=True, + help="Do not fold skipped tests in short summary.", + ) + group.addoption( + "--force-short-summary", + action="store_true", + dest="force_short_summary", + default=False, + help="Force condensed summary output regardless of verbosity level.", + ) + group._addoption( # private to use reserved lower-case short option + "-q", + "--quiet", + action=MoreQuietAction, + default=0, + dest="verbose", + help="Decrease verbosity", + ) + group.addoption( + "--verbosity", + dest="verbose", + type=int, + default=0, + help="Set verbosity. Default: 0.", + ) + group._addoption( # private to use reserved lower-case short option + "-r", + action="store", + dest="reportchars", + default=_REPORTCHARS_DEFAULT, + metavar="chars", + help="Show extra test summary info as specified by chars: (f)ailed, " + "(E)rror, (s)kipped, (x)failed, (X)passed, " + "(p)assed, (P)assed with output, (a)ll except passed (p/P), or (A)ll. " + "(w)arnings are enabled by default (see --disable-warnings), " + "'N' can be used to reset the list. (default: 'fE').", + ) + group.addoption( + "--disable-warnings", + "--disable-pytest-warnings", + default=False, + dest="disable_warnings", + action="store_true", + help="Disable warnings summary", + ) + group._addoption( # private to use reserved lower-case short option + "-l", + "--showlocals", + action="store_true", + dest="showlocals", + default=False, + help="Show locals in tracebacks (disabled by default)", + ) + group.addoption( + "--no-showlocals", + action="store_false", + dest="showlocals", + help="Hide locals in tracebacks (negate --showlocals passed through addopts)", + ) + group.addoption( + "--tb", + metavar="style", + action="store", + dest="tbstyle", + default="auto", + choices=["auto", "long", "short", "no", "line", "native"], + help="Traceback print mode (auto/long/short/line/native/no)", + ) + group.addoption( + "--xfail-tb", + action="store_true", + dest="xfail_tb", + default=False, + help="Show tracebacks for xfail (as long as --tb != no)", + ) + group.addoption( + "--show-capture", + action="store", + dest="showcapture", + choices=["no", "stdout", "stderr", "log", "all"], + default="all", + help="Controls how captured stdout/stderr/log is shown on failed tests. " + "Default: all.", + ) + group.addoption( + "--fulltrace", + "--full-trace", + action="store_true", + default=False, + help="Don't cut any tracebacks (default is to cut)", + ) + group.addoption( + "--color", + metavar="color", + action="store", + dest="color", + default="auto", + choices=["yes", "no", "auto"], + help="Color terminal output (yes/no/auto)", + ) + group.addoption( + "--code-highlight", + default="yes", + choices=["yes", "no"], + help="Whether code should be highlighted (only if --color is also enabled). " + "Default: yes.", + ) + + parser.addini( + "console_output_style", + help='Console output: "classic", or with additional progress information ' + '("progress" (percentage) | "count" | "progress-even-when-capture-no" (forces ' + "progress even when capture=no)", + default="progress", + ) + Config._add_verbosity_ini( + parser, + Config.VERBOSITY_TEST_CASES, + help=( + "Specify a verbosity level for test case execution, overriding the main level. " + "Higher levels will provide more detailed information about each test case executed." + ), + ) + + +def pytest_configure(config: Config) -> None: + reporter = TerminalReporter(config, sys.stdout) + config.pluginmanager.register(reporter, "terminalreporter") + if config.option.debug or config.option.traceconfig: + + def mywriter(tags, args): + msg = " ".join(map(str, args)) + reporter.write_line("[traceconfig] " + msg) + + config.trace.root.setprocessor("pytest:config", mywriter) + + if reporter.isatty(): + plugin = TerminalProgressPlugin(reporter) + config.pluginmanager.register(plugin, "terminalprogress") + + +def getreportopt(config: Config) -> str: + reportchars: str = config.option.reportchars + + old_aliases = {"F", "S"} + reportopts = "" + for char in reportchars: + if char in old_aliases: + char = char.lower() + if char == "a": + reportopts = "sxXEf" + elif char == "A": + reportopts = "PpsxXEf" + elif char == "N": + reportopts = "" + elif char not in reportopts: + reportopts += char + + if not config.option.disable_warnings and "w" not in reportopts: + reportopts = "w" + reportopts + elif config.option.disable_warnings and "w" in reportopts: + reportopts = reportopts.replace("w", "") + + return reportopts + + +@hookimpl(trylast=True) # after _pytest.runner +def pytest_report_teststatus(report: BaseReport) -> tuple[str, str, str]: + letter = "F" + if report.passed: + letter = "." + elif report.skipped: + letter = "s" + + outcome: str = report.outcome + if report.when in ("collect", "setup", "teardown") and outcome == "failed": + outcome = "error" + letter = "E" + + return outcome, letter, outcome.upper() + + +@dataclasses.dataclass +class WarningReport: + """Simple structure to hold warnings information captured by ``pytest_warning_recorded``. + + :ivar str message: + User friendly message about the warning. + :ivar str|None nodeid: + nodeid that generated the warning (see ``get_location``). + :ivar tuple fslocation: + File system location of the source of the warning (see ``get_location``). + """ + + message: str + nodeid: str | None = None + fslocation: tuple[str, int] | None = None + + count_towards_summary: ClassVar = True + + def get_location(self, config: Config) -> str | None: + """Return the more user-friendly information about the location of a warning, or None.""" + if self.nodeid: + return self.nodeid + if self.fslocation: + filename, linenum = self.fslocation + relpath = bestrelpath(config.invocation_params.dir, absolutepath(filename)) + return f"{relpath}:{linenum}" + return None + + +@final +class TerminalReporter: + def __init__(self, config: Config, file: TextIO | None = None) -> None: + import _pytest.config + + self.config = config + self._numcollected = 0 + self._session: Session | None = None + self._showfspath: bool | None = None + + self.stats: dict[str, list[Any]] = {} + self._main_color: str | None = None + self._known_types: list[str] | None = None + self.startpath = config.invocation_params.dir + if file is None: + file = sys.stdout + self._tw = _pytest.config.create_terminal_writer(config, file) + self._screen_width = self._tw.fullwidth + self.currentfspath: None | Path | str | int = None + self.reportchars = getreportopt(config) + self.foldskipped = config.option.fold_skipped + self.hasmarkup = self._tw.hasmarkup + # isatty should be a method but was wrongly implemented as a boolean. + # We use CallableBool here to support both. + self.isatty = compat.CallableBool(file.isatty()) + self._progress_nodeids_reported: set[str] = set() + self._timing_nodeids_reported: set[str] = set() + self._show_progress_info = self._determine_show_progress_info() + self._collect_report_last_write = timing.Instant() + self._already_displayed_warnings: int | None = None + self._keyboardinterrupt_memo: ExceptionRepr | None = None + + def _determine_show_progress_info( + self, + ) -> Literal["progress", "count", "times", False]: + """Return whether we should display progress information based on the current config.""" + # do not show progress if we are not capturing output (#3038) unless explicitly + # overridden by progress-even-when-capture-no + if ( + self.config.getoption("capture", "no") == "no" + and self.config.getini("console_output_style") + != "progress-even-when-capture-no" + ): + return False + # do not show progress if we are showing fixture setup/teardown + if self.config.getoption("setupshow", False): + return False + cfg: str = self.config.getini("console_output_style") + if cfg in {"progress", "progress-even-when-capture-no"}: + return "progress" + elif cfg == "count": + return "count" + elif cfg == "times": + return "times" + else: + return False + + @property + def verbosity(self) -> int: + verbosity: int = self.config.option.verbose + return verbosity + + @property + def showheader(self) -> bool: + return self.verbosity >= 0 + + @property + def no_header(self) -> bool: + return bool(self.config.option.no_header) + + @property + def no_summary(self) -> bool: + return bool(self.config.option.no_summary) + + @property + def showfspath(self) -> bool: + if self._showfspath is None: + return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) >= 0 + return self._showfspath + + @showfspath.setter + def showfspath(self, value: bool | None) -> None: + self._showfspath = value + + @property + def showlongtestinfo(self) -> bool: + return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) > 0 + + @property + def reported_progress(self) -> int: + """The amount of items reported in the progress so far. + + :meta private: + """ + return len(self._progress_nodeids_reported) + + def hasopt(self, char: str) -> bool: + char = {"xfailed": "x", "skipped": "s"}.get(char, char) + return char in self.reportchars + + def write_fspath_result(self, nodeid: str, res: str, **markup: bool) -> None: + fspath = self.config.rootpath / nodeid.split("::")[0] + if self.currentfspath is None or fspath != self.currentfspath: + if self.currentfspath is not None and self._show_progress_info: + self._write_progress_information_filling_space() + self.currentfspath = fspath + relfspath = bestrelpath(self.startpath, fspath) + self._tw.line() + self._tw.write(relfspath + " ") + self._tw.write(res, flush=True, **markup) + + def write_ensure_prefix(self, prefix: str, extra: str = "", **kwargs) -> None: + if self.currentfspath != prefix: + self._tw.line() + self.currentfspath = prefix + self._tw.write(prefix) + if extra: + self._tw.write(extra, **kwargs) + self.currentfspath = -2 + + def ensure_newline(self) -> None: + if self.currentfspath: + self._tw.line() + self.currentfspath = None + + def wrap_write( + self, + content: str, + *, + flush: bool = False, + margin: int = 8, + line_sep: str = "\n", + **markup: bool, + ) -> None: + """Wrap message with margin for progress info.""" + width_of_current_line = self._tw.width_of_current_line + wrapped = line_sep.join( + textwrap.wrap( + " " * width_of_current_line + content, + width=self._screen_width - margin, + drop_whitespace=True, + replace_whitespace=False, + ), + ) + wrapped = wrapped[width_of_current_line:] + self._tw.write(wrapped, flush=flush, **markup) + + def write(self, content: str, *, flush: bool = False, **markup: bool) -> None: + self._tw.write(content, flush=flush, **markup) + + def write_raw(self, content: str, *, flush: bool = False) -> None: + self._tw.write_raw(content, flush=flush) + + def flush(self) -> None: + self._tw.flush() + + def write_line(self, line: str | bytes, **markup: bool) -> None: + if not isinstance(line, str): + line = str(line, errors="replace") + self.ensure_newline() + self._tw.line(line, **markup) + + def rewrite(self, line: str, **markup: bool) -> None: + """Rewinds the terminal cursor to the beginning and writes the given line. + + :param erase: + If True, will also add spaces until the full terminal width to ensure + previous lines are properly erased. + + The rest of the keyword arguments are markup instructions. + """ + erase = markup.pop("erase", False) + if erase: + fill_count = self._tw.fullwidth - len(line) - 1 + fill = " " * fill_count + else: + fill = "" + line = str(line) + self._tw.write("\r" + line + fill, **markup) + + def write_sep( + self, + sep: str, + title: str | None = None, + fullwidth: int | None = None, + **markup: bool, + ) -> None: + self.ensure_newline() + self._tw.sep(sep, title, fullwidth, **markup) + + def section(self, title: str, sep: str = "=", **kw: bool) -> None: + self._tw.sep(sep, title, **kw) + + def line(self, msg: str, **kw: bool) -> None: + self._tw.line(msg, **kw) + + def _add_stats(self, category: str, items: Sequence[Any]) -> None: + set_main_color = category not in self.stats + self.stats.setdefault(category, []).extend(items) + if set_main_color: + self._set_main_color() + + def pytest_internalerror(self, excrepr: ExceptionRepr) -> bool: + for line in str(excrepr).split("\n"): + self.write_line("INTERNALERROR> " + line) + return True + + def pytest_warning_recorded( + self, + warning_message: warnings.WarningMessage, + nodeid: str, + ) -> None: + from _pytest.warnings import warning_record_to_str + + fslocation = warning_message.filename, warning_message.lineno + message = warning_record_to_str(warning_message) + + warning_report = WarningReport( + fslocation=fslocation, message=message, nodeid=nodeid + ) + self._add_stats("warnings", [warning_report]) + + def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None: + if self.config.option.traceconfig: + msg = f"PLUGIN registered: {plugin}" + # XXX This event may happen during setup/teardown time + # which unfortunately captures our output here + # which garbles our output if we use self.write_line. + self.write_line(msg) + + def pytest_deselected(self, items: Sequence[Item]) -> None: + self._add_stats("deselected", items) + + def pytest_runtest_logstart( + self, nodeid: str, location: tuple[str, int | None, str] + ) -> None: + fspath, lineno, domain = location + # Ensure that the path is printed before the + # 1st test of a module starts running. + if self.showlongtestinfo: + line = self._locationline(nodeid, fspath, lineno, domain) + self.write_ensure_prefix(line, "") + self.flush() + elif self.showfspath: + self.write_fspath_result(nodeid, "") + self.flush() + + def pytest_runtest_logreport(self, report: TestReport) -> None: + self._tests_ran = True + rep = report + + res = TestShortLogReport( + *self.config.hook.pytest_report_teststatus(report=rep, config=self.config) + ) + category, letter, word = res.category, res.letter, res.word + if not isinstance(word, tuple): + markup = None + else: + word, markup = word + self._add_stats(category, [rep]) + if not letter and not word: + # Probably passed setup/teardown. + return + if markup is None: + was_xfail = hasattr(report, "wasxfail") + if rep.passed and not was_xfail: + markup = {"green": True} + elif rep.passed and was_xfail: + markup = {"yellow": True} + elif rep.failed: + markup = {"red": True} + elif rep.skipped: + markup = {"yellow": True} + else: + markup = {} + self._progress_nodeids_reported.add(rep.nodeid) + if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0: + self._tw.write(letter, **markup) + # When running in xdist, the logreport and logfinish of multiple + # items are interspersed, e.g. `logreport`, `logreport`, + # `logfinish`, `logfinish`. To avoid the "past edge" calculation + # from getting confused and overflowing (#7166), do the past edge + # printing here and not in logfinish, except for the 100% which + # should only be printed after all teardowns are finished. + if self._show_progress_info and not self._is_last_item: + self._write_progress_information_if_past_edge() + else: + line = self._locationline(rep.nodeid, *rep.location) + running_xdist = hasattr(rep, "node") + if not running_xdist: + self.write_ensure_prefix(line, word, **markup) + if rep.skipped or hasattr(report, "wasxfail"): + reason = _get_raw_skip_reason(rep) + if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) < 2: + available_width = ( + (self._tw.fullwidth - self._tw.width_of_current_line) + - len(" [100%]") + - 1 + ) + formatted_reason = _format_trimmed( + " ({})", reason, available_width + ) + else: + formatted_reason = f" ({reason})" + + if reason and formatted_reason is not None: + self.wrap_write(formatted_reason) + if self._show_progress_info: + self._write_progress_information_filling_space() + else: + self.ensure_newline() + self._tw.write(f"[{rep.node.gateway.id}]") + if self._show_progress_info: + self._tw.write( + self._get_progress_information_message() + " ", cyan=True + ) + else: + self._tw.write(" ") + self._tw.write(word, **markup) + self._tw.write(" " + line) + self.currentfspath = -2 + self.flush() + + @property + def _is_last_item(self) -> bool: + assert self._session is not None + return self.reported_progress == self._session.testscollected + + @hookimpl(wrapper=True) + def pytest_runtestloop(self) -> Generator[None, object, object]: + result = yield + + # Write the final/100% progress -- deferred until the loop is complete. + if ( + self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0 + and self._show_progress_info + and self.reported_progress + ): + self._write_progress_information_filling_space() + + return result + + def _get_progress_information_message(self) -> str: + assert self._session + collected = self._session.testscollected + if self._show_progress_info == "count": + if collected: + progress = self.reported_progress + counter_format = f"{{:{len(str(collected))}d}}" + format_string = f" [{counter_format}/{{}}]" + return format_string.format(progress, collected) + return f" [ {collected} / {collected} ]" + if self._show_progress_info == "times": + if not collected: + return "" + all_reports = ( + self._get_reports_to_display("passed") + + self._get_reports_to_display("xpassed") + + self._get_reports_to_display("failed") + + self._get_reports_to_display("xfailed") + + self._get_reports_to_display("skipped") + + self._get_reports_to_display("error") + + self._get_reports_to_display("") + ) + current_location = all_reports[-1].location[0] + not_reported = [ + r for r in all_reports if r.nodeid not in self._timing_nodeids_reported + ] + tests_in_module = sum( + i.location[0] == current_location for i in self._session.items + ) + tests_completed = sum( + r.when == "setup" + for r in not_reported + if r.location[0] == current_location + ) + last_in_module = tests_completed == tests_in_module + if self.showlongtestinfo or last_in_module: + self._timing_nodeids_reported.update(r.nodeid for r in not_reported) + return format_node_duration( + sum(r.duration for r in not_reported if isinstance(r, TestReport)) + ) + return "" + if collected: + return f" [{self.reported_progress * 100 // collected:3d}%]" + return " [100%]" + + def _write_progress_information_if_past_edge(self) -> None: + w = self._width_of_current_line + if self._show_progress_info == "count": + assert self._session + num_tests = self._session.testscollected + progress_length = len(f" [{num_tests}/{num_tests}]") + elif self._show_progress_info == "times": + progress_length = len(" 99h 59m") + else: + progress_length = len(" [100%]") + past_edge = w + progress_length + 1 >= self._screen_width + if past_edge: + main_color, _ = self._get_main_color() + msg = self._get_progress_information_message() + self._tw.write(msg + "\n", **{main_color: True}) + + def _write_progress_information_filling_space(self) -> None: + color, _ = self._get_main_color() + msg = self._get_progress_information_message() + w = self._width_of_current_line + fill = self._tw.fullwidth - w - 1 + self.write(msg.rjust(fill), flush=True, **{color: True}) + + @property + def _width_of_current_line(self) -> int: + """Return the width of the current line.""" + return self._tw.width_of_current_line + + def pytest_collection(self) -> None: + if self.isatty(): + if self.config.option.verbose >= 0: + self.write("collecting ... ", flush=True, bold=True) + elif self.config.option.verbose >= 1: + self.write("collecting ... ", flush=True, bold=True) + + def pytest_collectreport(self, report: CollectReport) -> None: + if report.failed: + self._add_stats("error", [report]) + elif report.skipped: + self._add_stats("skipped", [report]) + items = [x for x in report.result if isinstance(x, Item)] + self._numcollected += len(items) + if self.isatty(): + self.report_collect() + + def report_collect(self, final: bool = False) -> None: + if self.config.option.verbose < 0: + return + + if not final: + # Only write the "collecting" report every `REPORT_COLLECTING_RESOLUTION`. + if ( + self._collect_report_last_write.elapsed().seconds + < REPORT_COLLECTING_RESOLUTION + ): + return + self._collect_report_last_write = timing.Instant() + + errors = len(self.stats.get("error", [])) + skipped = len(self.stats.get("skipped", [])) + deselected = len(self.stats.get("deselected", [])) + selected = self._numcollected - deselected + line = "collected " if final else "collecting " + line += ( + str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s") + ) + if errors: + line += f" / {errors} error{'s' if errors != 1 else ''}" + if deselected: + line += f" / {deselected} deselected" + if skipped: + line += f" / {skipped} skipped" + if self._numcollected > selected: + line += f" / {selected} selected" + if self.isatty(): + self.rewrite(line, bold=True, erase=True) + if final: + self.write("\n") + else: + self.write_line(line) + + @hookimpl(trylast=True) + def pytest_sessionstart(self, session: Session) -> None: + self._session = session + self._session_start = timing.Instant() + if not self.showheader: + return + self.write_sep("=", "test session starts", bold=True) + verinfo = platform.python_version() + if not self.no_header: + msg = f"platform {sys.platform} -- Python {verinfo}" + pypy_version_info = getattr(sys, "pypy_version_info", None) + if pypy_version_info: + verinfo = ".".join(map(str, pypy_version_info[:3])) + msg += f"[pypy-{verinfo}-{pypy_version_info[3]}]" + msg += f", pytest-{_pytest._version.version}, pluggy-{pluggy.__version__}" + if ( + self.verbosity > 0 + or self.config.option.debug + or getattr(self.config.option, "pastebin", None) + ): + msg += " -- " + str(sys.executable) + self.write_line(msg) + lines = self.config.hook.pytest_report_header( + config=self.config, start_path=self.startpath + ) + self._write_report_lines_from_hooks(lines) + + def _write_report_lines_from_hooks( + self, lines: Sequence[str | Sequence[str]] + ) -> None: + for line_or_lines in reversed(lines): + if isinstance(line_or_lines, str): + self.write_line(line_or_lines) + else: + for line in line_or_lines: + self.write_line(line) + + def pytest_report_header(self, config: Config) -> list[str]: + result = [f"rootdir: {config.rootpath}"] + + if config.inipath: + warning = "" + if config._ignored_config_files: + warning = f" (WARNING: ignoring pytest config in {', '.join(config._ignored_config_files)}!)" + result.append( + "configfile: " + bestrelpath(config.rootpath, config.inipath) + warning + ) + + if config.args_source == Config.ArgsSource.TESTPATHS: + testpaths: list[str] = config.getini("testpaths") + result.append("testpaths: {}".format(", ".join(testpaths))) + + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + result.append( + "plugins: {}".format(", ".join(_plugin_nameversions(plugininfo))) + ) + return result + + def pytest_collection_finish(self, session: Session) -> None: + self.report_collect(True) + + lines = self.config.hook.pytest_report_collectionfinish( + config=self.config, + start_path=self.startpath, + items=session.items, + ) + self._write_report_lines_from_hooks(lines) + + if self.config.getoption("collectonly"): + if session.items: + if self.config.option.verbose > -1: + self._tw.line("") + self._printcollecteditems(session.items) + + failed = self.stats.get("failed") + if failed: + self._tw.sep("!", "collection failures") + for rep in failed: + rep.toterminal(self._tw) + + def _printcollecteditems(self, items: Sequence[Item]) -> None: + test_cases_verbosity = self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) + if test_cases_verbosity < 0: + if test_cases_verbosity < -1: + counts = Counter(item.nodeid.split("::", 1)[0] for item in items) + for name, count in sorted(counts.items()): + self._tw.line(f"{name}: {count}") + else: + for item in items: + self._tw.line(item.nodeid) + return + stack: list[Node] = [] + indent = "" + for item in items: + needed_collectors = item.listchain()[1:] # strip root node + while stack: + if stack == needed_collectors[: len(stack)]: + break + stack.pop() + for col in needed_collectors[len(stack) :]: + stack.append(col) + indent = (len(stack) - 1) * " " + self._tw.line(f"{indent}{col}") + if test_cases_verbosity >= 1: + obj = getattr(col, "obj", None) + doc = inspect.getdoc(obj) if obj else None + if doc: + for line in doc.splitlines(): + self._tw.line("{}{}".format(indent + " ", line)) + + @hookimpl(wrapper=True) + def pytest_sessionfinish( + self, session: Session, exitstatus: int | ExitCode + ) -> Generator[None]: + result = yield + self._tw.line("") + summary_exit_codes = ( + ExitCode.OK, + ExitCode.TESTS_FAILED, + ExitCode.INTERRUPTED, + ExitCode.USAGE_ERROR, + ExitCode.NO_TESTS_COLLECTED, + ) + if exitstatus in summary_exit_codes and not self.no_summary: + self.config.hook.pytest_terminal_summary( + terminalreporter=self, exitstatus=exitstatus, config=self.config + ) + if session.shouldfail: + self.write_sep("!", str(session.shouldfail), red=True) + if exitstatus == ExitCode.INTERRUPTED: + self._report_keyboardinterrupt() + self._keyboardinterrupt_memo = None + elif session.shouldstop: + self.write_sep("!", str(session.shouldstop), red=True) + self.summary_stats() + return result + + @hookimpl(wrapper=True) + def pytest_terminal_summary(self) -> Generator[None]: + self.summary_errors() + self.summary_failures() + self.summary_xfailures() + self.summary_warnings() + self.summary_passes() + self.summary_xpasses() + try: + return (yield) + finally: + self.short_test_summary() + # Display any extra warnings from teardown here (if any). + self.summary_warnings() + + def pytest_keyboard_interrupt(self, excinfo: ExceptionInfo[BaseException]) -> None: + self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True) + + def pytest_unconfigure(self) -> None: + if self._keyboardinterrupt_memo is not None: + self._report_keyboardinterrupt() + + def _report_keyboardinterrupt(self) -> None: + excrepr = self._keyboardinterrupt_memo + assert excrepr is not None + assert excrepr.reprcrash is not None + msg = excrepr.reprcrash.message + self.write_sep("!", msg) + if "KeyboardInterrupt" in msg: + if self.config.option.fulltrace: + excrepr.toterminal(self._tw) + else: + excrepr.reprcrash.toterminal(self._tw) + self._tw.line( + "(to show a full traceback on KeyboardInterrupt use --full-trace)", + yellow=True, + ) + + def _locationline( + self, nodeid: str, fspath: str, lineno: int | None, domain: str + ) -> str: + def mkrel(nodeid: str) -> str: + line = self.config.cwd_relative_nodeid(nodeid) + if domain and line.endswith(domain): + line = line[: -len(domain)] + values = domain.split("[") + values[0] = values[0].replace(".", "::") # don't replace '.' in params + line += "[".join(values) + return line + + # fspath comes from testid which has a "/"-normalized path. + if fspath: + res = mkrel(nodeid) + if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace( + "\\", nodes.SEP + ): + res += " <- " + bestrelpath(self.startpath, Path(fspath)) + else: + res = "[location]" + return res + " " + + def _getfailureheadline(self, rep): + head_line = rep.head_line + if head_line: + return head_line + return "test session" # XXX? + + def _getcrashline(self, rep): + try: + return str(rep.longrepr.reprcrash) + except AttributeError: + try: + return str(rep.longrepr)[:50] + except AttributeError: + return "" + + # + # Summaries for sessionfinish. + # + def getreports(self, name: str): + return [x for x in self.stats.get(name, ()) if not hasattr(x, "_pdbshown")] + + def summary_warnings(self) -> None: + if self.hasopt("w"): + all_warnings: list[WarningReport] | None = self.stats.get("warnings") + if not all_warnings: + return + + final = self._already_displayed_warnings is not None + if final: + warning_reports = all_warnings[self._already_displayed_warnings :] + else: + warning_reports = all_warnings + self._already_displayed_warnings = len(warning_reports) + if not warning_reports: + return + + reports_grouped_by_message: dict[str, list[WarningReport]] = {} + for wr in warning_reports: + reports_grouped_by_message.setdefault(wr.message, []).append(wr) + + def collapsed_location_report(reports: list[WarningReport]) -> str: + locations = [] + for w in reports: + location = w.get_location(self.config) + if location: + locations.append(location) + + if len(locations) < 10: + return "\n".join(map(str, locations)) + + counts_by_filename = Counter( + str(loc).split("::", 1)[0] for loc in locations + ) + return "\n".join( + "{}: {} warning{}".format(k, v, "s" if v > 1 else "") + for k, v in counts_by_filename.items() + ) + + title = "warnings summary (final)" if final else "warnings summary" + self.write_sep("=", title, yellow=True, bold=False) + for message, message_reports in reports_grouped_by_message.items(): + maybe_location = collapsed_location_report(message_reports) + if maybe_location: + self._tw.line(maybe_location) + lines = message.splitlines() + indented = "\n".join(" " + x for x in lines) + message = indented.rstrip() + else: + message = message.rstrip() + self._tw.line(message) + self._tw.line() + self._tw.line( + "-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html" + ) + + def summary_passes(self) -> None: + self.summary_passes_combined("passed", "PASSES", "P") + + def summary_xpasses(self) -> None: + self.summary_passes_combined("xpassed", "XPASSES", "X") + + def summary_passes_combined( + self, which_reports: str, sep_title: str, needed_opt: str + ) -> None: + if self.config.option.tbstyle != "no": + if self.hasopt(needed_opt): + reports: list[TestReport] = self.getreports(which_reports) + if not reports: + return + self.write_sep("=", sep_title) + for rep in reports: + if rep.sections: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg, green=True, bold=True) + self._outrep_summary(rep) + self._handle_teardown_sections(rep.nodeid) + + def _get_teardown_reports(self, nodeid: str) -> list[TestReport]: + reports = self.getreports("") + return [ + report + for report in reports + if report.when == "teardown" and report.nodeid == nodeid + ] + + def _handle_teardown_sections(self, nodeid: str) -> None: + for report in self._get_teardown_reports(nodeid): + self.print_teardown_sections(report) + + def print_teardown_sections(self, rep: TestReport) -> None: + showcapture = self.config.option.showcapture + if showcapture == "no": + return + for secname, content in rep.sections: + if showcapture != "all" and showcapture not in secname: + continue + if "teardown" in secname: + self._tw.sep("-", secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + def summary_failures(self) -> None: + style = self.config.option.tbstyle + self.summary_failures_combined("failed", "FAILURES", style=style) + + def summary_xfailures(self) -> None: + show_tb = self.config.option.xfail_tb + style = self.config.option.tbstyle if show_tb else "no" + self.summary_failures_combined("xfailed", "XFAILURES", style=style) + + def summary_failures_combined( + self, + which_reports: str, + sep_title: str, + *, + style: str, + needed_opt: str | None = None, + ) -> None: + if style != "no": + if not needed_opt or self.hasopt(needed_opt): + reports: list[BaseReport] = self.getreports(which_reports) + if not reports: + return + self.write_sep("=", sep_title) + if style == "line": + for rep in reports: + line = self._getcrashline(rep) + self._outrep_summary(rep) + self.write_line(line) + else: + for rep in reports: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg, red=True, bold=True) + self._outrep_summary(rep) + self._handle_teardown_sections(rep.nodeid) + + def summary_errors(self) -> None: + if self.config.option.tbstyle != "no": + reports: list[BaseReport] = self.getreports("error") + if not reports: + return + self.write_sep("=", "ERRORS") + for rep in self.stats["error"]: + msg = self._getfailureheadline(rep) + if rep.when == "collect": + msg = "ERROR collecting " + msg + else: + msg = f"ERROR at {rep.when} of {msg}" + self.write_sep("_", msg, red=True, bold=True) + self._outrep_summary(rep) + + def _outrep_summary(self, rep: BaseReport) -> None: + rep.toterminal(self._tw) + showcapture = self.config.option.showcapture + if showcapture == "no": + return + for secname, content in rep.sections: + if showcapture != "all" and showcapture not in secname: + continue + self._tw.sep("-", secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + def summary_stats(self) -> None: + if self.verbosity < -1: + return + + session_duration = self._session_start.elapsed() + (parts, main_color) = self.build_summary_stats_line() + line_parts = [] + + display_sep = self.verbosity >= 0 + if display_sep: + fullwidth = self._tw.fullwidth + for text, markup in parts: + with_markup = self._tw.markup(text, **markup) + if display_sep: + fullwidth += len(with_markup) - len(text) + line_parts.append(with_markup) + msg = ", ".join(line_parts) + + main_markup = {main_color: True} + duration = f" in {format_session_duration(session_duration.seconds)}" + duration_with_markup = self._tw.markup(duration, **main_markup) + if display_sep: + fullwidth += len(duration_with_markup) - len(duration) + msg += duration_with_markup + + if display_sep: + markup_for_end_sep = self._tw.markup("", **main_markup) + if markup_for_end_sep.endswith("\x1b[0m"): + markup_for_end_sep = markup_for_end_sep[:-4] + fullwidth += len(markup_for_end_sep) + msg += markup_for_end_sep + + if display_sep: + self.write_sep("=", msg, fullwidth=fullwidth, **main_markup) + else: + self.write_line(msg, **main_markup) + + def short_test_summary(self) -> None: + if not self.reportchars: + return + + def show_simple(lines: list[str], *, stat: str) -> None: + failed = self.stats.get(stat, []) + if not failed: + return + config = self.config + for rep in failed: + color = _color_for_type.get(stat, _color_for_type_default) + line = _get_line_with_reprcrash_message( + config, rep, self._tw, {color: True} + ) + lines.append(line) + + def show_xfailed(lines: list[str]) -> None: + xfailed = self.stats.get("xfailed", []) + for rep in xfailed: + verbose_word, verbose_markup = rep._get_verbose_word_with_markup( + self.config, {_color_for_type["warnings"]: True} + ) + markup_word = self._tw.markup(verbose_word, **verbose_markup) + nodeid = _get_node_id_with_markup(self._tw, self.config, rep) + line = f"{markup_word} {nodeid}" + reason = rep.wasxfail + if reason: + line += " - " + str(reason) + + lines.append(line) + + def show_xpassed(lines: list[str]) -> None: + xpassed = self.stats.get("xpassed", []) + for rep in xpassed: + verbose_word, verbose_markup = rep._get_verbose_word_with_markup( + self.config, {_color_for_type["warnings"]: True} + ) + markup_word = self._tw.markup(verbose_word, **verbose_markup) + nodeid = _get_node_id_with_markup(self._tw, self.config, rep) + line = f"{markup_word} {nodeid}" + reason = rep.wasxfail + if reason: + line += " - " + str(reason) + lines.append(line) + + def show_skipped_folded(lines: list[str]) -> None: + skipped: list[CollectReport] = self.stats.get("skipped", []) + fskips = _folded_skips(self.startpath, skipped) if skipped else [] + if not fskips: + return + verbose_word, verbose_markup = skipped[0]._get_verbose_word_with_markup( + self.config, {_color_for_type["warnings"]: True} + ) + markup_word = self._tw.markup(verbose_word, **verbose_markup) + prefix = "Skipped: " + for num, fspath, lineno, reason in fskips: + if reason.startswith(prefix): + reason = reason[len(prefix) :] + if lineno is not None: + lines.append(f"{markup_word} [{num}] {fspath}:{lineno}: {reason}") + else: + lines.append(f"{markup_word} [{num}] {fspath}: {reason}") + + def show_skipped_unfolded(lines: list[str]) -> None: + skipped: list[CollectReport] = self.stats.get("skipped", []) + + for rep in skipped: + assert rep.longrepr is not None + assert isinstance(rep.longrepr, tuple), (rep, rep.longrepr) + assert len(rep.longrepr) == 3, (rep, rep.longrepr) + + verbose_word, verbose_markup = rep._get_verbose_word_with_markup( + self.config, {_color_for_type["warnings"]: True} + ) + markup_word = self._tw.markup(verbose_word, **verbose_markup) + nodeid = _get_node_id_with_markup(self._tw, self.config, rep) + line = f"{markup_word} {nodeid}" + reason = rep.longrepr[2] + if reason: + line += " - " + str(reason) + lines.append(line) + + def show_skipped(lines: list[str]) -> None: + if self.foldskipped: + show_skipped_folded(lines) + else: + show_skipped_unfolded(lines) + + REPORTCHAR_ACTIONS: Mapping[str, Callable[[list[str]], None]] = { + "x": show_xfailed, + "X": show_xpassed, + "f": partial(show_simple, stat="failed"), + "s": show_skipped, + "p": partial(show_simple, stat="passed"), + "E": partial(show_simple, stat="error"), + } + + lines: list[str] = [] + for char in self.reportchars: + action = REPORTCHAR_ACTIONS.get(char) + if action: # skipping e.g. "P" (passed with output) here. + action(lines) + + if lines: + self.write_sep("=", "short test summary info", cyan=True, bold=True) + for line in lines: + self.write_line(line) + + def _get_main_color(self) -> tuple[str, list[str]]: + if self._main_color is None or self._known_types is None or self._is_last_item: + self._set_main_color() + assert self._main_color + assert self._known_types + return self._main_color, self._known_types + + def _determine_main_color(self, unknown_type_seen: bool) -> str: + stats = self.stats + if "failed" in stats or "error" in stats: + main_color = "red" + elif "warnings" in stats or "xpassed" in stats or unknown_type_seen: + main_color = "yellow" + elif "passed" in stats or not self._is_last_item: + main_color = "green" + else: + main_color = "yellow" + return main_color + + def _set_main_color(self) -> None: + unknown_types: list[str] = [] + for found_type in self.stats: + if found_type: # setup/teardown reports have an empty key, ignore them + if found_type not in KNOWN_TYPES and found_type not in unknown_types: + unknown_types.append(found_type) + self._known_types = list(KNOWN_TYPES) + unknown_types + self._main_color = self._determine_main_color(bool(unknown_types)) + + def build_summary_stats_line(self) -> tuple[list[tuple[str, dict[str, bool]]], str]: + """ + Build the parts used in the last summary stats line. + + The summary stats line is the line shown at the end, "=== 12 passed, 2 errors in Xs===". + + This function builds a list of the "parts" that make up for the text in that line, in + the example above it would be:: + + [ + ("12 passed", {"green": True}), + ("2 errors", {"red": True} + ] + + That last dict for each line is a "markup dictionary", used by TerminalWriter to + color output. + + The final color of the line is also determined by this function, and is the second + element of the returned tuple. + """ + if self.config.getoption("collectonly"): + return self._build_collect_only_summary_stats_line() + else: + return self._build_normal_summary_stats_line() + + def _get_reports_to_display(self, key: str) -> list[Any]: + """Get test/collection reports for the given status key, such as `passed` or `error`.""" + reports = self.stats.get(key, []) + return [x for x in reports if getattr(x, "count_towards_summary", True)] + + def _build_normal_summary_stats_line( + self, + ) -> tuple[list[tuple[str, dict[str, bool]]], str]: + main_color, known_types = self._get_main_color() + parts = [] + + for key in known_types: + reports = self._get_reports_to_display(key) + if reports: + count = len(reports) + color = _color_for_type.get(key, _color_for_type_default) + markup = {color: True, "bold": color == main_color} + parts.append(("%d %s" % pluralize(count, key), markup)) # noqa: UP031 + + if not parts: + parts = [("no tests ran", {_color_for_type_default: True})] + + return parts, main_color + + def _build_collect_only_summary_stats_line( + self, + ) -> tuple[list[tuple[str, dict[str, bool]]], str]: + deselected = len(self._get_reports_to_display("deselected")) + errors = len(self._get_reports_to_display("error")) + + if self._numcollected == 0: + parts = [("no tests collected", {"yellow": True})] + main_color = "yellow" + + elif deselected == 0: + main_color = "green" + collected_output = "%d %s collected" % pluralize(self._numcollected, "test") # noqa: UP031 + parts = [(collected_output, {main_color: True})] + else: + all_tests_were_deselected = self._numcollected == deselected + if all_tests_were_deselected: + main_color = "yellow" + collected_output = f"no tests collected ({deselected} deselected)" + else: + main_color = "green" + selected = self._numcollected - deselected + collected_output = f"{selected}/{self._numcollected} tests collected ({deselected} deselected)" + + parts = [(collected_output, {main_color: True})] + + if errors: + main_color = _color_for_type["error"] + parts += [("%d %s" % pluralize(errors, "error"), {main_color: True})] # noqa: UP031 + + return parts, main_color + + +def _get_node_id_with_markup(tw: TerminalWriter, config: Config, rep: BaseReport): + nodeid = config.cwd_relative_nodeid(rep.nodeid) + path, *parts = nodeid.split("::") + if parts: + parts_markup = tw.markup("::".join(parts), bold=True) + return path + "::" + parts_markup + else: + return path + + +def _format_trimmed(format: str, msg: str, available_width: int) -> str | None: + """Format msg into format, ellipsizing it if doesn't fit in available_width. + + Returns None if even the ellipsis can't fit. + """ + # Only use the first line. + i = msg.find("\n") + if i != -1: + msg = msg[:i] + + ellipsis = "..." + format_width = wcswidth(format.format("")) + if format_width + len(ellipsis) > available_width: + return None + + if format_width + wcswidth(msg) > available_width: + available_width -= len(ellipsis) + msg = msg[:available_width] + while format_width + wcswidth(msg) > available_width: + msg = msg[:-1] + msg += ellipsis + + return format.format(msg) + + +def _get_line_with_reprcrash_message( + config: Config, rep: BaseReport, tw: TerminalWriter, word_markup: dict[str, bool] +) -> str: + """Get summary line for a report, trying to add reprcrash message.""" + verbose_word, verbose_markup = rep._get_verbose_word_with_markup( + config, word_markup + ) + word = tw.markup(verbose_word, **verbose_markup) + node = _get_node_id_with_markup(tw, config, rep) + + line = f"{word} {node}" + line_width = wcswidth(line) + + msg: str | None + try: + if isinstance(rep.longrepr, str): + msg = rep.longrepr + else: + # Type ignored intentionally -- possible AttributeError expected. + msg = rep.longrepr.reprcrash.message # type: ignore[union-attr] + except AttributeError: + pass + else: + if ( + running_on_ci() or config.option.verbose >= 2 + ) and not config.option.force_short_summary: + msg = f" - {msg}" + else: + available_width = tw.fullwidth - line_width + msg = _format_trimmed(" - {}", msg, available_width) + if msg is not None: + line += msg + + return line + + +def _folded_skips( + startpath: Path, + skipped: Sequence[CollectReport], +) -> list[tuple[int, str, int | None, str]]: + d: dict[tuple[str, int | None, str], list[CollectReport]] = {} + for event in skipped: + assert event.longrepr is not None + assert isinstance(event.longrepr, tuple), (event, event.longrepr) + assert len(event.longrepr) == 3, (event, event.longrepr) + fspath, lineno, reason = event.longrepr + # For consistency, report all fspaths in relative form. + fspath = bestrelpath(startpath, Path(fspath)) + keywords = getattr(event, "keywords", {}) + # Folding reports with global pytestmark variable. + # This is a workaround, because for now we cannot identify the scope of a skip marker + # TODO: Revisit after marks scope would be fixed. + if ( + event.when == "setup" + and "skip" in keywords + and "pytestmark" not in keywords + ): + key: tuple[str, int | None, str] = (fspath, None, reason) + else: + key = (fspath, lineno, reason) + d.setdefault(key, []).append(event) + values: list[tuple[int, str, int | None, str]] = [] + for key, events in d.items(): + values.append((len(events), *key)) + return values + + +_color_for_type = { + "failed": "red", + "error": "red", + "warnings": "yellow", + "passed": "green", + "subtests passed": "green", + "subtests failed": "red", +} +_color_for_type_default = "yellow" + + +def pluralize(count: int, noun: str) -> tuple[int, str]: + # No need to pluralize words such as `failed` or `passed`. + if noun not in ["error", "warnings", "test"]: + return count, noun + + # The `warnings` key is plural. To avoid API breakage, we keep it that way but + # set it to singular here so we can determine plurality in the same way as we do + # for `error`. + noun = noun.replace("warnings", "warning") + + return count, noun + "s" if count != 1 else noun + + +def _plugin_nameversions(plugininfo) -> list[str]: + values: list[str] = [] + for plugin, dist in plugininfo: + # Gets us name and version! + name = f"{dist.project_name}-{dist.version}" + # Questionable convenience, but it keeps things short. + if name.startswith("pytest-"): + name = name[7:] + # We decided to print python package names they can have more than one plugin. + if name not in values: + values.append(name) + return values + + +def format_session_duration(seconds: float) -> str: + """Format the given seconds in a human readable manner to show in the final summary.""" + if seconds < 60: + return f"{seconds:.2f}s" + else: + dt = datetime.timedelta(seconds=int(seconds)) + return f"{seconds:.2f}s ({dt})" + + +def format_node_duration(seconds: float) -> str: + """Format the given seconds in a human readable manner to show in the test progress.""" + # The formatting is designed to be compact and readable, with at most 7 characters + # for durations below 100 hours. + if seconds < 0.00001: + return f" {seconds * 1000000:.3f}us" + if seconds < 0.0001: + return f" {seconds * 1000000:.2f}us" + if seconds < 0.001: + return f" {seconds * 1000000:.1f}us" + if seconds < 0.01: + return f" {seconds * 1000:.3f}ms" + if seconds < 0.1: + return f" {seconds * 1000:.2f}ms" + if seconds < 1: + return f" {seconds * 1000:.1f}ms" + if seconds < 60: + return f" {seconds:.3f}s" + if seconds < 3600: + return f" {seconds // 60:.0f}m {seconds % 60:.0f}s" + return f" {seconds // 3600:.0f}h {(seconds % 3600) // 60:.0f}m" + + +def _get_raw_skip_reason(report: TestReport) -> str: + """Get the reason string of a skip/xfail/xpass test report. + + The string is just the part given by the user. + """ + if hasattr(report, "wasxfail"): + reason = report.wasxfail + if reason.startswith("reason: "): + reason = reason[len("reason: ") :] + return reason + else: + assert report.skipped + assert isinstance(report.longrepr, tuple) + _, _, reason = report.longrepr + if reason.startswith("Skipped: "): + reason = reason[len("Skipped: ") :] + elif reason == "Skipped": + reason = "" + return reason + + +class TerminalProgressPlugin: + """Terminal progress reporting plugin using OSC 9;4 ANSI sequences. + + Emits OSC 9;4 sequences to indicate test progress to terminal + tabs/windows/etc. + + Not all terminal emulators support this feature. + + Ref: https://conemu.github.io/en/AnsiEscapeCodes.html#ConEmu_specific_OSC + """ + + def __init__(self, tr: TerminalReporter) -> None: + self._tr = tr + self._session: Session | None = None + self._has_failures = False + + def _emit_progress( + self, + state: Literal["remove", "normal", "error", "indeterminate", "paused"], + progress: int | None = None, + ) -> None: + """Emit OSC 9;4 sequence for indicating progress to the terminal. + + :param state: + Progress state to set. + :param progress: + Progress value 0-100. Required for "normal", optional for "error" + and "paused", otherwise ignored. + """ + assert progress is None or 0 <= progress <= 100 + + # OSC 9;4 sequence: ESC ] 9 ; 4 ; state ; progress ST + # ST can be ESC \ or BEL. ESC \ seems better supported. + match state: + case "remove": + sequence = "\x1b]9;4;0;\x1b\\" + case "normal": + assert progress is not None + sequence = f"\x1b]9;4;1;{progress}\x1b\\" + case "error": + if progress is not None: + sequence = f"\x1b]9;4;2;{progress}\x1b\\" + else: + sequence = "\x1b]9;4;2;\x1b\\" + case "indeterminate": + sequence = "\x1b]9;4;3;\x1b\\" + case "paused": + if progress is not None: + sequence = f"\x1b]9;4;4;{progress}\x1b\\" + else: + sequence = "\x1b]9;4;4;\x1b\\" + + self._tr.write_raw(sequence, flush=True) + + @hookimpl + def pytest_sessionstart(self, session: Session) -> None: + self._session = session + # Show indeterminate progress during collection. + self._emit_progress("indeterminate") + + @hookimpl + def pytest_collection_finish(self) -> None: + assert self._session is not None + if self._session.testscollected > 0: + # Switch from indeterminate to 0% progress. + self._emit_progress("normal", 0) + + @hookimpl + def pytest_runtest_logreport(self, report: TestReport) -> None: + if report.failed: + self._has_failures = True + + # Let's consider the "call" phase for progress. + if report.when != "call": + return + + # Calculate and emit progress. + assert self._session is not None + collected = self._session.testscollected + if collected > 0: + reported = self._tr.reported_progress + progress = min(reported * 100 // collected, 100) + self._emit_progress("error" if self._has_failures else "normal", progress) + + @hookimpl + def pytest_sessionfinish(self) -> None: + self._emit_progress("remove") diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/threadexception.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/threadexception.py new file mode 100644 index 0000000..eb57783 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/threadexception.py @@ -0,0 +1,152 @@ +from __future__ import annotations + +import collections +from collections.abc import Callable +import functools +import sys +import threading +import traceback +from typing import NamedTuple +from typing import TYPE_CHECKING +import warnings + +from _pytest.config import Config +from _pytest.nodes import Item +from _pytest.stash import StashKey +from _pytest.tracemalloc import tracemalloc_message +import pytest + + +if TYPE_CHECKING: + pass + +if sys.version_info < (3, 11): + from exceptiongroup import ExceptionGroup + + +class ThreadExceptionMeta(NamedTuple): + msg: str + cause_msg: str + exc_value: BaseException | None + + +thread_exceptions: StashKey[collections.deque[ThreadExceptionMeta | BaseException]] = ( + StashKey() +) + + +def collect_thread_exception(config: Config) -> None: + pop_thread_exception = config.stash[thread_exceptions].pop + errors: list[pytest.PytestUnhandledThreadExceptionWarning | RuntimeError] = [] + meta = None + hook_error = None + try: + while True: + try: + meta = pop_thread_exception() + except IndexError: + break + + if isinstance(meta, BaseException): + hook_error = RuntimeError("Failed to process thread exception") + hook_error.__cause__ = meta + errors.append(hook_error) + continue + + msg = meta.msg + try: + warnings.warn(pytest.PytestUnhandledThreadExceptionWarning(msg)) + except pytest.PytestUnhandledThreadExceptionWarning as e: + # This except happens when the warning is treated as an error (e.g. `-Werror`). + if meta.exc_value is not None: + # Exceptions have a better way to show the traceback, but + # warnings do not, so hide the traceback from the msg and + # set the cause so the traceback shows up in the right place. + e.args = (meta.cause_msg,) + e.__cause__ = meta.exc_value + errors.append(e) + + if len(errors) == 1: + raise errors[0] + if errors: + raise ExceptionGroup("multiple thread exception warnings", errors) + finally: + del errors, meta, hook_error + + +def cleanup( + *, config: Config, prev_hook: Callable[[threading.ExceptHookArgs], object] +) -> None: + try: + try: + # We don't join threads here, so exceptions raised from any + # threads still running by the time _threading_atexits joins them + # do not get captured (see #13027). + collect_thread_exception(config) + finally: + threading.excepthook = prev_hook + finally: + del config.stash[thread_exceptions] + + +def thread_exception_hook( + args: threading.ExceptHookArgs, + /, + *, + append: Callable[[ThreadExceptionMeta | BaseException], object], +) -> None: + try: + # we need to compute these strings here as they might change after + # the excepthook finishes and before the metadata object is + # collected by a pytest hook + thread_name = "" if args.thread is None else args.thread.name + summary = f"Exception in thread {thread_name}" + traceback_message = "\n\n" + "".join( + traceback.format_exception( + args.exc_type, + args.exc_value, + args.exc_traceback, + ) + ) + tracemalloc_tb = "\n" + tracemalloc_message(args.thread) + msg = summary + traceback_message + tracemalloc_tb + cause_msg = summary + tracemalloc_tb + + append( + ThreadExceptionMeta( + # Compute these strings here as they might change later + msg=msg, + cause_msg=cause_msg, + exc_value=args.exc_value, + ) + ) + except BaseException as e: + append(e) + # Raising this will cause the exception to be logged twice, once in our + # collect_thread_exception and once by sys.excepthook + # which is fine - this should never happen anyway and if it does + # it should probably be reported as a pytest bug. + raise + + +def pytest_configure(config: Config) -> None: + prev_hook = threading.excepthook + deque: collections.deque[ThreadExceptionMeta | BaseException] = collections.deque() + config.stash[thread_exceptions] = deque + config.add_cleanup(functools.partial(cleanup, config=config, prev_hook=prev_hook)) + threading.excepthook = functools.partial(thread_exception_hook, append=deque.append) + + +@pytest.hookimpl(trylast=True) +def pytest_runtest_setup(item: Item) -> None: + collect_thread_exception(item.config) + + +@pytest.hookimpl(trylast=True) +def pytest_runtest_call(item: Item) -> None: + collect_thread_exception(item.config) + + +@pytest.hookimpl(trylast=True) +def pytest_runtest_teardown(item: Item) -> None: + collect_thread_exception(item.config) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/timing.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/timing.py new file mode 100644 index 0000000..51c3db2 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/timing.py @@ -0,0 +1,95 @@ +"""Indirection for time functions. + +We intentionally grab some "time" functions internally to avoid tests mocking "time" to affect +pytest runtime information (issue #185). + +Fixture "mock_timing" also interacts with this module for pytest's own tests. +""" + +from __future__ import annotations + +import dataclasses +from datetime import datetime +from datetime import timezone +from time import perf_counter +from time import sleep +from time import time +from typing import TYPE_CHECKING + + +if TYPE_CHECKING: + from pytest import MonkeyPatch + + +@dataclasses.dataclass(frozen=True) +class Instant: + """ + Represents an instant in time, used to both get the timestamp value and to measure + the duration of a time span. + + Inspired by Rust's `std::time::Instant`. + """ + + # Creation time of this instant, using time.time(), to measure actual time. + # Note: using a `lambda` to correctly get the mocked time via `MockTiming`. + time: float = dataclasses.field(default_factory=lambda: time(), init=False) + + # Performance counter tick of the instant, used to measure precise elapsed time. + # Note: using a `lambda` to correctly get the mocked time via `MockTiming`. + perf_count: float = dataclasses.field( + default_factory=lambda: perf_counter(), init=False + ) + + def elapsed(self) -> Duration: + """Measure the duration since `Instant` was created.""" + return Duration(start=self, stop=Instant()) + + def as_utc(self) -> datetime: + """Instant as UTC datetime.""" + return datetime.fromtimestamp(self.time, timezone.utc) + + +@dataclasses.dataclass(frozen=True) +class Duration: + """A span of time as measured by `Instant.elapsed()`.""" + + start: Instant + stop: Instant + + @property + def seconds(self) -> float: + """Elapsed time of the duration in seconds, measured using a performance counter for precise timing.""" + return self.stop.perf_count - self.start.perf_count + + +@dataclasses.dataclass +class MockTiming: + """Mocks _pytest.timing with a known object that can be used to control timing in tests + deterministically. + + pytest itself should always use functions from `_pytest.timing` instead of `time` directly. + + This then allows us more control over time during testing, if testing code also + uses `_pytest.timing` functions. + + Time is static, and only advances through `sleep` calls, thus tests might sleep over large + numbers and obtain accurate time() calls at the end, making tests reliable and instant.""" + + _current_time: float = datetime(2020, 5, 22, 14, 20, 50).timestamp() + + def sleep(self, seconds: float) -> None: + self._current_time += seconds + + def time(self) -> float: + return self._current_time + + def patch(self, monkeypatch: MonkeyPatch) -> None: + # pylint: disable-next=import-self + from _pytest import timing # noqa: PLW0406 + + monkeypatch.setattr(timing, "sleep", self.sleep) + monkeypatch.setattr(timing, "time", self.time) + monkeypatch.setattr(timing, "perf_counter", self.time) + + +__all__ = ["perf_counter", "sleep", "time"] diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/tmpdir.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/tmpdir.py new file mode 100644 index 0000000..dcd5784 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/tmpdir.py @@ -0,0 +1,312 @@ +# mypy: allow-untyped-defs +"""Support for providing temporary directories to test functions.""" + +from __future__ import annotations + +from collections.abc import Generator +import dataclasses +import os +from pathlib import Path +import re +from shutil import rmtree +import tempfile +from typing import Any +from typing import final +from typing import Literal + +from .pathlib import cleanup_dead_symlinks +from .pathlib import LOCK_TIMEOUT +from .pathlib import make_numbered_dir +from .pathlib import make_numbered_dir_with_cleanup +from .pathlib import rm_rf +from _pytest.compat import get_user_id +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Item +from _pytest.reports import TestReport +from _pytest.stash import StashKey + + +tmppath_result_key = StashKey[dict[str, bool]]() +RetentionType = Literal["all", "failed", "none"] + + +@final +@dataclasses.dataclass +class TempPathFactory: + """Factory for temporary directories under the common base temp directory, + as discussed at :ref:`temporary directory location and retention`. + """ + + _given_basetemp: Path | None + # pluggy TagTracerSub, not currently exposed, so Any. + _trace: Any + _basetemp: Path | None + _retention_count: int + _retention_policy: RetentionType + + def __init__( + self, + given_basetemp: Path | None, + retention_count: int, + retention_policy: RetentionType, + trace, + basetemp: Path | None = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + if given_basetemp is None: + self._given_basetemp = None + else: + # Use os.path.abspath() to get absolute path instead of resolve() as it + # does not work the same in all platforms (see #4427). + # Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012). + self._given_basetemp = Path(os.path.abspath(str(given_basetemp))) + self._trace = trace + self._retention_count = retention_count + self._retention_policy = retention_policy + self._basetemp = basetemp + + @classmethod + def from_config( + cls, + config: Config, + *, + _ispytest: bool = False, + ) -> TempPathFactory: + """Create a factory according to pytest configuration. + + :meta private: + """ + check_ispytest(_ispytest) + count = int(config.getini("tmp_path_retention_count")) + if count < 0: + raise ValueError( + f"tmp_path_retention_count must be >= 0. Current input: {count}." + ) + + policy = config.getini("tmp_path_retention_policy") + if policy not in ("all", "failed", "none"): + raise ValueError( + f"tmp_path_retention_policy must be either all, failed, none. Current input: {policy}." + ) + + return cls( + given_basetemp=config.option.basetemp, + trace=config.trace.get("tmpdir"), + retention_count=count, + retention_policy=policy, + _ispytest=True, + ) + + def _ensure_relative_to_basetemp(self, basename: str) -> str: + basename = os.path.normpath(basename) + if (self.getbasetemp() / basename).resolve().parent != self.getbasetemp(): + raise ValueError(f"{basename} is not a normalized and relative path") + return basename + + def mktemp(self, basename: str, numbered: bool = True) -> Path: + """Create a new temporary directory managed by the factory. + + :param basename: + Directory base name, must be a relative path. + + :param numbered: + If ``True``, ensure the directory is unique by adding a numbered + suffix greater than any existing one: ``basename="foo-"`` and ``numbered=True`` + means that this function will create directories named ``"foo-0"``, + ``"foo-1"``, ``"foo-2"`` and so on. + + :returns: + The path to the new directory. + """ + basename = self._ensure_relative_to_basetemp(basename) + if not numbered: + p = self.getbasetemp().joinpath(basename) + p.mkdir(mode=0o700) + else: + p = make_numbered_dir(root=self.getbasetemp(), prefix=basename, mode=0o700) + self._trace("mktemp", p) + return p + + def getbasetemp(self) -> Path: + """Return the base temporary directory, creating it if needed. + + :returns: + The base temporary directory. + """ + if self._basetemp is not None: + return self._basetemp + + if self._given_basetemp is not None: + basetemp = self._given_basetemp + if basetemp.exists(): + rm_rf(basetemp) + basetemp.mkdir(mode=0o700) + basetemp = basetemp.resolve() + else: + from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT") + temproot = Path(from_env or tempfile.gettempdir()).resolve() + user = get_user() or "unknown" + # use a sub-directory in the temproot to speed-up + # make_numbered_dir() call + rootdir = temproot.joinpath(f"pytest-of-{user}") + try: + rootdir.mkdir(mode=0o700, exist_ok=True) + except OSError: + # getuser() likely returned illegal characters for the platform, use unknown back off mechanism + rootdir = temproot.joinpath("pytest-of-unknown") + rootdir.mkdir(mode=0o700, exist_ok=True) + # Because we use exist_ok=True with a predictable name, make sure + # we are the owners, to prevent any funny business (on unix, where + # temproot is usually shared). + # Also, to keep things private, fixup any world-readable temp + # rootdir's permissions. Historically 0o755 was used, so we can't + # just error out on this, at least for a while. + uid = get_user_id() + if uid is not None: + rootdir_stat = rootdir.stat() + if rootdir_stat.st_uid != uid: + raise OSError( + f"The temporary directory {rootdir} is not owned by the current user. " + "Fix this and try again." + ) + if (rootdir_stat.st_mode & 0o077) != 0: + os.chmod(rootdir, rootdir_stat.st_mode & ~0o077) + keep = self._retention_count + if self._retention_policy == "none": + keep = 0 + basetemp = make_numbered_dir_with_cleanup( + prefix="pytest-", + root=rootdir, + keep=keep, + lock_timeout=LOCK_TIMEOUT, + mode=0o700, + ) + assert basetemp is not None, basetemp + self._basetemp = basetemp + self._trace("new basetemp", basetemp) + return basetemp + + +def get_user() -> str | None: + """Return the current user name, or None if getuser() does not work + in the current environment (see #1010).""" + try: + # In some exotic environments, getpass may not be importable. + import getpass + + return getpass.getuser() + except (ImportError, OSError, KeyError): + return None + + +def pytest_configure(config: Config) -> None: + """Create a TempPathFactory and attach it to the config object. + + This is to comply with existing plugins which expect the handler to be + available at pytest_configure time, but ideally should be moved entirely + to the tmp_path_factory session fixture. + """ + mp = MonkeyPatch() + config.add_cleanup(mp.undo) + _tmp_path_factory = TempPathFactory.from_config(config, _ispytest=True) + mp.setattr(config, "_tmp_path_factory", _tmp_path_factory, raising=False) + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "tmp_path_retention_count", + help="How many sessions should we keep the `tmp_path` directories, according to `tmp_path_retention_policy`.", + default=3, + ) + + parser.addini( + "tmp_path_retention_policy", + help="Controls which directories created by the `tmp_path` fixture are kept around, based on test outcome. " + "(all/failed/none)", + default="all", + ) + + +@fixture(scope="session") +def tmp_path_factory(request: FixtureRequest) -> TempPathFactory: + """Return a :class:`pytest.TempPathFactory` instance for the test session.""" + # Set dynamically by pytest_configure() above. + return request.config._tmp_path_factory # type: ignore + + +def _mk_tmp(request: FixtureRequest, factory: TempPathFactory) -> Path: + name = request.node.name + name = re.sub(r"[\W]", "_", name) + MAXVAL = 30 + name = name[:MAXVAL] + return factory.mktemp(name, numbered=True) + + +@fixture +def tmp_path( + request: FixtureRequest, tmp_path_factory: TempPathFactory +) -> Generator[Path]: + """Return a temporary directory (as :class:`pathlib.Path` object) + which is unique to each test function invocation. + The temporary directory is created as a subdirectory + of the base temporary directory, with configurable retention, + as discussed in :ref:`temporary directory location and retention`. + """ + path = _mk_tmp(request, tmp_path_factory) + yield path + + # Remove the tmpdir if the policy is "failed" and the test passed. + policy = tmp_path_factory._retention_policy + result_dict = request.node.stash[tmppath_result_key] + + if policy == "failed" and result_dict.get("call", True): + # We do a "best effort" to remove files, but it might not be possible due to some leaked resource, + # permissions, etc, in which case we ignore it. + rmtree(path, ignore_errors=True) + + del request.node.stash[tmppath_result_key] + + +def pytest_sessionfinish(session, exitstatus: int | ExitCode): + """After each session, remove base directory if all the tests passed, + the policy is "failed", and the basetemp is not specified by a user. + """ + tmp_path_factory: TempPathFactory = session.config._tmp_path_factory + basetemp = tmp_path_factory._basetemp + if basetemp is None: + return + + policy = tmp_path_factory._retention_policy + if ( + exitstatus == 0 + and policy == "failed" + and tmp_path_factory._given_basetemp is None + ): + if basetemp.is_dir(): + # We do a "best effort" to remove files, but it might not be possible due to some leaked resource, + # permissions, etc, in which case we ignore it. + rmtree(basetemp, ignore_errors=True) + + # Remove dead symlinks. + if basetemp.is_dir(): + cleanup_dead_symlinks(basetemp) + + +@hookimpl(wrapper=True, tryfirst=True) +def pytest_runtest_makereport( + item: Item, call +) -> Generator[None, TestReport, TestReport]: + rep = yield + assert rep.when is not None + empty: dict[str, bool] = {} + item.stash.setdefault(tmppath_result_key, empty)[rep.when] = rep.passed + return rep diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/tracemalloc.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/tracemalloc.py new file mode 100644 index 0000000..5d0b198 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/tracemalloc.py @@ -0,0 +1,24 @@ +from __future__ import annotations + + +def tracemalloc_message(source: object) -> str: + if source is None: + return "" + + try: + import tracemalloc + except ImportError: + return "" + + tb = tracemalloc.get_object_traceback(source) + if tb is not None: + formatted_tb = "\n".join(tb.format()) + # Use a leading new line to better separate the (large) output + # from the traceback to the previous warning text. + return f"\nObject allocated at:\n{formatted_tb}" + # No need for a leading new line. + url = "https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings" + return ( + "Enable tracemalloc to get traceback where the object was allocated.\n" + f"See {url} for more info." + ) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/unittest.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/unittest.py new file mode 100644 index 0000000..6432105 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/unittest.py @@ -0,0 +1,606 @@ +# mypy: allow-untyped-defs +"""Discover and run std-library "unittest" style tests.""" + +from __future__ import annotations + +from collections.abc import Callable +from collections.abc import Generator +from collections.abc import Iterable +from collections.abc import Iterator +from enum import auto +from enum import Enum +import inspect +import sys +import traceback +import types +from typing import Any +from typing import TYPE_CHECKING +from unittest import TestCase + +import _pytest._code +from _pytest._code import ExceptionInfo +from _pytest.compat import assert_never +from _pytest.compat import is_async_function +from _pytest.config import hookimpl +from _pytest.fixtures import FixtureRequest +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import exit +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.outcomes import xfail +from _pytest.python import Class +from _pytest.python import Function +from _pytest.python import Module +from _pytest.runner import CallInfo +from _pytest.runner import check_interactive_exception +from _pytest.subtests import SubtestContext +from _pytest.subtests import SubtestReport + + +if sys.version_info[:2] < (3, 11): + from exceptiongroup import ExceptionGroup + +if TYPE_CHECKING: + from types import TracebackType + import unittest + + import twisted.trial.unittest + + +_SysExcInfoType = ( + tuple[type[BaseException], BaseException, types.TracebackType] + | tuple[None, None, None] +) + + +def pytest_pycollect_makeitem( + collector: Module | Class, name: str, obj: object +) -> UnitTestCase | None: + try: + # Has unittest been imported? + ut = sys.modules["unittest"] + # Is obj a subclass of unittest.TestCase? + # Type ignored because `ut` is an opaque module. + if not issubclass(obj, ut.TestCase): # type: ignore + return None + except Exception: + return None + # Is obj a concrete class? + # Abstract classes can't be instantiated so no point collecting them. + if inspect.isabstract(obj): + return None + # Yes, so let's collect it. + return UnitTestCase.from_parent(collector, name=name, obj=obj) + + +class UnitTestCase(Class): + # Marker for fixturemanger.getfixtureinfo() + # to declare that our children do not support funcargs. + nofuncargs = True + + def newinstance(self): + # TestCase __init__ takes the method (test) name. The TestCase + # constructor treats the name "runTest" as a special no-op, so it can be + # used when a dummy instance is needed. While unittest.TestCase has a + # default, some subclasses omit the default (#9610), so always supply + # it. + return self.obj("runTest") + + def collect(self) -> Iterable[Item | Collector]: + from unittest import TestLoader + + cls = self.obj + if not getattr(cls, "__test__", True): + return + + skipped = _is_skipped(cls) + if not skipped: + self._register_unittest_setup_method_fixture(cls) + self._register_unittest_setup_class_fixture(cls) + self._register_setup_class_fixture() + + self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid) + + loader = TestLoader() + foundsomething = False + for name in loader.getTestCaseNames(self.obj): + x = getattr(self.obj, name) + if not getattr(x, "__test__", True): + continue + yield TestCaseFunction.from_parent(self, name=name) + foundsomething = True + + if not foundsomething: + runtest = getattr(self.obj, "runTest", None) + if runtest is not None: + ut = sys.modules.get("twisted.trial.unittest", None) + if ut is None or runtest != ut.TestCase.runTest: + yield TestCaseFunction.from_parent(self, name="runTest") + + def _register_unittest_setup_class_fixture(self, cls: type) -> None: + """Register an auto-use fixture to invoke setUpClass and + tearDownClass (#517).""" + setup = getattr(cls, "setUpClass", None) + teardown = getattr(cls, "tearDownClass", None) + if setup is None and teardown is None: + return None + cleanup = getattr(cls, "doClassCleanups", lambda: None) + + def process_teardown_exceptions() -> None: + # tearDown_exceptions is a list set in the class containing exc_infos for errors during + # teardown for the class. + exc_infos = getattr(cls, "tearDown_exceptions", None) + if not exc_infos: + return + exceptions = [exc for (_, exc, _) in exc_infos] + # If a single exception, raise it directly as this provides a more readable + # error (hopefully this will improve in #12255). + if len(exceptions) == 1: + raise exceptions[0] + else: + raise ExceptionGroup("Unittest class cleanup errors", exceptions) + + def unittest_setup_class_fixture( + request: FixtureRequest, + ) -> Generator[None]: + cls = request.cls + if _is_skipped(cls): + reason = cls.__unittest_skip_why__ + raise skip.Exception(reason, _use_item_location=True) + if setup is not None: + try: + setup() + # unittest does not call the cleanup function for every BaseException, so we + # follow this here. + except Exception: + cleanup() + process_teardown_exceptions() + raise + yield + try: + if teardown is not None: + teardown() + finally: + cleanup() + process_teardown_exceptions() + + self.session._fixturemanager._register_fixture( + # Use a unique name to speed up lookup. + name=f"_unittest_setUpClass_fixture_{cls.__qualname__}", + func=unittest_setup_class_fixture, + nodeid=self.nodeid, + scope="class", + autouse=True, + ) + + def _register_unittest_setup_method_fixture(self, cls: type) -> None: + """Register an auto-use fixture to invoke setup_method and + teardown_method (#517).""" + setup = getattr(cls, "setup_method", None) + teardown = getattr(cls, "teardown_method", None) + if setup is None and teardown is None: + return None + + def unittest_setup_method_fixture( + request: FixtureRequest, + ) -> Generator[None]: + self = request.instance + if _is_skipped(self): + reason = self.__unittest_skip_why__ + raise skip.Exception(reason, _use_item_location=True) + if setup is not None: + setup(self, request.function) + yield + if teardown is not None: + teardown(self, request.function) + + self.session._fixturemanager._register_fixture( + # Use a unique name to speed up lookup. + name=f"_unittest_setup_method_fixture_{cls.__qualname__}", + func=unittest_setup_method_fixture, + nodeid=self.nodeid, + scope="function", + autouse=True, + ) + + +class TestCaseFunction(Function): + nofuncargs = True + failfast = False + _excinfo: list[_pytest._code.ExceptionInfo[BaseException]] | None = None + + def _getinstance(self): + assert isinstance(self.parent, UnitTestCase) + return self.parent.obj(self.name) + + # Backward compat for pytest-django; can be removed after pytest-django + # updates + some slack. + @property + def _testcase(self): + return self.instance + + def setup(self) -> None: + # A bound method to be called during teardown() if set (see 'runtest()'). + self._explicit_tearDown: Callable[[], None] | None = None + super().setup() + + def teardown(self) -> None: + if self._explicit_tearDown is not None: + self._explicit_tearDown() + self._explicit_tearDown = None + self._obj = None + del self._instance + super().teardown() + + def startTest(self, testcase: unittest.TestCase) -> None: + pass + + def _addexcinfo(self, rawexcinfo: _SysExcInfoType) -> None: + rawexcinfo = _handle_twisted_exc_info(rawexcinfo) + try: + excinfo = _pytest._code.ExceptionInfo[BaseException].from_exc_info( + rawexcinfo # type: ignore[arg-type] + ) + # Invoke the attributes to trigger storing the traceback + # trial causes some issue there. + _ = excinfo.value + _ = excinfo.traceback + except TypeError: + try: + try: + values = traceback.format_exception(*rawexcinfo) + values.insert( + 0, + "NOTE: Incompatible Exception Representation, " + "displaying natively:\n\n", + ) + fail("".join(values), pytrace=False) + except (fail.Exception, KeyboardInterrupt): + raise + except BaseException: + fail( + "ERROR: Unknown Incompatible Exception " + f"representation:\n{rawexcinfo!r}", + pytrace=False, + ) + except KeyboardInterrupt: + raise + except fail.Exception: + excinfo = _pytest._code.ExceptionInfo.from_current() + self.__dict__.setdefault("_excinfo", []).append(excinfo) + + def addError( + self, testcase: unittest.TestCase, rawexcinfo: _SysExcInfoType + ) -> None: + try: + if isinstance(rawexcinfo[1], exit.Exception): + exit(rawexcinfo[1].msg) + except TypeError: + pass + self._addexcinfo(rawexcinfo) + + def addFailure( + self, testcase: unittest.TestCase, rawexcinfo: _SysExcInfoType + ) -> None: + self._addexcinfo(rawexcinfo) + + def addSkip( + self, testcase: unittest.TestCase, reason: str, *, handle_subtests: bool = True + ) -> None: + from unittest.case import _SubTest # type: ignore[attr-defined] + + def add_skip() -> None: + try: + raise skip.Exception(reason, _use_item_location=True) + except skip.Exception: + self._addexcinfo(sys.exc_info()) + + if not handle_subtests: + add_skip() + return + + if isinstance(testcase, _SubTest): + add_skip() + if self._excinfo is not None: + exc_info = self._excinfo[-1] + self.addSubTest(testcase.test_case, testcase, exc_info) + else: + # For python < 3.11: the non-subtest skips have to be added by `add_skip` only after all subtest + # failures are processed by `_addSubTest`: `self.instance._outcome` has no attribute + # `skipped/errors` anymore. + # We also need to check if `self.instance._outcome` is `None` (this happens if the test + # class/method is decorated with `unittest.skip`, see pytest-dev/pytest-subtests#173). + if sys.version_info < (3, 11) and self.instance._outcome is not None: + subtest_errors = [ + x + for x, y in self.instance._outcome.errors + if isinstance(x, _SubTest) and y is not None + ] + if len(subtest_errors) == 0: + add_skip() + else: + add_skip() + + def addExpectedFailure( + self, + testcase: unittest.TestCase, + rawexcinfo: _SysExcInfoType, + reason: str = "", + ) -> None: + try: + xfail(str(reason)) + except xfail.Exception: + self._addexcinfo(sys.exc_info()) + + def addUnexpectedSuccess( + self, + testcase: unittest.TestCase, + reason: twisted.trial.unittest.Todo | None = None, + ) -> None: + msg = "Unexpected success" + if reason: + msg += f": {reason.reason}" + # Preserve unittest behaviour - fail the test. Explicitly not an XPASS. + try: + fail(msg, pytrace=False) + except fail.Exception: + self._addexcinfo(sys.exc_info()) + + def addSuccess(self, testcase: unittest.TestCase) -> None: + pass + + def stopTest(self, testcase: unittest.TestCase) -> None: + pass + + def addDuration(self, testcase: unittest.TestCase, elapsed: float) -> None: + pass + + def runtest(self) -> None: + from _pytest.debugging import maybe_wrap_pytest_function_for_tracing + + testcase = self.instance + assert testcase is not None + + maybe_wrap_pytest_function_for_tracing(self) + + # Let the unittest framework handle async functions. + if is_async_function(self.obj): + testcase(result=self) + else: + # When --pdb is given, we want to postpone calling tearDown() otherwise + # when entering the pdb prompt, tearDown() would have probably cleaned up + # instance variables, which makes it difficult to debug. + # Arguably we could always postpone tearDown(), but this changes the moment where the + # TestCase instance interacts with the results object, so better to only do it + # when absolutely needed. + # We need to consider if the test itself is skipped, or the whole class. + assert isinstance(self.parent, UnitTestCase) + skipped = _is_skipped(self.obj) or _is_skipped(self.parent.obj) + if self.config.getoption("usepdb") and not skipped: + self._explicit_tearDown = testcase.tearDown + setattr(testcase, "tearDown", lambda *args: None) + + # We need to update the actual bound method with self.obj, because + # wrap_pytest_function_for_tracing replaces self.obj by a wrapper. + setattr(testcase, self.name, self.obj) + try: + testcase(result=self) + finally: + delattr(testcase, self.name) + + def _traceback_filter( + self, excinfo: _pytest._code.ExceptionInfo[BaseException] + ) -> _pytest._code.Traceback: + traceback = super()._traceback_filter(excinfo) + ntraceback = traceback.filter( + lambda x: not x.frame.f_globals.get("__unittest"), + ) + if not ntraceback: + ntraceback = traceback + return ntraceback + + def addSubTest( + self, + test_case: Any, + test: TestCase, + exc_info: ExceptionInfo[BaseException] + | tuple[type[BaseException], BaseException, TracebackType] + | None, + ) -> None: + exception_info: ExceptionInfo[BaseException] | None + match exc_info: + case tuple(): + exception_info = ExceptionInfo(exc_info, _ispytest=True) + case ExceptionInfo() | None: + exception_info = exc_info + case unreachable: + assert_never(unreachable) + + call_info = CallInfo[None]( + None, + exception_info, + start=0, + stop=0, + duration=0, + when="call", + _ispytest=True, + ) + msg = test._message if isinstance(test._message, str) else None # type: ignore[attr-defined] + report = self.ihook.pytest_runtest_makereport(item=self, call=call_info) + sub_report = SubtestReport._new( + report, + SubtestContext(msg=msg, kwargs=dict(test.params)), # type: ignore[attr-defined] + captured_output=None, + captured_logs=None, + ) + self.ihook.pytest_runtest_logreport(report=sub_report) + if check_interactive_exception(call_info, sub_report): + self.ihook.pytest_exception_interact( + node=self, call=call_info, report=sub_report + ) + + # For python < 3.11: add non-subtest skips once all subtest failures are processed by # `_addSubTest`. + if sys.version_info < (3, 11): + from unittest.case import _SubTest # type: ignore[attr-defined] + + non_subtest_skip = [ + (x, y) + for x, y in self.instance._outcome.skipped + if not isinstance(x, _SubTest) + ] + subtest_errors = [ + (x, y) + for x, y in self.instance._outcome.errors + if isinstance(x, _SubTest) and y is not None + ] + # Check if we have non-subtest skips: if there are also sub failures, non-subtest skips are not treated in + # `_addSubTest` and have to be added using `add_skip` after all subtest failures are processed. + if len(non_subtest_skip) > 0 and len(subtest_errors) > 0: + # Make sure we have processed the last subtest failure + last_subset_error = subtest_errors[-1] + if exc_info is last_subset_error[-1]: + # Add non-subtest skips (as they could not be treated in `_addSkip`) + for testcase, reason in non_subtest_skip: + self.addSkip(testcase, reason, handle_subtests=False) + + +@hookimpl(tryfirst=True) +def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None: + if isinstance(item, TestCaseFunction): + if item._excinfo: + call.excinfo = item._excinfo.pop(0) + try: + del call.result + except AttributeError: + pass + + +def _is_skipped(obj) -> bool: + """Return True if the given object has been marked with @unittest.skip.""" + return bool(getattr(obj, "__unittest_skip__", False)) + + +def pytest_configure() -> None: + """Register the TestCaseFunction class as an IReporter if twisted.trial is available.""" + if _get_twisted_version() is not TwistedVersion.NotInstalled: + from twisted.trial.itrial import IReporter + from zope.interface import classImplements + + classImplements(TestCaseFunction, IReporter) + + +class TwistedVersion(Enum): + """ + The Twisted version installed in the environment. + + We have different workarounds in place for different versions of Twisted. + """ + + # Twisted version 24 or prior. + Version24 = auto() + # Twisted version 25 or later. + Version25 = auto() + # Twisted version is not available. + NotInstalled = auto() + + +def _get_twisted_version() -> TwistedVersion: + # We need to check if "twisted.trial.unittest" is specifically present in sys.modules. + # This is because we intend to integrate with Trial only when it's actively running + # the test suite, but not needed when only other Twisted components are in use. + if "twisted.trial.unittest" not in sys.modules: + return TwistedVersion.NotInstalled + + import importlib.metadata + + import packaging.version + + version_str = importlib.metadata.version("twisted") + version = packaging.version.parse(version_str) + if version.major <= 24: + return TwistedVersion.Version24 + else: + return TwistedVersion.Version25 + + +# Name of the attribute in `twisted.python.Failure` instances that stores +# the `sys.exc_info()` tuple. +# See twisted.trial support in `pytest_runtest_protocol`. +TWISTED_RAW_EXCINFO_ATTR = "_twisted_raw_excinfo" + + +@hookimpl(wrapper=True) +def pytest_runtest_protocol(item: Item) -> Iterator[None]: + if _get_twisted_version() is TwistedVersion.Version24: + import twisted.python.failure as ut + + # Monkeypatch `Failure.__init__` to store the raw exception info. + original__init__ = ut.Failure.__init__ + + def store_raw_exception_info( + self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None + ): # pragma: no cover + if exc_value is None: + raw_exc_info = sys.exc_info() + else: + if exc_type is None: + exc_type = type(exc_value) + if exc_tb is None: + exc_tb = sys.exc_info()[2] + raw_exc_info = (exc_type, exc_value, exc_tb) + setattr(self, TWISTED_RAW_EXCINFO_ATTR, tuple(raw_exc_info)) + try: + original__init__( + self, exc_value, exc_type, exc_tb, captureVars=captureVars + ) + except TypeError: # pragma: no cover + original__init__(self, exc_value, exc_type, exc_tb) + + with MonkeyPatch.context() as patcher: + patcher.setattr(ut.Failure, "__init__", store_raw_exception_info) + return (yield) + else: + return (yield) + + +def _handle_twisted_exc_info( + rawexcinfo: _SysExcInfoType | BaseException, +) -> _SysExcInfoType: + """ + Twisted passes a custom Failure instance to `addError()` instead of using `sys.exc_info()`. + Therefore, if `rawexcinfo` is a `Failure` instance, convert it into the equivalent `sys.exc_info()` tuple + as expected by pytest. + """ + twisted_version = _get_twisted_version() + if twisted_version is TwistedVersion.NotInstalled: + # Unfortunately, because we cannot import `twisted.python.failure` at the top of the file + # and use it in the signature, we need to use `type:ignore` here because we cannot narrow + # the type properly in the `if` statement above. + return rawexcinfo # type:ignore[return-value] + elif twisted_version is TwistedVersion.Version24: + # Twisted calls addError() passing its own classes (like `twisted.python.Failure`), which violates + # the `addError()` signature, so we extract the original `sys.exc_info()` tuple which is stored + # in the object. + if hasattr(rawexcinfo, TWISTED_RAW_EXCINFO_ATTR): + saved_exc_info = getattr(rawexcinfo, TWISTED_RAW_EXCINFO_ATTR) + # Delete the attribute from the original object to avoid leaks. + delattr(rawexcinfo, TWISTED_RAW_EXCINFO_ATTR) + return saved_exc_info # type:ignore[no-any-return] + return rawexcinfo # type:ignore[return-value] + elif twisted_version is TwistedVersion.Version25: + if isinstance(rawexcinfo, BaseException): + import twisted.python.failure + + if isinstance(rawexcinfo, twisted.python.failure.Failure): + tb = rawexcinfo.__traceback__ + if tb is None: + tb = sys.exc_info()[2] + return type(rawexcinfo.value), rawexcinfo.value, tb + + return rawexcinfo # type:ignore[return-value] + else: + # Ideally we would use assert_never() here, but it is not available in all Python versions + # we support, plus we do not require `type_extensions` currently. + assert False, f"Unexpected Twisted version: {twisted_version}" diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/unraisableexception.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/unraisableexception.py new file mode 100644 index 0000000..0faca36 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/unraisableexception.py @@ -0,0 +1,163 @@ +from __future__ import annotations + +import collections +from collections.abc import Callable +import functools +import gc +import sys +import traceback +from typing import NamedTuple +from typing import TYPE_CHECKING +import warnings + +from _pytest.config import Config +from _pytest.nodes import Item +from _pytest.stash import StashKey +from _pytest.tracemalloc import tracemalloc_message +import pytest + + +if TYPE_CHECKING: + pass + +if sys.version_info < (3, 11): + from exceptiongroup import ExceptionGroup + + +# This is a stash item and not a simple constant to allow pytester to override it. +gc_collect_iterations_key = StashKey[int]() + + +def gc_collect_harder(iterations: int) -> None: + for _ in range(iterations): + gc.collect() + + +class UnraisableMeta(NamedTuple): + msg: str + cause_msg: str + exc_value: BaseException | None + + +unraisable_exceptions: StashKey[collections.deque[UnraisableMeta | BaseException]] = ( + StashKey() +) + + +def collect_unraisable(config: Config) -> None: + pop_unraisable = config.stash[unraisable_exceptions].pop + errors: list[pytest.PytestUnraisableExceptionWarning | RuntimeError] = [] + meta = None + hook_error = None + try: + while True: + try: + meta = pop_unraisable() + except IndexError: + break + + if isinstance(meta, BaseException): + hook_error = RuntimeError("Failed to process unraisable exception") + hook_error.__cause__ = meta + errors.append(hook_error) + continue + + msg = meta.msg + try: + warnings.warn(pytest.PytestUnraisableExceptionWarning(msg)) + except pytest.PytestUnraisableExceptionWarning as e: + # This except happens when the warning is treated as an error (e.g. `-Werror`). + if meta.exc_value is not None: + # Exceptions have a better way to show the traceback, but + # warnings do not, so hide the traceback from the msg and + # set the cause so the traceback shows up in the right place. + e.args = (meta.cause_msg,) + e.__cause__ = meta.exc_value + errors.append(e) + + if len(errors) == 1: + raise errors[0] + if errors: + raise ExceptionGroup("multiple unraisable exception warnings", errors) + finally: + del errors, meta, hook_error + + +def cleanup( + *, config: Config, prev_hook: Callable[[sys.UnraisableHookArgs], object] +) -> None: + # A single collection doesn't necessarily collect everything. + # Constant determined experimentally by the Trio project. + gc_collect_iterations = config.stash.get(gc_collect_iterations_key, 5) + try: + try: + gc_collect_harder(gc_collect_iterations) + collect_unraisable(config) + finally: + sys.unraisablehook = prev_hook + finally: + del config.stash[unraisable_exceptions] + + +def unraisable_hook( + unraisable: sys.UnraisableHookArgs, + /, + *, + append: Callable[[UnraisableMeta | BaseException], object], +) -> None: + try: + # we need to compute these strings here as they might change after + # the unraisablehook finishes and before the metadata object is + # collected by a pytest hook + err_msg = ( + "Exception ignored in" if unraisable.err_msg is None else unraisable.err_msg + ) + summary = f"{err_msg}: {unraisable.object!r}" + traceback_message = "\n\n" + "".join( + traceback.format_exception( + unraisable.exc_type, + unraisable.exc_value, + unraisable.exc_traceback, + ) + ) + tracemalloc_tb = "\n" + tracemalloc_message(unraisable.object) + msg = summary + traceback_message + tracemalloc_tb + cause_msg = summary + tracemalloc_tb + + append( + UnraisableMeta( + msg=msg, + cause_msg=cause_msg, + exc_value=unraisable.exc_value, + ) + ) + except BaseException as e: + append(e) + # Raising this will cause the exception to be logged twice, once in our + # collect_unraisable and once by the unraisablehook calling machinery + # which is fine - this should never happen anyway and if it does + # it should probably be reported as a pytest bug. + raise + + +def pytest_configure(config: Config) -> None: + prev_hook = sys.unraisablehook + deque: collections.deque[UnraisableMeta | BaseException] = collections.deque() + config.stash[unraisable_exceptions] = deque + config.add_cleanup(functools.partial(cleanup, config=config, prev_hook=prev_hook)) + sys.unraisablehook = functools.partial(unraisable_hook, append=deque.append) + + +@pytest.hookimpl(trylast=True) +def pytest_runtest_setup(item: Item) -> None: + collect_unraisable(item.config) + + +@pytest.hookimpl(trylast=True) +def pytest_runtest_call(item: Item) -> None: + collect_unraisable(item.config) + + +@pytest.hookimpl(trylast=True) +def pytest_runtest_teardown(item: Item) -> None: + collect_unraisable(item.config) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/warning_types.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/warning_types.py new file mode 100644 index 0000000..93071b4 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/warning_types.py @@ -0,0 +1,172 @@ +from __future__ import annotations + +import dataclasses +import inspect +from types import FunctionType +from typing import Any +from typing import final +from typing import Generic +from typing import TypeVar +import warnings + + +class PytestWarning(UserWarning): + """Base class for all warnings emitted by pytest.""" + + __module__ = "pytest" + + +@final +class PytestAssertRewriteWarning(PytestWarning): + """Warning emitted by the pytest assert rewrite module.""" + + __module__ = "pytest" + + +@final +class PytestCacheWarning(PytestWarning): + """Warning emitted by the cache plugin in various situations.""" + + __module__ = "pytest" + + +@final +class PytestConfigWarning(PytestWarning): + """Warning emitted for configuration issues.""" + + __module__ = "pytest" + + +@final +class PytestCollectionWarning(PytestWarning): + """Warning emitted when pytest is not able to collect a file or symbol in a module.""" + + __module__ = "pytest" + + +class PytestDeprecationWarning(PytestWarning, DeprecationWarning): + """Warning class for features that will be removed in a future version.""" + + __module__ = "pytest" + + +class PytestRemovedIn9Warning(PytestDeprecationWarning): + """Warning class for features that will be removed in pytest 9.""" + + __module__ = "pytest" + + +class PytestRemovedIn10Warning(PytestDeprecationWarning): + """Warning class for features that will be removed in pytest 10.""" + + __module__ = "pytest" + + +@final +class PytestExperimentalApiWarning(PytestWarning, FutureWarning): + """Warning category used to denote experiments in pytest. + + Use sparingly as the API might change or even be removed completely in a + future version. + """ + + __module__ = "pytest" + + @classmethod + def simple(cls, apiname: str) -> PytestExperimentalApiWarning: + return cls(f"{apiname} is an experimental api that may change over time") + + +@final +class PytestReturnNotNoneWarning(PytestWarning): + """ + Warning emitted when a test function returns a value other than ``None``. + + See :ref:`return-not-none` for details. + """ + + __module__ = "pytest" + + +@final +class PytestUnknownMarkWarning(PytestWarning): + """Warning emitted on use of unknown markers. + + See :ref:`mark` for details. + """ + + __module__ = "pytest" + + +@final +class PytestUnraisableExceptionWarning(PytestWarning): + """An unraisable exception was reported. + + Unraisable exceptions are exceptions raised in :meth:`__del__ ` + implementations and similar situations when the exception cannot be raised + as normal. + """ + + __module__ = "pytest" + + +@final +class PytestUnhandledThreadExceptionWarning(PytestWarning): + """An unhandled exception occurred in a :class:`~threading.Thread`. + + Such exceptions don't propagate normally. + """ + + __module__ = "pytest" + + +_W = TypeVar("_W", bound=PytestWarning) + + +@final +@dataclasses.dataclass +class UnformattedWarning(Generic[_W]): + """A warning meant to be formatted during runtime. + + This is used to hold warnings that need to format their message at runtime, + as opposed to a direct message. + """ + + category: type[_W] + template: str + + def format(self, **kwargs: Any) -> _W: + """Return an instance of the warning category, formatted with given kwargs.""" + return self.category(self.template.format(**kwargs)) + + +@final +class PytestFDWarning(PytestWarning): + """When the lsof plugin finds leaked fds.""" + + __module__ = "pytest" + + +def warn_explicit_for(method: FunctionType, message: PytestWarning) -> None: + """ + Issue the warning :param:`message` for the definition of the given :param:`method` + + this helps to log warnings for functions defined prior to finding an issue with them + (like hook wrappers being marked in a legacy mechanism) + """ + lineno = method.__code__.co_firstlineno + filename = inspect.getfile(method) + module = method.__module__ + mod_globals = method.__globals__ + try: + warnings.warn_explicit( + message, + type(message), + filename=filename, + module=module, + registry=mod_globals.setdefault("__warningregistry__", {}), + lineno=lineno, + ) + except Warning as w: + # If warnings are errors (e.g. -Werror), location information gets lost, so we add it to the message. + raise type(w)(f"{w}\n at {filename}:{lineno}") from None diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/warnings.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/warnings.py new file mode 100644 index 0000000..1dbf002 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_pytest/warnings.py @@ -0,0 +1,151 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +from collections.abc import Generator +from contextlib import contextmanager +from contextlib import ExitStack +import sys +from typing import Literal +import warnings + +from _pytest.config import apply_warning_filters +from _pytest.config import Config +from _pytest.config import parse_warning_filter +from _pytest.main import Session +from _pytest.nodes import Item +from _pytest.terminal import TerminalReporter +from _pytest.tracemalloc import tracemalloc_message +import pytest + + +@contextmanager +def catch_warnings_for_item( + config: Config, + ihook, + when: Literal["config", "collect", "runtest"], + item: Item | None, + *, + record: bool = True, +) -> Generator[None]: + """Context manager that catches warnings generated in the contained execution block. + + ``item`` can be None if we are not in the context of an item execution. + + Each warning captured triggers the ``pytest_warning_recorded`` hook. + """ + config_filters = config.getini("filterwarnings") + cmdline_filters = config.known_args_namespace.pythonwarnings or [] + with warnings.catch_warnings(record=record) as log: + if not sys.warnoptions: + # If user is not explicitly configuring warning filters, show deprecation warnings by default (#2908). + warnings.filterwarnings("always", category=DeprecationWarning) + warnings.filterwarnings("always", category=PendingDeprecationWarning) + + warnings.filterwarnings("error", category=pytest.PytestRemovedIn9Warning) + + apply_warning_filters(config_filters, cmdline_filters) + + # apply filters from "filterwarnings" marks + nodeid = "" if item is None else item.nodeid + if item is not None: + for mark in item.iter_markers(name="filterwarnings"): + for arg in mark.args: + warnings.filterwarnings(*parse_warning_filter(arg, escape=False)) + + try: + yield + finally: + if record: + # mypy can't infer that record=True means log is not None; help it. + assert log is not None + + for warning_message in log: + ihook.pytest_warning_recorded.call_historic( + kwargs=dict( + warning_message=warning_message, + nodeid=nodeid, + when=when, + location=None, + ) + ) + + +def warning_record_to_str(warning_message: warnings.WarningMessage) -> str: + """Convert a warnings.WarningMessage to a string.""" + return warnings.formatwarning( + str(warning_message.message), + warning_message.category, + warning_message.filename, + warning_message.lineno, + warning_message.line, + ) + tracemalloc_message(warning_message.source) + + +@pytest.hookimpl(wrapper=True, tryfirst=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]: + with catch_warnings_for_item( + config=item.config, ihook=item.ihook, when="runtest", item=item + ): + return (yield) + + +@pytest.hookimpl(wrapper=True, tryfirst=True) +def pytest_collection(session: Session) -> Generator[None, object, object]: + config = session.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="collect", item=None + ): + return (yield) + + +@pytest.hookimpl(wrapper=True) +def pytest_terminal_summary( + terminalreporter: TerminalReporter, +) -> Generator[None]: + config = terminalreporter.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="config", item=None + ): + return (yield) + + +@pytest.hookimpl(wrapper=True) +def pytest_sessionfinish(session: Session) -> Generator[None]: + config = session.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="config", item=None + ): + return (yield) + + +@pytest.hookimpl(wrapper=True) +def pytest_load_initial_conftests( + early_config: Config, +) -> Generator[None]: + with catch_warnings_for_item( + config=early_config, ihook=early_config.hook, when="config", item=None + ): + return (yield) + + +def pytest_configure(config: Config) -> None: + with ExitStack() as stack: + stack.enter_context( + catch_warnings_for_item( + config=config, + ihook=config.hook, + when="config", + item=None, + # this disables recording because the terminalreporter has + # finished by the time it comes to reporting logged warnings + # from the end of config cleanup. So for now, this is only + # useful for setting a warning filter with an 'error' action. + record=False, + ) + ) + config.addinivalue_line( + "markers", + "filterwarnings(warning): add a warning filter to the given test. " + "see https://docs.pytest.org/en/stable/how-to/capture-warnings.html#pytest-mark-filterwarnings ", + ) + config.add_cleanup(stack.pop_all().close) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/_yaml/__init__.py b/tools/converter-generator/venv/lib/python3.11/site-packages/_yaml/__init__.py new file mode 100644 index 0000000..7baa8c4 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/_yaml/__init__.py @@ -0,0 +1,33 @@ +# This is a stub package designed to roughly emulate the _yaml +# extension module, which previously existed as a standalone module +# and has been moved into the `yaml` package namespace. +# It does not perfectly mimic its old counterpart, but should get +# close enough for anyone who's relying on it even when they shouldn't. +import yaml + +# in some circumstances, the yaml module we imoprted may be from a different version, so we need +# to tread carefully when poking at it here (it may not have the attributes we expect) +if not getattr(yaml, '__with_libyaml__', False): + from sys import version_info + + exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError + raise exc("No module named '_yaml'") +else: + from yaml._yaml import * + import warnings + warnings.warn( + 'The _yaml extension module is now located at yaml._yaml' + ' and its location is subject to change. To use the' + ' LibYAML-based parser and emitter, import from `yaml`:' + ' `from yaml import CLoader as Loader, CDumper as Dumper`.', + DeprecationWarning + ) + del warnings + # Don't `del yaml` here because yaml is actually an existing + # namespace member of _yaml. + +__name__ = '_yaml' +# If the module is top-level (i.e. not a part of any specific package) +# then the attribute should be set to ''. +# https://docs.python.org/3.8/library/types.html +__package__ = '' diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click-8.3.0.dist-info/INSTALLER b/tools/converter-generator/venv/lib/python3.11/site-packages/click-8.3.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click-8.3.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click-8.3.0.dist-info/METADATA b/tools/converter-generator/venv/lib/python3.11/site-packages/click-8.3.0.dist-info/METADATA new file mode 100644 index 0000000..534eb57 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click-8.3.0.dist-info/METADATA @@ -0,0 +1,84 @@ +Metadata-Version: 2.4 +Name: click +Version: 8.3.0 +Summary: Composable command line interface toolkit +Maintainer-email: Pallets +Requires-Python: >=3.10 +Description-Content-Type: text/markdown +License-Expression: BSD-3-Clause +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Typing :: Typed +License-File: LICENSE.txt +Requires-Dist: colorama; platform_system == 'Windows' +Project-URL: Changes, https://click.palletsprojects.com/page/changes/ +Project-URL: Chat, https://discord.gg/pallets +Project-URL: Documentation, https://click.palletsprojects.com/ +Project-URL: Donate, https://palletsprojects.com/donate +Project-URL: Source, https://github.com/pallets/click/ + +
+ +# Click + +Click is a Python package for creating beautiful command line interfaces +in a composable way with as little code as necessary. It's the "Command +Line Interface Creation Kit". It's highly configurable but comes with +sensible defaults out of the box. + +It aims to make the process of writing command line tools quick and fun +while also preventing any frustration caused by the inability to +implement an intended CLI API. + +Click in three points: + +- Arbitrary nesting of commands +- Automatic help page generation +- Supports lazy loading of subcommands at runtime + + +## A Simple Example + +```python +import click + +@click.command() +@click.option("--count", default=1, help="Number of greetings.") +@click.option("--name", prompt="Your name", help="The person to greet.") +def hello(count, name): + """Simple program that greets NAME for a total of COUNT times.""" + for _ in range(count): + click.echo(f"Hello, {name}!") + +if __name__ == '__main__': + hello() +``` + +``` +$ python hello.py --count=3 +Your name: Click +Hello, Click! +Hello, Click! +Hello, Click! +``` + + +## Donate + +The Pallets organization develops and supports Click and other popular +packages. In order to grow the community of contributors and users, and +allow the maintainers to devote more time to the projects, [please +donate today][]. + +[please donate today]: https://palletsprojects.com/donate + +## Contributing + +See our [detailed contributing documentation][contrib] for many ways to +contribute, including reporting issues, requesting features, asking or answering +questions, and making PRs. + +[contrib]: https://palletsprojects.com/contributing/ + diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click-8.3.0.dist-info/RECORD b/tools/converter-generator/venv/lib/python3.11/site-packages/click-8.3.0.dist-info/RECORD new file mode 100644 index 0000000..a269f67 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click-8.3.0.dist-info/RECORD @@ -0,0 +1,41 @@ +click-8.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +click-8.3.0.dist-info/METADATA,sha256=P6vpEHZ_MLBt4SO2eB-QaadcOdiznkzaZtJImRo7_V4,2621 +click-8.3.0.dist-info/RECORD,, +click-8.3.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +click-8.3.0.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82 +click-8.3.0.dist-info/licenses/LICENSE.txt,sha256=morRBqOU6FO_4h9C9OctWSgZoigF2ZG18ydQKSkrZY0,1475 +click/__init__.py,sha256=6YyS1aeyknZ0LYweWozNZy0A9nZ_11wmYIhv3cbQrYo,4473 +click/__pycache__/__init__.cpython-311.pyc,, +click/__pycache__/_compat.cpython-311.pyc,, +click/__pycache__/_termui_impl.cpython-311.pyc,, +click/__pycache__/_textwrap.cpython-311.pyc,, +click/__pycache__/_utils.cpython-311.pyc,, +click/__pycache__/_winconsole.cpython-311.pyc,, +click/__pycache__/core.cpython-311.pyc,, +click/__pycache__/decorators.cpython-311.pyc,, +click/__pycache__/exceptions.cpython-311.pyc,, +click/__pycache__/formatting.cpython-311.pyc,, +click/__pycache__/globals.cpython-311.pyc,, +click/__pycache__/parser.cpython-311.pyc,, +click/__pycache__/shell_completion.cpython-311.pyc,, +click/__pycache__/termui.cpython-311.pyc,, +click/__pycache__/testing.cpython-311.pyc,, +click/__pycache__/types.cpython-311.pyc,, +click/__pycache__/utils.cpython-311.pyc,, +click/_compat.py,sha256=v3xBZkFbvA1BXPRkFfBJc6-pIwPI7345m-kQEnpVAs4,18693 +click/_termui_impl.py,sha256=ktpAHyJtNkhyR-x64CQFD6xJQI11fTA3qg2AV3iCToU,26799 +click/_textwrap.py,sha256=BOae0RQ6vg3FkNgSJyOoGzG1meGMxJ_ukWVZKx_v-0o,1400 +click/_utils.py,sha256=kZwtTf5gMuCilJJceS2iTCvRvCY-0aN5rJq8gKw7p8g,943 +click/_winconsole.py,sha256=_vxUuUaxwBhoR0vUWCNuHY8VUefiMdCIyU2SXPqoF-A,8465 +click/core.py,sha256=1A5T8UoAXklIGPTJ83_DJbVi35ehtJS2FTkP_wQ7es0,128855 +click/decorators.py,sha256=5P7abhJtAQYp_KHgjUvhMv464ERwOzrv2enNknlwHyQ,18461 +click/exceptions.py,sha256=8utf8w6V5hJXMnO_ic1FNrtbwuEn1NUu1aDwV8UqnG4,9954 +click/formatting.py,sha256=RVfwwr0rwWNpgGr8NaHodPzkIr7_tUyVh_nDdanLMNc,9730 +click/globals.py,sha256=gM-Nh6A4M0HB_SgkaF5M4ncGGMDHc_flHXu9_oh4GEU,1923 +click/parser.py,sha256=Q31pH0FlQZEq-UXE_ABRzlygEfvxPTuZbWNh4xfXmzw,19010 +click/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +click/shell_completion.py,sha256=Cc4GQUFuWpfQBa9sF5qXeeYI7n3tI_1k6ZdSn4BZbT0,20994 +click/termui.py,sha256=vAYrKC2a7f_NfEIhAThEVYfa__ib5XQbTSCGtJlABRA,30847 +click/testing.py,sha256=EERbzcl1br0mW0qBS9EqkknfNfXB9WQEW0ELIpkvuSs,19102 +click/types.py,sha256=ek54BNSFwPKsqtfT7jsqcc4WHui8AIFVMKM4oVZIXhc,39927 +click/utils.py,sha256=gCUoewdAhA-QLBUUHxrLh4uj6m7T1WjZZMNPvR0I7YA,20257 diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click-8.3.0.dist-info/REQUESTED b/tools/converter-generator/venv/lib/python3.11/site-packages/click-8.3.0.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click-8.3.0.dist-info/WHEEL b/tools/converter-generator/venv/lib/python3.11/site-packages/click-8.3.0.dist-info/WHEEL new file mode 100644 index 0000000..d8b9936 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click-8.3.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.12.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click-8.3.0.dist-info/licenses/LICENSE.txt b/tools/converter-generator/venv/lib/python3.11/site-packages/click-8.3.0.dist-info/licenses/LICENSE.txt new file mode 100644 index 0000000..d12a849 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click-8.3.0.dist-info/licenses/LICENSE.txt @@ -0,0 +1,28 @@ +Copyright 2014 Pallets + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click/__init__.py b/tools/converter-generator/venv/lib/python3.11/site-packages/click/__init__.py new file mode 100644 index 0000000..1aa547c --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click/__init__.py @@ -0,0 +1,123 @@ +""" +Click is a simple Python module inspired by the stdlib optparse to make +writing command line scripts fun. Unlike other modules, it's based +around a simple API that does not come with too much magic and is +composable. +""" + +from __future__ import annotations + +from .core import Argument as Argument +from .core import Command as Command +from .core import CommandCollection as CommandCollection +from .core import Context as Context +from .core import Group as Group +from .core import Option as Option +from .core import Parameter as Parameter +from .decorators import argument as argument +from .decorators import command as command +from .decorators import confirmation_option as confirmation_option +from .decorators import group as group +from .decorators import help_option as help_option +from .decorators import make_pass_decorator as make_pass_decorator +from .decorators import option as option +from .decorators import pass_context as pass_context +from .decorators import pass_obj as pass_obj +from .decorators import password_option as password_option +from .decorators import version_option as version_option +from .exceptions import Abort as Abort +from .exceptions import BadArgumentUsage as BadArgumentUsage +from .exceptions import BadOptionUsage as BadOptionUsage +from .exceptions import BadParameter as BadParameter +from .exceptions import ClickException as ClickException +from .exceptions import FileError as FileError +from .exceptions import MissingParameter as MissingParameter +from .exceptions import NoSuchOption as NoSuchOption +from .exceptions import UsageError as UsageError +from .formatting import HelpFormatter as HelpFormatter +from .formatting import wrap_text as wrap_text +from .globals import get_current_context as get_current_context +from .termui import clear as clear +from .termui import confirm as confirm +from .termui import echo_via_pager as echo_via_pager +from .termui import edit as edit +from .termui import getchar as getchar +from .termui import launch as launch +from .termui import pause as pause +from .termui import progressbar as progressbar +from .termui import prompt as prompt +from .termui import secho as secho +from .termui import style as style +from .termui import unstyle as unstyle +from .types import BOOL as BOOL +from .types import Choice as Choice +from .types import DateTime as DateTime +from .types import File as File +from .types import FLOAT as FLOAT +from .types import FloatRange as FloatRange +from .types import INT as INT +from .types import IntRange as IntRange +from .types import ParamType as ParamType +from .types import Path as Path +from .types import STRING as STRING +from .types import Tuple as Tuple +from .types import UNPROCESSED as UNPROCESSED +from .types import UUID as UUID +from .utils import echo as echo +from .utils import format_filename as format_filename +from .utils import get_app_dir as get_app_dir +from .utils import get_binary_stream as get_binary_stream +from .utils import get_text_stream as get_text_stream +from .utils import open_file as open_file + + +def __getattr__(name: str) -> object: + import warnings + + if name == "BaseCommand": + from .core import _BaseCommand + + warnings.warn( + "'BaseCommand' is deprecated and will be removed in Click 9.0. Use" + " 'Command' instead.", + DeprecationWarning, + stacklevel=2, + ) + return _BaseCommand + + if name == "MultiCommand": + from .core import _MultiCommand + + warnings.warn( + "'MultiCommand' is deprecated and will be removed in Click 9.0. Use" + " 'Group' instead.", + DeprecationWarning, + stacklevel=2, + ) + return _MultiCommand + + if name == "OptionParser": + from .parser import _OptionParser + + warnings.warn( + "'OptionParser' is deprecated and will be removed in Click 9.0. The" + " old parser is available in 'optparse'.", + DeprecationWarning, + stacklevel=2, + ) + return _OptionParser + + if name == "__version__": + import importlib.metadata + import warnings + + warnings.warn( + "The '__version__' attribute is deprecated and will be removed in" + " Click 9.1. Use feature detection or" + " 'importlib.metadata.version(\"click\")' instead.", + DeprecationWarning, + stacklevel=2, + ) + return importlib.metadata.version("click") + + raise AttributeError(name) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click/_compat.py b/tools/converter-generator/venv/lib/python3.11/site-packages/click/_compat.py new file mode 100644 index 0000000..f2726b9 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click/_compat.py @@ -0,0 +1,622 @@ +from __future__ import annotations + +import codecs +import collections.abc as cabc +import io +import os +import re +import sys +import typing as t +from types import TracebackType +from weakref import WeakKeyDictionary + +CYGWIN = sys.platform.startswith("cygwin") +WIN = sys.platform.startswith("win") +auto_wrap_for_ansi: t.Callable[[t.TextIO], t.TextIO] | None = None +_ansi_re = re.compile(r"\033\[[;?0-9]*[a-zA-Z]") + + +def _make_text_stream( + stream: t.BinaryIO, + encoding: str | None, + errors: str | None, + force_readable: bool = False, + force_writable: bool = False, +) -> t.TextIO: + if encoding is None: + encoding = get_best_encoding(stream) + if errors is None: + errors = "replace" + return _NonClosingTextIOWrapper( + stream, + encoding, + errors, + line_buffering=True, + force_readable=force_readable, + force_writable=force_writable, + ) + + +def is_ascii_encoding(encoding: str) -> bool: + """Checks if a given encoding is ascii.""" + try: + return codecs.lookup(encoding).name == "ascii" + except LookupError: + return False + + +def get_best_encoding(stream: t.IO[t.Any]) -> str: + """Returns the default stream encoding if not found.""" + rv = getattr(stream, "encoding", None) or sys.getdefaultencoding() + if is_ascii_encoding(rv): + return "utf-8" + return rv + + +class _NonClosingTextIOWrapper(io.TextIOWrapper): + def __init__( + self, + stream: t.BinaryIO, + encoding: str | None, + errors: str | None, + force_readable: bool = False, + force_writable: bool = False, + **extra: t.Any, + ) -> None: + self._stream = stream = t.cast( + t.BinaryIO, _FixupStream(stream, force_readable, force_writable) + ) + super().__init__(stream, encoding, errors, **extra) + + def __del__(self) -> None: + try: + self.detach() + except Exception: + pass + + def isatty(self) -> bool: + # https://bitbucket.org/pypy/pypy/issue/1803 + return self._stream.isatty() + + +class _FixupStream: + """The new io interface needs more from streams than streams + traditionally implement. As such, this fix-up code is necessary in + some circumstances. + + The forcing of readable and writable flags are there because some tools + put badly patched objects on sys (one such offender are certain version + of jupyter notebook). + """ + + def __init__( + self, + stream: t.BinaryIO, + force_readable: bool = False, + force_writable: bool = False, + ): + self._stream = stream + self._force_readable = force_readable + self._force_writable = force_writable + + def __getattr__(self, name: str) -> t.Any: + return getattr(self._stream, name) + + def read1(self, size: int) -> bytes: + f = getattr(self._stream, "read1", None) + + if f is not None: + return t.cast(bytes, f(size)) + + return self._stream.read(size) + + def readable(self) -> bool: + if self._force_readable: + return True + x = getattr(self._stream, "readable", None) + if x is not None: + return t.cast(bool, x()) + try: + self._stream.read(0) + except Exception: + return False + return True + + def writable(self) -> bool: + if self._force_writable: + return True + x = getattr(self._stream, "writable", None) + if x is not None: + return t.cast(bool, x()) + try: + self._stream.write(b"") + except Exception: + try: + self._stream.write(b"") + except Exception: + return False + return True + + def seekable(self) -> bool: + x = getattr(self._stream, "seekable", None) + if x is not None: + return t.cast(bool, x()) + try: + self._stream.seek(self._stream.tell()) + except Exception: + return False + return True + + +def _is_binary_reader(stream: t.IO[t.Any], default: bool = False) -> bool: + try: + return isinstance(stream.read(0), bytes) + except Exception: + return default + # This happens in some cases where the stream was already + # closed. In this case, we assume the default. + + +def _is_binary_writer(stream: t.IO[t.Any], default: bool = False) -> bool: + try: + stream.write(b"") + except Exception: + try: + stream.write("") + return False + except Exception: + pass + return default + return True + + +def _find_binary_reader(stream: t.IO[t.Any]) -> t.BinaryIO | None: + # We need to figure out if the given stream is already binary. + # This can happen because the official docs recommend detaching + # the streams to get binary streams. Some code might do this, so + # we need to deal with this case explicitly. + if _is_binary_reader(stream, False): + return t.cast(t.BinaryIO, stream) + + buf = getattr(stream, "buffer", None) + + # Same situation here; this time we assume that the buffer is + # actually binary in case it's closed. + if buf is not None and _is_binary_reader(buf, True): + return t.cast(t.BinaryIO, buf) + + return None + + +def _find_binary_writer(stream: t.IO[t.Any]) -> t.BinaryIO | None: + # We need to figure out if the given stream is already binary. + # This can happen because the official docs recommend detaching + # the streams to get binary streams. Some code might do this, so + # we need to deal with this case explicitly. + if _is_binary_writer(stream, False): + return t.cast(t.BinaryIO, stream) + + buf = getattr(stream, "buffer", None) + + # Same situation here; this time we assume that the buffer is + # actually binary in case it's closed. + if buf is not None and _is_binary_writer(buf, True): + return t.cast(t.BinaryIO, buf) + + return None + + +def _stream_is_misconfigured(stream: t.TextIO) -> bool: + """A stream is misconfigured if its encoding is ASCII.""" + # If the stream does not have an encoding set, we assume it's set + # to ASCII. This appears to happen in certain unittest + # environments. It's not quite clear what the correct behavior is + # but this at least will force Click to recover somehow. + return is_ascii_encoding(getattr(stream, "encoding", None) or "ascii") + + +def _is_compat_stream_attr(stream: t.TextIO, attr: str, value: str | None) -> bool: + """A stream attribute is compatible if it is equal to the + desired value or the desired value is unset and the attribute + has a value. + """ + stream_value = getattr(stream, attr, None) + return stream_value == value or (value is None and stream_value is not None) + + +def _is_compatible_text_stream( + stream: t.TextIO, encoding: str | None, errors: str | None +) -> bool: + """Check if a stream's encoding and errors attributes are + compatible with the desired values. + """ + return _is_compat_stream_attr( + stream, "encoding", encoding + ) and _is_compat_stream_attr(stream, "errors", errors) + + +def _force_correct_text_stream( + text_stream: t.IO[t.Any], + encoding: str | None, + errors: str | None, + is_binary: t.Callable[[t.IO[t.Any], bool], bool], + find_binary: t.Callable[[t.IO[t.Any]], t.BinaryIO | None], + force_readable: bool = False, + force_writable: bool = False, +) -> t.TextIO: + if is_binary(text_stream, False): + binary_reader = t.cast(t.BinaryIO, text_stream) + else: + text_stream = t.cast(t.TextIO, text_stream) + # If the stream looks compatible, and won't default to a + # misconfigured ascii encoding, return it as-is. + if _is_compatible_text_stream(text_stream, encoding, errors) and not ( + encoding is None and _stream_is_misconfigured(text_stream) + ): + return text_stream + + # Otherwise, get the underlying binary reader. + possible_binary_reader = find_binary(text_stream) + + # If that's not possible, silently use the original reader + # and get mojibake instead of exceptions. + if possible_binary_reader is None: + return text_stream + + binary_reader = possible_binary_reader + + # Default errors to replace instead of strict in order to get + # something that works. + if errors is None: + errors = "replace" + + # Wrap the binary stream in a text stream with the correct + # encoding parameters. + return _make_text_stream( + binary_reader, + encoding, + errors, + force_readable=force_readable, + force_writable=force_writable, + ) + + +def _force_correct_text_reader( + text_reader: t.IO[t.Any], + encoding: str | None, + errors: str | None, + force_readable: bool = False, +) -> t.TextIO: + return _force_correct_text_stream( + text_reader, + encoding, + errors, + _is_binary_reader, + _find_binary_reader, + force_readable=force_readable, + ) + + +def _force_correct_text_writer( + text_writer: t.IO[t.Any], + encoding: str | None, + errors: str | None, + force_writable: bool = False, +) -> t.TextIO: + return _force_correct_text_stream( + text_writer, + encoding, + errors, + _is_binary_writer, + _find_binary_writer, + force_writable=force_writable, + ) + + +def get_binary_stdin() -> t.BinaryIO: + reader = _find_binary_reader(sys.stdin) + if reader is None: + raise RuntimeError("Was not able to determine binary stream for sys.stdin.") + return reader + + +def get_binary_stdout() -> t.BinaryIO: + writer = _find_binary_writer(sys.stdout) + if writer is None: + raise RuntimeError("Was not able to determine binary stream for sys.stdout.") + return writer + + +def get_binary_stderr() -> t.BinaryIO: + writer = _find_binary_writer(sys.stderr) + if writer is None: + raise RuntimeError("Was not able to determine binary stream for sys.stderr.") + return writer + + +def get_text_stdin(encoding: str | None = None, errors: str | None = None) -> t.TextIO: + rv = _get_windows_console_stream(sys.stdin, encoding, errors) + if rv is not None: + return rv + return _force_correct_text_reader(sys.stdin, encoding, errors, force_readable=True) + + +def get_text_stdout(encoding: str | None = None, errors: str | None = None) -> t.TextIO: + rv = _get_windows_console_stream(sys.stdout, encoding, errors) + if rv is not None: + return rv + return _force_correct_text_writer(sys.stdout, encoding, errors, force_writable=True) + + +def get_text_stderr(encoding: str | None = None, errors: str | None = None) -> t.TextIO: + rv = _get_windows_console_stream(sys.stderr, encoding, errors) + if rv is not None: + return rv + return _force_correct_text_writer(sys.stderr, encoding, errors, force_writable=True) + + +def _wrap_io_open( + file: str | os.PathLike[str] | int, + mode: str, + encoding: str | None, + errors: str | None, +) -> t.IO[t.Any]: + """Handles not passing ``encoding`` and ``errors`` in binary mode.""" + if "b" in mode: + return open(file, mode) + + return open(file, mode, encoding=encoding, errors=errors) + + +def open_stream( + filename: str | os.PathLike[str], + mode: str = "r", + encoding: str | None = None, + errors: str | None = "strict", + atomic: bool = False, +) -> tuple[t.IO[t.Any], bool]: + binary = "b" in mode + filename = os.fspath(filename) + + # Standard streams first. These are simple because they ignore the + # atomic flag. Use fsdecode to handle Path("-"). + if os.fsdecode(filename) == "-": + if any(m in mode for m in ["w", "a", "x"]): + if binary: + return get_binary_stdout(), False + return get_text_stdout(encoding=encoding, errors=errors), False + if binary: + return get_binary_stdin(), False + return get_text_stdin(encoding=encoding, errors=errors), False + + # Non-atomic writes directly go out through the regular open functions. + if not atomic: + return _wrap_io_open(filename, mode, encoding, errors), True + + # Some usability stuff for atomic writes + if "a" in mode: + raise ValueError( + "Appending to an existing file is not supported, because that" + " would involve an expensive `copy`-operation to a temporary" + " file. Open the file in normal `w`-mode and copy explicitly" + " if that's what you're after." + ) + if "x" in mode: + raise ValueError("Use the `overwrite`-parameter instead.") + if "w" not in mode: + raise ValueError("Atomic writes only make sense with `w`-mode.") + + # Atomic writes are more complicated. They work by opening a file + # as a proxy in the same folder and then using the fdopen + # functionality to wrap it in a Python file. Then we wrap it in an + # atomic file that moves the file over on close. + import errno + import random + + try: + perm: int | None = os.stat(filename).st_mode + except OSError: + perm = None + + flags = os.O_RDWR | os.O_CREAT | os.O_EXCL + + if binary: + flags |= getattr(os, "O_BINARY", 0) + + while True: + tmp_filename = os.path.join( + os.path.dirname(filename), + f".__atomic-write{random.randrange(1 << 32):08x}", + ) + try: + fd = os.open(tmp_filename, flags, 0o666 if perm is None else perm) + break + except OSError as e: + if e.errno == errno.EEXIST or ( + os.name == "nt" + and e.errno == errno.EACCES + and os.path.isdir(e.filename) + and os.access(e.filename, os.W_OK) + ): + continue + raise + + if perm is not None: + os.chmod(tmp_filename, perm) # in case perm includes bits in umask + + f = _wrap_io_open(fd, mode, encoding, errors) + af = _AtomicFile(f, tmp_filename, os.path.realpath(filename)) + return t.cast(t.IO[t.Any], af), True + + +class _AtomicFile: + def __init__(self, f: t.IO[t.Any], tmp_filename: str, real_filename: str) -> None: + self._f = f + self._tmp_filename = tmp_filename + self._real_filename = real_filename + self.closed = False + + @property + def name(self) -> str: + return self._real_filename + + def close(self, delete: bool = False) -> None: + if self.closed: + return + self._f.close() + os.replace(self._tmp_filename, self._real_filename) + self.closed = True + + def __getattr__(self, name: str) -> t.Any: + return getattr(self._f, name) + + def __enter__(self) -> _AtomicFile: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + tb: TracebackType | None, + ) -> None: + self.close(delete=exc_type is not None) + + def __repr__(self) -> str: + return repr(self._f) + + +def strip_ansi(value: str) -> str: + return _ansi_re.sub("", value) + + +def _is_jupyter_kernel_output(stream: t.IO[t.Any]) -> bool: + while isinstance(stream, (_FixupStream, _NonClosingTextIOWrapper)): + stream = stream._stream + + return stream.__class__.__module__.startswith("ipykernel.") + + +def should_strip_ansi( + stream: t.IO[t.Any] | None = None, color: bool | None = None +) -> bool: + if color is None: + if stream is None: + stream = sys.stdin + return not isatty(stream) and not _is_jupyter_kernel_output(stream) + return not color + + +# On Windows, wrap the output streams with colorama to support ANSI +# color codes. +# NOTE: double check is needed so mypy does not analyze this on Linux +if sys.platform.startswith("win") and WIN: + from ._winconsole import _get_windows_console_stream + + def _get_argv_encoding() -> str: + import locale + + return locale.getpreferredencoding() + + _ansi_stream_wrappers: cabc.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary() + + def auto_wrap_for_ansi(stream: t.TextIO, color: bool | None = None) -> t.TextIO: + """Support ANSI color and style codes on Windows by wrapping a + stream with colorama. + """ + try: + cached = _ansi_stream_wrappers.get(stream) + except Exception: + cached = None + + if cached is not None: + return cached + + import colorama + + strip = should_strip_ansi(stream, color) + ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip) + rv = t.cast(t.TextIO, ansi_wrapper.stream) + _write = rv.write + + def _safe_write(s: str) -> int: + try: + return _write(s) + except BaseException: + ansi_wrapper.reset_all() + raise + + rv.write = _safe_write # type: ignore[method-assign] + + try: + _ansi_stream_wrappers[stream] = rv + except Exception: + pass + + return rv + +else: + + def _get_argv_encoding() -> str: + return getattr(sys.stdin, "encoding", None) or sys.getfilesystemencoding() + + def _get_windows_console_stream( + f: t.TextIO, encoding: str | None, errors: str | None + ) -> t.TextIO | None: + return None + + +def term_len(x: str) -> int: + return len(strip_ansi(x)) + + +def isatty(stream: t.IO[t.Any]) -> bool: + try: + return stream.isatty() + except Exception: + return False + + +def _make_cached_stream_func( + src_func: t.Callable[[], t.TextIO | None], + wrapper_func: t.Callable[[], t.TextIO], +) -> t.Callable[[], t.TextIO | None]: + cache: cabc.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary() + + def func() -> t.TextIO | None: + stream = src_func() + + if stream is None: + return None + + try: + rv = cache.get(stream) + except Exception: + rv = None + if rv is not None: + return rv + rv = wrapper_func() + try: + cache[stream] = rv + except Exception: + pass + return rv + + return func + + +_default_text_stdin = _make_cached_stream_func(lambda: sys.stdin, get_text_stdin) +_default_text_stdout = _make_cached_stream_func(lambda: sys.stdout, get_text_stdout) +_default_text_stderr = _make_cached_stream_func(lambda: sys.stderr, get_text_stderr) + + +binary_streams: cabc.Mapping[str, t.Callable[[], t.BinaryIO]] = { + "stdin": get_binary_stdin, + "stdout": get_binary_stdout, + "stderr": get_binary_stderr, +} + +text_streams: cabc.Mapping[str, t.Callable[[str | None, str | None], t.TextIO]] = { + "stdin": get_text_stdin, + "stdout": get_text_stdout, + "stderr": get_text_stderr, +} diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click/_termui_impl.py b/tools/converter-generator/venv/lib/python3.11/site-packages/click/_termui_impl.py new file mode 100644 index 0000000..47f87b8 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click/_termui_impl.py @@ -0,0 +1,847 @@ +""" +This module contains implementations for the termui module. To keep the +import time of Click down, some infrequently used functionality is +placed in this module and only imported as needed. +""" + +from __future__ import annotations + +import collections.abc as cabc +import contextlib +import math +import os +import shlex +import sys +import time +import typing as t +from gettext import gettext as _ +from io import StringIO +from pathlib import Path +from types import TracebackType + +from ._compat import _default_text_stdout +from ._compat import CYGWIN +from ._compat import get_best_encoding +from ._compat import isatty +from ._compat import open_stream +from ._compat import strip_ansi +from ._compat import term_len +from ._compat import WIN +from .exceptions import ClickException +from .utils import echo + +V = t.TypeVar("V") + +if os.name == "nt": + BEFORE_BAR = "\r" + AFTER_BAR = "\n" +else: + BEFORE_BAR = "\r\033[?25l" + AFTER_BAR = "\033[?25h\n" + + +class ProgressBar(t.Generic[V]): + def __init__( + self, + iterable: cabc.Iterable[V] | None, + length: int | None = None, + fill_char: str = "#", + empty_char: str = " ", + bar_template: str = "%(bar)s", + info_sep: str = " ", + hidden: bool = False, + show_eta: bool = True, + show_percent: bool | None = None, + show_pos: bool = False, + item_show_func: t.Callable[[V | None], str | None] | None = None, + label: str | None = None, + file: t.TextIO | None = None, + color: bool | None = None, + update_min_steps: int = 1, + width: int = 30, + ) -> None: + self.fill_char = fill_char + self.empty_char = empty_char + self.bar_template = bar_template + self.info_sep = info_sep + self.hidden = hidden + self.show_eta = show_eta + self.show_percent = show_percent + self.show_pos = show_pos + self.item_show_func = item_show_func + self.label: str = label or "" + + if file is None: + file = _default_text_stdout() + + # There are no standard streams attached to write to. For example, + # pythonw on Windows. + if file is None: + file = StringIO() + + self.file = file + self.color = color + self.update_min_steps = update_min_steps + self._completed_intervals = 0 + self.width: int = width + self.autowidth: bool = width == 0 + + if length is None: + from operator import length_hint + + length = length_hint(iterable, -1) + + if length == -1: + length = None + if iterable is None: + if length is None: + raise TypeError("iterable or length is required") + iterable = t.cast("cabc.Iterable[V]", range(length)) + self.iter: cabc.Iterable[V] = iter(iterable) + self.length = length + self.pos: int = 0 + self.avg: list[float] = [] + self.last_eta: float + self.start: float + self.start = self.last_eta = time.time() + self.eta_known: bool = False + self.finished: bool = False + self.max_width: int | None = None + self.entered: bool = False + self.current_item: V | None = None + self._is_atty = isatty(self.file) + self._last_line: str | None = None + + def __enter__(self) -> ProgressBar[V]: + self.entered = True + self.render_progress() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + tb: TracebackType | None, + ) -> None: + self.render_finish() + + def __iter__(self) -> cabc.Iterator[V]: + if not self.entered: + raise RuntimeError("You need to use progress bars in a with block.") + self.render_progress() + return self.generator() + + def __next__(self) -> V: + # Iteration is defined in terms of a generator function, + # returned by iter(self); use that to define next(). This works + # because `self.iter` is an iterable consumed by that generator, + # so it is re-entry safe. Calling `next(self.generator())` + # twice works and does "what you want". + return next(iter(self)) + + def render_finish(self) -> None: + if self.hidden or not self._is_atty: + return + self.file.write(AFTER_BAR) + self.file.flush() + + @property + def pct(self) -> float: + if self.finished: + return 1.0 + return min(self.pos / (float(self.length or 1) or 1), 1.0) + + @property + def time_per_iteration(self) -> float: + if not self.avg: + return 0.0 + return sum(self.avg) / float(len(self.avg)) + + @property + def eta(self) -> float: + if self.length is not None and not self.finished: + return self.time_per_iteration * (self.length - self.pos) + return 0.0 + + def format_eta(self) -> str: + if self.eta_known: + t = int(self.eta) + seconds = t % 60 + t //= 60 + minutes = t % 60 + t //= 60 + hours = t % 24 + t //= 24 + if t > 0: + return f"{t}d {hours:02}:{minutes:02}:{seconds:02}" + else: + return f"{hours:02}:{minutes:02}:{seconds:02}" + return "" + + def format_pos(self) -> str: + pos = str(self.pos) + if self.length is not None: + pos += f"/{self.length}" + return pos + + def format_pct(self) -> str: + return f"{int(self.pct * 100): 4}%"[1:] + + def format_bar(self) -> str: + if self.length is not None: + bar_length = int(self.pct * self.width) + bar = self.fill_char * bar_length + bar += self.empty_char * (self.width - bar_length) + elif self.finished: + bar = self.fill_char * self.width + else: + chars = list(self.empty_char * (self.width or 1)) + if self.time_per_iteration != 0: + chars[ + int( + (math.cos(self.pos * self.time_per_iteration) / 2.0 + 0.5) + * self.width + ) + ] = self.fill_char + bar = "".join(chars) + return bar + + def format_progress_line(self) -> str: + show_percent = self.show_percent + + info_bits = [] + if self.length is not None and show_percent is None: + show_percent = not self.show_pos + + if self.show_pos: + info_bits.append(self.format_pos()) + if show_percent: + info_bits.append(self.format_pct()) + if self.show_eta and self.eta_known and not self.finished: + info_bits.append(self.format_eta()) + if self.item_show_func is not None: + item_info = self.item_show_func(self.current_item) + if item_info is not None: + info_bits.append(item_info) + + return ( + self.bar_template + % { + "label": self.label, + "bar": self.format_bar(), + "info": self.info_sep.join(info_bits), + } + ).rstrip() + + def render_progress(self) -> None: + if self.hidden: + return + + if not self._is_atty: + # Only output the label once if the output is not a TTY. + if self._last_line != self.label: + self._last_line = self.label + echo(self.label, file=self.file, color=self.color) + return + + buf = [] + # Update width in case the terminal has been resized + if self.autowidth: + import shutil + + old_width = self.width + self.width = 0 + clutter_length = term_len(self.format_progress_line()) + new_width = max(0, shutil.get_terminal_size().columns - clutter_length) + if new_width < old_width and self.max_width is not None: + buf.append(BEFORE_BAR) + buf.append(" " * self.max_width) + self.max_width = new_width + self.width = new_width + + clear_width = self.width + if self.max_width is not None: + clear_width = self.max_width + + buf.append(BEFORE_BAR) + line = self.format_progress_line() + line_len = term_len(line) + if self.max_width is None or self.max_width < line_len: + self.max_width = line_len + + buf.append(line) + buf.append(" " * (clear_width - line_len)) + line = "".join(buf) + # Render the line only if it changed. + + if line != self._last_line: + self._last_line = line + echo(line, file=self.file, color=self.color, nl=False) + self.file.flush() + + def make_step(self, n_steps: int) -> None: + self.pos += n_steps + if self.length is not None and self.pos >= self.length: + self.finished = True + + if (time.time() - self.last_eta) < 1.0: + return + + self.last_eta = time.time() + + # self.avg is a rolling list of length <= 7 of steps where steps are + # defined as time elapsed divided by the total progress through + # self.length. + if self.pos: + step = (time.time() - self.start) / self.pos + else: + step = time.time() - self.start + + self.avg = self.avg[-6:] + [step] + + self.eta_known = self.length is not None + + def update(self, n_steps: int, current_item: V | None = None) -> None: + """Update the progress bar by advancing a specified number of + steps, and optionally set the ``current_item`` for this new + position. + + :param n_steps: Number of steps to advance. + :param current_item: Optional item to set as ``current_item`` + for the updated position. + + .. versionchanged:: 8.0 + Added the ``current_item`` optional parameter. + + .. versionchanged:: 8.0 + Only render when the number of steps meets the + ``update_min_steps`` threshold. + """ + if current_item is not None: + self.current_item = current_item + + self._completed_intervals += n_steps + + if self._completed_intervals >= self.update_min_steps: + self.make_step(self._completed_intervals) + self.render_progress() + self._completed_intervals = 0 + + def finish(self) -> None: + self.eta_known = False + self.current_item = None + self.finished = True + + def generator(self) -> cabc.Iterator[V]: + """Return a generator which yields the items added to the bar + during construction, and updates the progress bar *after* the + yielded block returns. + """ + # WARNING: the iterator interface for `ProgressBar` relies on + # this and only works because this is a simple generator which + # doesn't create or manage additional state. If this function + # changes, the impact should be evaluated both against + # `iter(bar)` and `next(bar)`. `next()` in particular may call + # `self.generator()` repeatedly, and this must remain safe in + # order for that interface to work. + if not self.entered: + raise RuntimeError("You need to use progress bars in a with block.") + + if not self._is_atty: + yield from self.iter + else: + for rv in self.iter: + self.current_item = rv + + # This allows show_item_func to be updated before the + # item is processed. Only trigger at the beginning of + # the update interval. + if self._completed_intervals == 0: + self.render_progress() + + yield rv + self.update(1) + + self.finish() + self.render_progress() + + +def pager(generator: cabc.Iterable[str], color: bool | None = None) -> None: + """Decide what method to use for paging through text.""" + stdout = _default_text_stdout() + + # There are no standard streams attached to write to. For example, + # pythonw on Windows. + if stdout is None: + stdout = StringIO() + + if not isatty(sys.stdin) or not isatty(stdout): + return _nullpager(stdout, generator, color) + + # Split and normalize the pager command into parts. + pager_cmd_parts = shlex.split(os.environ.get("PAGER", ""), posix=False) + if pager_cmd_parts: + if WIN: + if _tempfilepager(generator, pager_cmd_parts, color): + return + elif _pipepager(generator, pager_cmd_parts, color): + return + + if os.environ.get("TERM") in ("dumb", "emacs"): + return _nullpager(stdout, generator, color) + if (WIN or sys.platform.startswith("os2")) and _tempfilepager( + generator, ["more"], color + ): + return + if _pipepager(generator, ["less"], color): + return + + import tempfile + + fd, filename = tempfile.mkstemp() + os.close(fd) + try: + if _pipepager(generator, ["more"], color): + return + return _nullpager(stdout, generator, color) + finally: + os.unlink(filename) + + +def _pipepager( + generator: cabc.Iterable[str], cmd_parts: list[str], color: bool | None +) -> bool: + """Page through text by feeding it to another program. Invoking a + pager through this might support colors. + + Returns `True` if the command was found, `False` otherwise and thus another + pager should be attempted. + """ + # Split the command into the invoked CLI and its parameters. + if not cmd_parts: + return False + + import shutil + + cmd = cmd_parts[0] + cmd_params = cmd_parts[1:] + + cmd_filepath = shutil.which(cmd) + if not cmd_filepath: + return False + # Resolves symlinks and produces a normalized absolute path string. + cmd_path = Path(cmd_filepath).resolve() + cmd_name = cmd_path.name + + import subprocess + + # Make a local copy of the environment to not affect the global one. + env = dict(os.environ) + + # If we're piping to less and the user hasn't decided on colors, we enable + # them by default we find the -R flag in the command line arguments. + if color is None and cmd_name == "less": + less_flags = f"{os.environ.get('LESS', '')}{' '.join(cmd_params)}" + if not less_flags: + env["LESS"] = "-R" + color = True + elif "r" in less_flags or "R" in less_flags: + color = True + + c = subprocess.Popen( + [str(cmd_path)] + cmd_params, + shell=True, + stdin=subprocess.PIPE, + env=env, + errors="replace", + text=True, + ) + assert c.stdin is not None + try: + for text in generator: + if not color: + text = strip_ansi(text) + + c.stdin.write(text) + except BrokenPipeError: + # In case the pager exited unexpectedly, ignore the broken pipe error. + pass + except Exception as e: + # In case there is an exception we want to close the pager immediately + # and let the caller handle it. + # Otherwise the pager will keep running, and the user may not notice + # the error message, or worse yet it may leave the terminal in a broken state. + c.terminate() + raise e + finally: + # We must close stdin and wait for the pager to exit before we continue + try: + c.stdin.close() + # Close implies flush, so it might throw a BrokenPipeError if the pager + # process exited already. + except BrokenPipeError: + pass + + # Less doesn't respect ^C, but catches it for its own UI purposes (aborting + # search or other commands inside less). + # + # That means when the user hits ^C, the parent process (click) terminates, + # but less is still alive, paging the output and messing up the terminal. + # + # If the user wants to make the pager exit on ^C, they should set + # `LESS='-K'`. It's not our decision to make. + while True: + try: + c.wait() + except KeyboardInterrupt: + pass + else: + break + + return True + + +def _tempfilepager( + generator: cabc.Iterable[str], cmd_parts: list[str], color: bool | None +) -> bool: + """Page through text by invoking a program on a temporary file. + + Returns `True` if the command was found, `False` otherwise and thus another + pager should be attempted. + """ + # Split the command into the invoked CLI and its parameters. + if not cmd_parts: + return False + + import shutil + + cmd = cmd_parts[0] + + cmd_filepath = shutil.which(cmd) + if not cmd_filepath: + return False + # Resolves symlinks and produces a normalized absolute path string. + cmd_path = Path(cmd_filepath).resolve() + + import subprocess + import tempfile + + fd, filename = tempfile.mkstemp() + # TODO: This never terminates if the passed generator never terminates. + text = "".join(generator) + if not color: + text = strip_ansi(text) + encoding = get_best_encoding(sys.stdout) + with open_stream(filename, "wb")[0] as f: + f.write(text.encode(encoding)) + try: + subprocess.call([str(cmd_path), filename]) + except OSError: + # Command not found + pass + finally: + os.close(fd) + os.unlink(filename) + + return True + + +def _nullpager( + stream: t.TextIO, generator: cabc.Iterable[str], color: bool | None +) -> None: + """Simply print unformatted text. This is the ultimate fallback.""" + for text in generator: + if not color: + text = strip_ansi(text) + stream.write(text) + + +class Editor: + def __init__( + self, + editor: str | None = None, + env: cabc.Mapping[str, str] | None = None, + require_save: bool = True, + extension: str = ".txt", + ) -> None: + self.editor = editor + self.env = env + self.require_save = require_save + self.extension = extension + + def get_editor(self) -> str: + if self.editor is not None: + return self.editor + for key in "VISUAL", "EDITOR": + rv = os.environ.get(key) + if rv: + return rv + if WIN: + return "notepad" + + from shutil import which + + for editor in "sensible-editor", "vim", "nano": + if which(editor) is not None: + return editor + return "vi" + + def edit_files(self, filenames: cabc.Iterable[str]) -> None: + import subprocess + + editor = self.get_editor() + environ: dict[str, str] | None = None + + if self.env: + environ = os.environ.copy() + environ.update(self.env) + + exc_filename = " ".join(f'"{filename}"' for filename in filenames) + + try: + c = subprocess.Popen( + args=f"{editor} {exc_filename}", env=environ, shell=True + ) + exit_code = c.wait() + if exit_code != 0: + raise ClickException( + _("{editor}: Editing failed").format(editor=editor) + ) + except OSError as e: + raise ClickException( + _("{editor}: Editing failed: {e}").format(editor=editor, e=e) + ) from e + + @t.overload + def edit(self, text: bytes | bytearray) -> bytes | None: ... + + # We cannot know whether or not the type expected is str or bytes when None + # is passed, so str is returned as that was what was done before. + @t.overload + def edit(self, text: str | None) -> str | None: ... + + def edit(self, text: str | bytes | bytearray | None) -> str | bytes | None: + import tempfile + + if text is None: + data: bytes | bytearray = b"" + elif isinstance(text, (bytes, bytearray)): + data = text + else: + if text and not text.endswith("\n"): + text += "\n" + + if WIN: + data = text.replace("\n", "\r\n").encode("utf-8-sig") + else: + data = text.encode("utf-8") + + fd, name = tempfile.mkstemp(prefix="editor-", suffix=self.extension) + f: t.BinaryIO + + try: + with os.fdopen(fd, "wb") as f: + f.write(data) + + # If the filesystem resolution is 1 second, like Mac OS + # 10.12 Extended, or 2 seconds, like FAT32, and the editor + # closes very fast, require_save can fail. Set the modified + # time to be 2 seconds in the past to work around this. + os.utime(name, (os.path.getatime(name), os.path.getmtime(name) - 2)) + # Depending on the resolution, the exact value might not be + # recorded, so get the new recorded value. + timestamp = os.path.getmtime(name) + + self.edit_files((name,)) + + if self.require_save and os.path.getmtime(name) == timestamp: + return None + + with open(name, "rb") as f: + rv = f.read() + + if isinstance(text, (bytes, bytearray)): + return rv + + return rv.decode("utf-8-sig").replace("\r\n", "\n") + finally: + os.unlink(name) + + +def open_url(url: str, wait: bool = False, locate: bool = False) -> int: + import subprocess + + def _unquote_file(url: str) -> str: + from urllib.parse import unquote + + if url.startswith("file://"): + url = unquote(url[7:]) + + return url + + if sys.platform == "darwin": + args = ["open"] + if wait: + args.append("-W") + if locate: + args.append("-R") + args.append(_unquote_file(url)) + null = open("/dev/null", "w") + try: + return subprocess.Popen(args, stderr=null).wait() + finally: + null.close() + elif WIN: + if locate: + url = _unquote_file(url) + args = ["explorer", f"/select,{url}"] + else: + args = ["start"] + if wait: + args.append("/WAIT") + args.append("") + args.append(url) + try: + return subprocess.call(args) + except OSError: + # Command not found + return 127 + elif CYGWIN: + if locate: + url = _unquote_file(url) + args = ["cygstart", os.path.dirname(url)] + else: + args = ["cygstart"] + if wait: + args.append("-w") + args.append(url) + try: + return subprocess.call(args) + except OSError: + # Command not found + return 127 + + try: + if locate: + url = os.path.dirname(_unquote_file(url)) or "." + else: + url = _unquote_file(url) + c = subprocess.Popen(["xdg-open", url]) + if wait: + return c.wait() + return 0 + except OSError: + if url.startswith(("http://", "https://")) and not locate and not wait: + import webbrowser + + webbrowser.open(url) + return 0 + return 1 + + +def _translate_ch_to_exc(ch: str) -> None: + if ch == "\x03": + raise KeyboardInterrupt() + + if ch == "\x04" and not WIN: # Unix-like, Ctrl+D + raise EOFError() + + if ch == "\x1a" and WIN: # Windows, Ctrl+Z + raise EOFError() + + return None + + +if sys.platform == "win32": + import msvcrt + + @contextlib.contextmanager + def raw_terminal() -> cabc.Iterator[int]: + yield -1 + + def getchar(echo: bool) -> str: + # The function `getch` will return a bytes object corresponding to + # the pressed character. Since Windows 10 build 1803, it will also + # return \x00 when called a second time after pressing a regular key. + # + # `getwch` does not share this probably-bugged behavior. Moreover, it + # returns a Unicode object by default, which is what we want. + # + # Either of these functions will return \x00 or \xe0 to indicate + # a special key, and you need to call the same function again to get + # the "rest" of the code. The fun part is that \u00e0 is + # "latin small letter a with grave", so if you type that on a French + # keyboard, you _also_ get a \xe0. + # E.g., consider the Up arrow. This returns \xe0 and then \x48. The + # resulting Unicode string reads as "a with grave" + "capital H". + # This is indistinguishable from when the user actually types + # "a with grave" and then "capital H". + # + # When \xe0 is returned, we assume it's part of a special-key sequence + # and call `getwch` again, but that means that when the user types + # the \u00e0 character, `getchar` doesn't return until a second + # character is typed. + # The alternative is returning immediately, but that would mess up + # cross-platform handling of arrow keys and others that start with + # \xe0. Another option is using `getch`, but then we can't reliably + # read non-ASCII characters, because return values of `getch` are + # limited to the current 8-bit codepage. + # + # Anyway, Click doesn't claim to do this Right(tm), and using `getwch` + # is doing the right thing in more situations than with `getch`. + + if echo: + func = t.cast(t.Callable[[], str], msvcrt.getwche) + else: + func = t.cast(t.Callable[[], str], msvcrt.getwch) + + rv = func() + + if rv in ("\x00", "\xe0"): + # \x00 and \xe0 are control characters that indicate special key, + # see above. + rv += func() + + _translate_ch_to_exc(rv) + return rv + +else: + import termios + import tty + + @contextlib.contextmanager + def raw_terminal() -> cabc.Iterator[int]: + f: t.TextIO | None + fd: int + + if not isatty(sys.stdin): + f = open("/dev/tty") + fd = f.fileno() + else: + fd = sys.stdin.fileno() + f = None + + try: + old_settings = termios.tcgetattr(fd) + + try: + tty.setraw(fd) + yield fd + finally: + termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) + sys.stdout.flush() + + if f is not None: + f.close() + except termios.error: + pass + + def getchar(echo: bool) -> str: + with raw_terminal() as fd: + ch = os.read(fd, 32).decode(get_best_encoding(sys.stdin), "replace") + + if echo and isatty(sys.stdout): + sys.stdout.write(ch) + + _translate_ch_to_exc(ch) + return ch diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click/_textwrap.py b/tools/converter-generator/venv/lib/python3.11/site-packages/click/_textwrap.py new file mode 100644 index 0000000..97fbee3 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click/_textwrap.py @@ -0,0 +1,51 @@ +from __future__ import annotations + +import collections.abc as cabc +import textwrap +from contextlib import contextmanager + + +class TextWrapper(textwrap.TextWrapper): + def _handle_long_word( + self, + reversed_chunks: list[str], + cur_line: list[str], + cur_len: int, + width: int, + ) -> None: + space_left = max(width - cur_len, 1) + + if self.break_long_words: + last = reversed_chunks[-1] + cut = last[:space_left] + res = last[space_left:] + cur_line.append(cut) + reversed_chunks[-1] = res + elif not cur_line: + cur_line.append(reversed_chunks.pop()) + + @contextmanager + def extra_indent(self, indent: str) -> cabc.Iterator[None]: + old_initial_indent = self.initial_indent + old_subsequent_indent = self.subsequent_indent + self.initial_indent += indent + self.subsequent_indent += indent + + try: + yield + finally: + self.initial_indent = old_initial_indent + self.subsequent_indent = old_subsequent_indent + + def indent_only(self, text: str) -> str: + rv = [] + + for idx, line in enumerate(text.splitlines()): + indent = self.initial_indent + + if idx > 0: + indent = self.subsequent_indent + + rv.append(f"{indent}{line}") + + return "\n".join(rv) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click/_utils.py b/tools/converter-generator/venv/lib/python3.11/site-packages/click/_utils.py new file mode 100644 index 0000000..09fb008 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click/_utils.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +import enum +import typing as t + + +class Sentinel(enum.Enum): + """Enum used to define sentinel values. + + .. seealso:: + + `PEP 661 - Sentinel Values `_. + """ + + UNSET = object() + FLAG_NEEDS_VALUE = object() + + def __repr__(self) -> str: + return f"{self.__class__.__name__}.{self.name}" + + +UNSET = Sentinel.UNSET +"""Sentinel used to indicate that a value is not set.""" + +FLAG_NEEDS_VALUE = Sentinel.FLAG_NEEDS_VALUE +"""Sentinel used to indicate an option was passed as a flag without a +value but is not a flag option. + +``Option.consume_value`` uses this to prompt or use the ``flag_value``. +""" + +T_UNSET = t.Literal[UNSET] # type: ignore[valid-type] +"""Type hint for the :data:`UNSET` sentinel value.""" + +T_FLAG_NEEDS_VALUE = t.Literal[FLAG_NEEDS_VALUE] # type: ignore[valid-type] +"""Type hint for the :data:`FLAG_NEEDS_VALUE` sentinel value.""" diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click/_winconsole.py b/tools/converter-generator/venv/lib/python3.11/site-packages/click/_winconsole.py new file mode 100644 index 0000000..e56c7c6 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click/_winconsole.py @@ -0,0 +1,296 @@ +# This module is based on the excellent work by Adam Bartoš who +# provided a lot of what went into the implementation here in +# the discussion to issue1602 in the Python bug tracker. +# +# There are some general differences in regards to how this works +# compared to the original patches as we do not need to patch +# the entire interpreter but just work in our little world of +# echo and prompt. +from __future__ import annotations + +import collections.abc as cabc +import io +import sys +import time +import typing as t +from ctypes import Array +from ctypes import byref +from ctypes import c_char +from ctypes import c_char_p +from ctypes import c_int +from ctypes import c_ssize_t +from ctypes import c_ulong +from ctypes import c_void_p +from ctypes import POINTER +from ctypes import py_object +from ctypes import Structure +from ctypes.wintypes import DWORD +from ctypes.wintypes import HANDLE +from ctypes.wintypes import LPCWSTR +from ctypes.wintypes import LPWSTR + +from ._compat import _NonClosingTextIOWrapper + +assert sys.platform == "win32" +import msvcrt # noqa: E402 +from ctypes import windll # noqa: E402 +from ctypes import WINFUNCTYPE # noqa: E402 + +c_ssize_p = POINTER(c_ssize_t) + +kernel32 = windll.kernel32 +GetStdHandle = kernel32.GetStdHandle +ReadConsoleW = kernel32.ReadConsoleW +WriteConsoleW = kernel32.WriteConsoleW +GetConsoleMode = kernel32.GetConsoleMode +GetLastError = kernel32.GetLastError +GetCommandLineW = WINFUNCTYPE(LPWSTR)(("GetCommandLineW", windll.kernel32)) +CommandLineToArgvW = WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))( + ("CommandLineToArgvW", windll.shell32) +) +LocalFree = WINFUNCTYPE(c_void_p, c_void_p)(("LocalFree", windll.kernel32)) + +STDIN_HANDLE = GetStdHandle(-10) +STDOUT_HANDLE = GetStdHandle(-11) +STDERR_HANDLE = GetStdHandle(-12) + +PyBUF_SIMPLE = 0 +PyBUF_WRITABLE = 1 + +ERROR_SUCCESS = 0 +ERROR_NOT_ENOUGH_MEMORY = 8 +ERROR_OPERATION_ABORTED = 995 + +STDIN_FILENO = 0 +STDOUT_FILENO = 1 +STDERR_FILENO = 2 + +EOF = b"\x1a" +MAX_BYTES_WRITTEN = 32767 + +if t.TYPE_CHECKING: + try: + # Using `typing_extensions.Buffer` instead of `collections.abc` + # on Windows for some reason does not have `Sized` implemented. + from collections.abc import Buffer # type: ignore + except ImportError: + from typing_extensions import Buffer + +try: + from ctypes import pythonapi +except ImportError: + # On PyPy we cannot get buffers so our ability to operate here is + # severely limited. + get_buffer = None +else: + + class Py_buffer(Structure): + _fields_ = [ # noqa: RUF012 + ("buf", c_void_p), + ("obj", py_object), + ("len", c_ssize_t), + ("itemsize", c_ssize_t), + ("readonly", c_int), + ("ndim", c_int), + ("format", c_char_p), + ("shape", c_ssize_p), + ("strides", c_ssize_p), + ("suboffsets", c_ssize_p), + ("internal", c_void_p), + ] + + PyObject_GetBuffer = pythonapi.PyObject_GetBuffer + PyBuffer_Release = pythonapi.PyBuffer_Release + + def get_buffer(obj: Buffer, writable: bool = False) -> Array[c_char]: + buf = Py_buffer() + flags: int = PyBUF_WRITABLE if writable else PyBUF_SIMPLE + PyObject_GetBuffer(py_object(obj), byref(buf), flags) + + try: + buffer_type = c_char * buf.len + out: Array[c_char] = buffer_type.from_address(buf.buf) + return out + finally: + PyBuffer_Release(byref(buf)) + + +class _WindowsConsoleRawIOBase(io.RawIOBase): + def __init__(self, handle: int | None) -> None: + self.handle = handle + + def isatty(self) -> t.Literal[True]: + super().isatty() + return True + + +class _WindowsConsoleReader(_WindowsConsoleRawIOBase): + def readable(self) -> t.Literal[True]: + return True + + def readinto(self, b: Buffer) -> int: + bytes_to_be_read = len(b) + if not bytes_to_be_read: + return 0 + elif bytes_to_be_read % 2: + raise ValueError( + "cannot read odd number of bytes from UTF-16-LE encoded console" + ) + + buffer = get_buffer(b, writable=True) + code_units_to_be_read = bytes_to_be_read // 2 + code_units_read = c_ulong() + + rv = ReadConsoleW( + HANDLE(self.handle), + buffer, + code_units_to_be_read, + byref(code_units_read), + None, + ) + if GetLastError() == ERROR_OPERATION_ABORTED: + # wait for KeyboardInterrupt + time.sleep(0.1) + if not rv: + raise OSError(f"Windows error: {GetLastError()}") + + if buffer[0] == EOF: + return 0 + return 2 * code_units_read.value + + +class _WindowsConsoleWriter(_WindowsConsoleRawIOBase): + def writable(self) -> t.Literal[True]: + return True + + @staticmethod + def _get_error_message(errno: int) -> str: + if errno == ERROR_SUCCESS: + return "ERROR_SUCCESS" + elif errno == ERROR_NOT_ENOUGH_MEMORY: + return "ERROR_NOT_ENOUGH_MEMORY" + return f"Windows error {errno}" + + def write(self, b: Buffer) -> int: + bytes_to_be_written = len(b) + buf = get_buffer(b) + code_units_to_be_written = min(bytes_to_be_written, MAX_BYTES_WRITTEN) // 2 + code_units_written = c_ulong() + + WriteConsoleW( + HANDLE(self.handle), + buf, + code_units_to_be_written, + byref(code_units_written), + None, + ) + bytes_written = 2 * code_units_written.value + + if bytes_written == 0 and bytes_to_be_written > 0: + raise OSError(self._get_error_message(GetLastError())) + return bytes_written + + +class ConsoleStream: + def __init__(self, text_stream: t.TextIO, byte_stream: t.BinaryIO) -> None: + self._text_stream = text_stream + self.buffer = byte_stream + + @property + def name(self) -> str: + return self.buffer.name + + def write(self, x: t.AnyStr) -> int: + if isinstance(x, str): + return self._text_stream.write(x) + try: + self.flush() + except Exception: + pass + return self.buffer.write(x) + + def writelines(self, lines: cabc.Iterable[t.AnyStr]) -> None: + for line in lines: + self.write(line) + + def __getattr__(self, name: str) -> t.Any: + return getattr(self._text_stream, name) + + def isatty(self) -> bool: + return self.buffer.isatty() + + def __repr__(self) -> str: + return f"" + + +def _get_text_stdin(buffer_stream: t.BinaryIO) -> t.TextIO: + text_stream = _NonClosingTextIOWrapper( + io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)), + "utf-16-le", + "strict", + line_buffering=True, + ) + return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream)) + + +def _get_text_stdout(buffer_stream: t.BinaryIO) -> t.TextIO: + text_stream = _NonClosingTextIOWrapper( + io.BufferedWriter(_WindowsConsoleWriter(STDOUT_HANDLE)), + "utf-16-le", + "strict", + line_buffering=True, + ) + return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream)) + + +def _get_text_stderr(buffer_stream: t.BinaryIO) -> t.TextIO: + text_stream = _NonClosingTextIOWrapper( + io.BufferedWriter(_WindowsConsoleWriter(STDERR_HANDLE)), + "utf-16-le", + "strict", + line_buffering=True, + ) + return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream)) + + +_stream_factories: cabc.Mapping[int, t.Callable[[t.BinaryIO], t.TextIO]] = { + 0: _get_text_stdin, + 1: _get_text_stdout, + 2: _get_text_stderr, +} + + +def _is_console(f: t.TextIO) -> bool: + if not hasattr(f, "fileno"): + return False + + try: + fileno = f.fileno() + except (OSError, io.UnsupportedOperation): + return False + + handle = msvcrt.get_osfhandle(fileno) + return bool(GetConsoleMode(handle, byref(DWORD()))) + + +def _get_windows_console_stream( + f: t.TextIO, encoding: str | None, errors: str | None +) -> t.TextIO | None: + if ( + get_buffer is None + or encoding not in {"utf-16-le", None} + or errors not in {"strict", None} + or not _is_console(f) + ): + return None + + func = _stream_factories.get(f.fileno()) + if func is None: + return None + + b = getattr(f, "buffer", None) + + if b is None: + return None + + return func(b) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click/core.py b/tools/converter-generator/venv/lib/python3.11/site-packages/click/core.py new file mode 100644 index 0000000..ff2f74a --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click/core.py @@ -0,0 +1,3347 @@ +from __future__ import annotations + +import collections.abc as cabc +import enum +import errno +import inspect +import os +import sys +import typing as t +from collections import abc +from collections import Counter +from contextlib import AbstractContextManager +from contextlib import contextmanager +from contextlib import ExitStack +from functools import update_wrapper +from gettext import gettext as _ +from gettext import ngettext +from itertools import repeat +from types import TracebackType + +from . import types +from ._utils import FLAG_NEEDS_VALUE +from ._utils import UNSET +from .exceptions import Abort +from .exceptions import BadParameter +from .exceptions import ClickException +from .exceptions import Exit +from .exceptions import MissingParameter +from .exceptions import NoArgsIsHelpError +from .exceptions import UsageError +from .formatting import HelpFormatter +from .formatting import join_options +from .globals import pop_context +from .globals import push_context +from .parser import _OptionParser +from .parser import _split_opt +from .termui import confirm +from .termui import prompt +from .termui import style +from .utils import _detect_program_name +from .utils import _expand_args +from .utils import echo +from .utils import make_default_short_help +from .utils import make_str +from .utils import PacifyFlushWrapper + +if t.TYPE_CHECKING: + from .shell_completion import CompletionItem + +F = t.TypeVar("F", bound="t.Callable[..., t.Any]") +V = t.TypeVar("V") + + +def _complete_visible_commands( + ctx: Context, incomplete: str +) -> cabc.Iterator[tuple[str, Command]]: + """List all the subcommands of a group that start with the + incomplete value and aren't hidden. + + :param ctx: Invocation context for the group. + :param incomplete: Value being completed. May be empty. + """ + multi = t.cast(Group, ctx.command) + + for name in multi.list_commands(ctx): + if name.startswith(incomplete): + command = multi.get_command(ctx, name) + + if command is not None and not command.hidden: + yield name, command + + +def _check_nested_chain( + base_command: Group, cmd_name: str, cmd: Command, register: bool = False +) -> None: + if not base_command.chain or not isinstance(cmd, Group): + return + + if register: + message = ( + f"It is not possible to add the group {cmd_name!r} to another" + f" group {base_command.name!r} that is in chain mode." + ) + else: + message = ( + f"Found the group {cmd_name!r} as subcommand to another group " + f" {base_command.name!r} that is in chain mode. This is not supported." + ) + + raise RuntimeError(message) + + +def batch(iterable: cabc.Iterable[V], batch_size: int) -> list[tuple[V, ...]]: + return list(zip(*repeat(iter(iterable), batch_size), strict=False)) + + +@contextmanager +def augment_usage_errors( + ctx: Context, param: Parameter | None = None +) -> cabc.Iterator[None]: + """Context manager that attaches extra information to exceptions.""" + try: + yield + except BadParameter as e: + if e.ctx is None: + e.ctx = ctx + if param is not None and e.param is None: + e.param = param + raise + except UsageError as e: + if e.ctx is None: + e.ctx = ctx + raise + + +def iter_params_for_processing( + invocation_order: cabc.Sequence[Parameter], + declaration_order: cabc.Sequence[Parameter], +) -> list[Parameter]: + """Returns all declared parameters in the order they should be processed. + + The declared parameters are re-shuffled depending on the order in which + they were invoked, as well as the eagerness of each parameters. + + The invocation order takes precedence over the declaration order. I.e. the + order in which the user provided them to the CLI is respected. + + This behavior and its effect on callback evaluation is detailed at: + https://click.palletsprojects.com/en/stable/advanced/#callback-evaluation-order + """ + + def sort_key(item: Parameter) -> tuple[bool, float]: + try: + idx: float = invocation_order.index(item) + except ValueError: + idx = float("inf") + + return not item.is_eager, idx + + return sorted(declaration_order, key=sort_key) + + +class ParameterSource(enum.Enum): + """This is an :class:`~enum.Enum` that indicates the source of a + parameter's value. + + Use :meth:`click.Context.get_parameter_source` to get the + source for a parameter by name. + + .. versionchanged:: 8.0 + Use :class:`~enum.Enum` and drop the ``validate`` method. + + .. versionchanged:: 8.0 + Added the ``PROMPT`` value. + """ + + COMMANDLINE = enum.auto() + """The value was provided by the command line args.""" + ENVIRONMENT = enum.auto() + """The value was provided with an environment variable.""" + DEFAULT = enum.auto() + """Used the default specified by the parameter.""" + DEFAULT_MAP = enum.auto() + """Used a default provided by :attr:`Context.default_map`.""" + PROMPT = enum.auto() + """Used a prompt to confirm a default or provide a value.""" + + +class Context: + """The context is a special internal object that holds state relevant + for the script execution at every single level. It's normally invisible + to commands unless they opt-in to getting access to it. + + The context is useful as it can pass internal objects around and can + control special execution features such as reading data from + environment variables. + + A context can be used as context manager in which case it will call + :meth:`close` on teardown. + + :param command: the command class for this context. + :param parent: the parent context. + :param info_name: the info name for this invocation. Generally this + is the most descriptive name for the script or + command. For the toplevel script it is usually + the name of the script, for commands below it it's + the name of the script. + :param obj: an arbitrary object of user data. + :param auto_envvar_prefix: the prefix to use for automatic environment + variables. If this is `None` then reading + from environment variables is disabled. This + does not affect manually set environment + variables which are always read. + :param default_map: a dictionary (like object) with default values + for parameters. + :param terminal_width: the width of the terminal. The default is + inherit from parent context. If no context + defines the terminal width then auto + detection will be applied. + :param max_content_width: the maximum width for content rendered by + Click (this currently only affects help + pages). This defaults to 80 characters if + not overridden. In other words: even if the + terminal is larger than that, Click will not + format things wider than 80 characters by + default. In addition to that, formatters might + add some safety mapping on the right. + :param resilient_parsing: if this flag is enabled then Click will + parse without any interactivity or callback + invocation. Default values will also be + ignored. This is useful for implementing + things such as completion support. + :param allow_extra_args: if this is set to `True` then extra arguments + at the end will not raise an error and will be + kept on the context. The default is to inherit + from the command. + :param allow_interspersed_args: if this is set to `False` then options + and arguments cannot be mixed. The + default is to inherit from the command. + :param ignore_unknown_options: instructs click to ignore options it does + not know and keeps them for later + processing. + :param help_option_names: optionally a list of strings that define how + the default help parameter is named. The + default is ``['--help']``. + :param token_normalize_func: an optional function that is used to + normalize tokens (options, choices, + etc.). This for instance can be used to + implement case insensitive behavior. + :param color: controls if the terminal supports ANSI colors or not. The + default is autodetection. This is only needed if ANSI + codes are used in texts that Click prints which is by + default not the case. This for instance would affect + help output. + :param show_default: Show the default value for commands. If this + value is not set, it defaults to the value from the parent + context. ``Command.show_default`` overrides this default for the + specific command. + + .. versionchanged:: 8.2 + The ``protected_args`` attribute is deprecated and will be removed in + Click 9.0. ``args`` will contain remaining unparsed tokens. + + .. versionchanged:: 8.1 + The ``show_default`` parameter is overridden by + ``Command.show_default``, instead of the other way around. + + .. versionchanged:: 8.0 + The ``show_default`` parameter defaults to the value from the + parent context. + + .. versionchanged:: 7.1 + Added the ``show_default`` parameter. + + .. versionchanged:: 4.0 + Added the ``color``, ``ignore_unknown_options``, and + ``max_content_width`` parameters. + + .. versionchanged:: 3.0 + Added the ``allow_extra_args`` and ``allow_interspersed_args`` + parameters. + + .. versionchanged:: 2.0 + Added the ``resilient_parsing``, ``help_option_names``, and + ``token_normalize_func`` parameters. + """ + + #: The formatter class to create with :meth:`make_formatter`. + #: + #: .. versionadded:: 8.0 + formatter_class: type[HelpFormatter] = HelpFormatter + + def __init__( + self, + command: Command, + parent: Context | None = None, + info_name: str | None = None, + obj: t.Any | None = None, + auto_envvar_prefix: str | None = None, + default_map: cabc.MutableMapping[str, t.Any] | None = None, + terminal_width: int | None = None, + max_content_width: int | None = None, + resilient_parsing: bool = False, + allow_extra_args: bool | None = None, + allow_interspersed_args: bool | None = None, + ignore_unknown_options: bool | None = None, + help_option_names: list[str] | None = None, + token_normalize_func: t.Callable[[str], str] | None = None, + color: bool | None = None, + show_default: bool | None = None, + ) -> None: + #: the parent context or `None` if none exists. + self.parent = parent + #: the :class:`Command` for this context. + self.command = command + #: the descriptive information name + self.info_name = info_name + #: Map of parameter names to their parsed values. Parameters + #: with ``expose_value=False`` are not stored. + self.params: dict[str, t.Any] = {} + #: the leftover arguments. + self.args: list[str] = [] + #: protected arguments. These are arguments that are prepended + #: to `args` when certain parsing scenarios are encountered but + #: must be never propagated to another arguments. This is used + #: to implement nested parsing. + self._protected_args: list[str] = [] + #: the collected prefixes of the command's options. + self._opt_prefixes: set[str] = set(parent._opt_prefixes) if parent else set() + + if obj is None and parent is not None: + obj = parent.obj + + #: the user object stored. + self.obj: t.Any = obj + self._meta: dict[str, t.Any] = getattr(parent, "meta", {}) + + #: A dictionary (-like object) with defaults for parameters. + if ( + default_map is None + and info_name is not None + and parent is not None + and parent.default_map is not None + ): + default_map = parent.default_map.get(info_name) + + self.default_map: cabc.MutableMapping[str, t.Any] | None = default_map + + #: This flag indicates if a subcommand is going to be executed. A + #: group callback can use this information to figure out if it's + #: being executed directly or because the execution flow passes + #: onwards to a subcommand. By default it's None, but it can be + #: the name of the subcommand to execute. + #: + #: If chaining is enabled this will be set to ``'*'`` in case + #: any commands are executed. It is however not possible to + #: figure out which ones. If you require this knowledge you + #: should use a :func:`result_callback`. + self.invoked_subcommand: str | None = None + + if terminal_width is None and parent is not None: + terminal_width = parent.terminal_width + + #: The width of the terminal (None is autodetection). + self.terminal_width: int | None = terminal_width + + if max_content_width is None and parent is not None: + max_content_width = parent.max_content_width + + #: The maximum width of formatted content (None implies a sensible + #: default which is 80 for most things). + self.max_content_width: int | None = max_content_width + + if allow_extra_args is None: + allow_extra_args = command.allow_extra_args + + #: Indicates if the context allows extra args or if it should + #: fail on parsing. + #: + #: .. versionadded:: 3.0 + self.allow_extra_args = allow_extra_args + + if allow_interspersed_args is None: + allow_interspersed_args = command.allow_interspersed_args + + #: Indicates if the context allows mixing of arguments and + #: options or not. + #: + #: .. versionadded:: 3.0 + self.allow_interspersed_args: bool = allow_interspersed_args + + if ignore_unknown_options is None: + ignore_unknown_options = command.ignore_unknown_options + + #: Instructs click to ignore options that a command does not + #: understand and will store it on the context for later + #: processing. This is primarily useful for situations where you + #: want to call into external programs. Generally this pattern is + #: strongly discouraged because it's not possibly to losslessly + #: forward all arguments. + #: + #: .. versionadded:: 4.0 + self.ignore_unknown_options: bool = ignore_unknown_options + + if help_option_names is None: + if parent is not None: + help_option_names = parent.help_option_names + else: + help_option_names = ["--help"] + + #: The names for the help options. + self.help_option_names: list[str] = help_option_names + + if token_normalize_func is None and parent is not None: + token_normalize_func = parent.token_normalize_func + + #: An optional normalization function for tokens. This is + #: options, choices, commands etc. + self.token_normalize_func: t.Callable[[str], str] | None = token_normalize_func + + #: Indicates if resilient parsing is enabled. In that case Click + #: will do its best to not cause any failures and default values + #: will be ignored. Useful for completion. + self.resilient_parsing: bool = resilient_parsing + + # If there is no envvar prefix yet, but the parent has one and + # the command on this level has a name, we can expand the envvar + # prefix automatically. + if auto_envvar_prefix is None: + if ( + parent is not None + and parent.auto_envvar_prefix is not None + and self.info_name is not None + ): + auto_envvar_prefix = ( + f"{parent.auto_envvar_prefix}_{self.info_name.upper()}" + ) + else: + auto_envvar_prefix = auto_envvar_prefix.upper() + + if auto_envvar_prefix is not None: + auto_envvar_prefix = auto_envvar_prefix.replace("-", "_") + + self.auto_envvar_prefix: str | None = auto_envvar_prefix + + if color is None and parent is not None: + color = parent.color + + #: Controls if styling output is wanted or not. + self.color: bool | None = color + + if show_default is None and parent is not None: + show_default = parent.show_default + + #: Show option default values when formatting help text. + self.show_default: bool | None = show_default + + self._close_callbacks: list[t.Callable[[], t.Any]] = [] + self._depth = 0 + self._parameter_source: dict[str, ParameterSource] = {} + self._exit_stack = ExitStack() + + @property + def protected_args(self) -> list[str]: + import warnings + + warnings.warn( + "'protected_args' is deprecated and will be removed in Click 9.0." + " 'args' will contain remaining unparsed tokens.", + DeprecationWarning, + stacklevel=2, + ) + return self._protected_args + + def to_info_dict(self) -> dict[str, t.Any]: + """Gather information that could be useful for a tool generating + user-facing documentation. This traverses the entire CLI + structure. + + .. code-block:: python + + with Context(cli) as ctx: + info = ctx.to_info_dict() + + .. versionadded:: 8.0 + """ + return { + "command": self.command.to_info_dict(self), + "info_name": self.info_name, + "allow_extra_args": self.allow_extra_args, + "allow_interspersed_args": self.allow_interspersed_args, + "ignore_unknown_options": self.ignore_unknown_options, + "auto_envvar_prefix": self.auto_envvar_prefix, + } + + def __enter__(self) -> Context: + self._depth += 1 + push_context(self) + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + tb: TracebackType | None, + ) -> bool | None: + self._depth -= 1 + exit_result: bool | None = None + if self._depth == 0: + exit_result = self._close_with_exception_info(exc_type, exc_value, tb) + pop_context() + + return exit_result + + @contextmanager + def scope(self, cleanup: bool = True) -> cabc.Iterator[Context]: + """This helper method can be used with the context object to promote + it to the current thread local (see :func:`get_current_context`). + The default behavior of this is to invoke the cleanup functions which + can be disabled by setting `cleanup` to `False`. The cleanup + functions are typically used for things such as closing file handles. + + If the cleanup is intended the context object can also be directly + used as a context manager. + + Example usage:: + + with ctx.scope(): + assert get_current_context() is ctx + + This is equivalent:: + + with ctx: + assert get_current_context() is ctx + + .. versionadded:: 5.0 + + :param cleanup: controls if the cleanup functions should be run or + not. The default is to run these functions. In + some situations the context only wants to be + temporarily pushed in which case this can be disabled. + Nested pushes automatically defer the cleanup. + """ + if not cleanup: + self._depth += 1 + try: + with self as rv: + yield rv + finally: + if not cleanup: + self._depth -= 1 + + @property + def meta(self) -> dict[str, t.Any]: + """This is a dictionary which is shared with all the contexts + that are nested. It exists so that click utilities can store some + state here if they need to. It is however the responsibility of + that code to manage this dictionary well. + + The keys are supposed to be unique dotted strings. For instance + module paths are a good choice for it. What is stored in there is + irrelevant for the operation of click. However what is important is + that code that places data here adheres to the general semantics of + the system. + + Example usage:: + + LANG_KEY = f'{__name__}.lang' + + def set_language(value): + ctx = get_current_context() + ctx.meta[LANG_KEY] = value + + def get_language(): + return get_current_context().meta.get(LANG_KEY, 'en_US') + + .. versionadded:: 5.0 + """ + return self._meta + + def make_formatter(self) -> HelpFormatter: + """Creates the :class:`~click.HelpFormatter` for the help and + usage output. + + To quickly customize the formatter class used without overriding + this method, set the :attr:`formatter_class` attribute. + + .. versionchanged:: 8.0 + Added the :attr:`formatter_class` attribute. + """ + return self.formatter_class( + width=self.terminal_width, max_width=self.max_content_width + ) + + def with_resource(self, context_manager: AbstractContextManager[V]) -> V: + """Register a resource as if it were used in a ``with`` + statement. The resource will be cleaned up when the context is + popped. + + Uses :meth:`contextlib.ExitStack.enter_context`. It calls the + resource's ``__enter__()`` method and returns the result. When + the context is popped, it closes the stack, which calls the + resource's ``__exit__()`` method. + + To register a cleanup function for something that isn't a + context manager, use :meth:`call_on_close`. Or use something + from :mod:`contextlib` to turn it into a context manager first. + + .. code-block:: python + + @click.group() + @click.option("--name") + @click.pass_context + def cli(ctx): + ctx.obj = ctx.with_resource(connect_db(name)) + + :param context_manager: The context manager to enter. + :return: Whatever ``context_manager.__enter__()`` returns. + + .. versionadded:: 8.0 + """ + return self._exit_stack.enter_context(context_manager) + + def call_on_close(self, f: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]: + """Register a function to be called when the context tears down. + + This can be used to close resources opened during the script + execution. Resources that support Python's context manager + protocol which would be used in a ``with`` statement should be + registered with :meth:`with_resource` instead. + + :param f: The function to execute on teardown. + """ + return self._exit_stack.callback(f) + + def close(self) -> None: + """Invoke all close callbacks registered with + :meth:`call_on_close`, and exit all context managers entered + with :meth:`with_resource`. + """ + self._close_with_exception_info(None, None, None) + + def _close_with_exception_info( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + tb: TracebackType | None, + ) -> bool | None: + """Unwind the exit stack by calling its :meth:`__exit__` providing the exception + information to allow for exception handling by the various resources registered + using :meth;`with_resource` + + :return: Whatever ``exit_stack.__exit__()`` returns. + """ + exit_result = self._exit_stack.__exit__(exc_type, exc_value, tb) + # In case the context is reused, create a new exit stack. + self._exit_stack = ExitStack() + + return exit_result + + @property + def command_path(self) -> str: + """The computed command path. This is used for the ``usage`` + information on the help page. It's automatically created by + combining the info names of the chain of contexts to the root. + """ + rv = "" + if self.info_name is not None: + rv = self.info_name + if self.parent is not None: + parent_command_path = [self.parent.command_path] + + if isinstance(self.parent.command, Command): + for param in self.parent.command.get_params(self): + parent_command_path.extend(param.get_usage_pieces(self)) + + rv = f"{' '.join(parent_command_path)} {rv}" + return rv.lstrip() + + def find_root(self) -> Context: + """Finds the outermost context.""" + node = self + while node.parent is not None: + node = node.parent + return node + + def find_object(self, object_type: type[V]) -> V | None: + """Finds the closest object of a given type.""" + node: Context | None = self + + while node is not None: + if isinstance(node.obj, object_type): + return node.obj + + node = node.parent + + return None + + def ensure_object(self, object_type: type[V]) -> V: + """Like :meth:`find_object` but sets the innermost object to a + new instance of `object_type` if it does not exist. + """ + rv = self.find_object(object_type) + if rv is None: + self.obj = rv = object_type() + return rv + + @t.overload + def lookup_default( + self, name: str, call: t.Literal[True] = True + ) -> t.Any | None: ... + + @t.overload + def lookup_default( + self, name: str, call: t.Literal[False] = ... + ) -> t.Any | t.Callable[[], t.Any] | None: ... + + def lookup_default(self, name: str, call: bool = True) -> t.Any | None: + """Get the default for a parameter from :attr:`default_map`. + + :param name: Name of the parameter. + :param call: If the default is a callable, call it. Disable to + return the callable instead. + + .. versionchanged:: 8.0 + Added the ``call`` parameter. + """ + if self.default_map is not None: + value = self.default_map.get(name, UNSET) + + if call and callable(value): + return value() + + return value + + return UNSET + + def fail(self, message: str) -> t.NoReturn: + """Aborts the execution of the program with a specific error + message. + + :param message: the error message to fail with. + """ + raise UsageError(message, self) + + def abort(self) -> t.NoReturn: + """Aborts the script.""" + raise Abort() + + def exit(self, code: int = 0) -> t.NoReturn: + """Exits the application with a given exit code. + + .. versionchanged:: 8.2 + Callbacks and context managers registered with :meth:`call_on_close` + and :meth:`with_resource` are closed before exiting. + """ + self.close() + raise Exit(code) + + def get_usage(self) -> str: + """Helper method to get formatted usage string for the current + context and command. + """ + return self.command.get_usage(self) + + def get_help(self) -> str: + """Helper method to get formatted help page for the current + context and command. + """ + return self.command.get_help(self) + + def _make_sub_context(self, command: Command) -> Context: + """Create a new context of the same type as this context, but + for a new command. + + :meta private: + """ + return type(self)(command, info_name=command.name, parent=self) + + @t.overload + def invoke( + self, callback: t.Callable[..., V], /, *args: t.Any, **kwargs: t.Any + ) -> V: ... + + @t.overload + def invoke(self, callback: Command, /, *args: t.Any, **kwargs: t.Any) -> t.Any: ... + + def invoke( + self, callback: Command | t.Callable[..., V], /, *args: t.Any, **kwargs: t.Any + ) -> t.Any | V: + """Invokes a command callback in exactly the way it expects. There + are two ways to invoke this method: + + 1. the first argument can be a callback and all other arguments and + keyword arguments are forwarded directly to the function. + 2. the first argument is a click command object. In that case all + arguments are forwarded as well but proper click parameters + (options and click arguments) must be keyword arguments and Click + will fill in defaults. + + .. versionchanged:: 8.0 + All ``kwargs`` are tracked in :attr:`params` so they will be + passed if :meth:`forward` is called at multiple levels. + + .. versionchanged:: 3.2 + A new context is created, and missing arguments use default values. + """ + if isinstance(callback, Command): + other_cmd = callback + + if other_cmd.callback is None: + raise TypeError( + "The given command does not have a callback that can be invoked." + ) + else: + callback = t.cast("t.Callable[..., V]", other_cmd.callback) + + ctx = self._make_sub_context(other_cmd) + + for param in other_cmd.params: + if param.name not in kwargs and param.expose_value: + kwargs[param.name] = param.type_cast_value( # type: ignore + ctx, param.get_default(ctx) + ) + + # Track all kwargs as params, so that forward() will pass + # them on in subsequent calls. + ctx.params.update(kwargs) + else: + ctx = self + + with augment_usage_errors(self): + with ctx: + return callback(*args, **kwargs) + + def forward(self, cmd: Command, /, *args: t.Any, **kwargs: t.Any) -> t.Any: + """Similar to :meth:`invoke` but fills in default keyword + arguments from the current context if the other command expects + it. This cannot invoke callbacks directly, only other commands. + + .. versionchanged:: 8.0 + All ``kwargs`` are tracked in :attr:`params` so they will be + passed if ``forward`` is called at multiple levels. + """ + # Can only forward to other commands, not direct callbacks. + if not isinstance(cmd, Command): + raise TypeError("Callback is not a command.") + + for param in self.params: + if param not in kwargs: + kwargs[param] = self.params[param] + + return self.invoke(cmd, *args, **kwargs) + + def set_parameter_source(self, name: str, source: ParameterSource) -> None: + """Set the source of a parameter. This indicates the location + from which the value of the parameter was obtained. + + :param name: The name of the parameter. + :param source: A member of :class:`~click.core.ParameterSource`. + """ + self._parameter_source[name] = source + + def get_parameter_source(self, name: str) -> ParameterSource | None: + """Get the source of a parameter. This indicates the location + from which the value of the parameter was obtained. + + This can be useful for determining when a user specified a value + on the command line that is the same as the default value. It + will be :attr:`~click.core.ParameterSource.DEFAULT` only if the + value was actually taken from the default. + + :param name: The name of the parameter. + :rtype: ParameterSource + + .. versionchanged:: 8.0 + Returns ``None`` if the parameter was not provided from any + source. + """ + return self._parameter_source.get(name) + + +class Command: + """Commands are the basic building block of command line interfaces in + Click. A basic command handles command line parsing and might dispatch + more parsing to commands nested below it. + + :param name: the name of the command to use unless a group overrides it. + :param context_settings: an optional dictionary with defaults that are + passed to the context object. + :param callback: the callback to invoke. This is optional. + :param params: the parameters to register with this command. This can + be either :class:`Option` or :class:`Argument` objects. + :param help: the help string to use for this command. + :param epilog: like the help string but it's printed at the end of the + help page after everything else. + :param short_help: the short help to use for this command. This is + shown on the command listing of the parent command. + :param add_help_option: by default each command registers a ``--help`` + option. This can be disabled by this parameter. + :param no_args_is_help: this controls what happens if no arguments are + provided. This option is disabled by default. + If enabled this will add ``--help`` as argument + if no arguments are passed + :param hidden: hide this command from help outputs. + :param deprecated: If ``True`` or non-empty string, issues a message + indicating that the command is deprecated and highlights + its deprecation in --help. The message can be customized + by using a string as the value. + + .. versionchanged:: 8.2 + This is the base class for all commands, not ``BaseCommand``. + ``deprecated`` can be set to a string as well to customize the + deprecation message. + + .. versionchanged:: 8.1 + ``help``, ``epilog``, and ``short_help`` are stored unprocessed, + all formatting is done when outputting help text, not at init, + and is done even if not using the ``@command`` decorator. + + .. versionchanged:: 8.0 + Added a ``repr`` showing the command name. + + .. versionchanged:: 7.1 + Added the ``no_args_is_help`` parameter. + + .. versionchanged:: 2.0 + Added the ``context_settings`` parameter. + """ + + #: The context class to create with :meth:`make_context`. + #: + #: .. versionadded:: 8.0 + context_class: type[Context] = Context + + #: the default for the :attr:`Context.allow_extra_args` flag. + allow_extra_args = False + + #: the default for the :attr:`Context.allow_interspersed_args` flag. + allow_interspersed_args = True + + #: the default for the :attr:`Context.ignore_unknown_options` flag. + ignore_unknown_options = False + + def __init__( + self, + name: str | None, + context_settings: cabc.MutableMapping[str, t.Any] | None = None, + callback: t.Callable[..., t.Any] | None = None, + params: list[Parameter] | None = None, + help: str | None = None, + epilog: str | None = None, + short_help: str | None = None, + options_metavar: str | None = "[OPTIONS]", + add_help_option: bool = True, + no_args_is_help: bool = False, + hidden: bool = False, + deprecated: bool | str = False, + ) -> None: + #: the name the command thinks it has. Upon registering a command + #: on a :class:`Group` the group will default the command name + #: with this information. You should instead use the + #: :class:`Context`\'s :attr:`~Context.info_name` attribute. + self.name = name + + if context_settings is None: + context_settings = {} + + #: an optional dictionary with defaults passed to the context. + self.context_settings: cabc.MutableMapping[str, t.Any] = context_settings + + #: the callback to execute when the command fires. This might be + #: `None` in which case nothing happens. + self.callback = callback + #: the list of parameters for this command in the order they + #: should show up in the help page and execute. Eager parameters + #: will automatically be handled before non eager ones. + self.params: list[Parameter] = params or [] + self.help = help + self.epilog = epilog + self.options_metavar = options_metavar + self.short_help = short_help + self.add_help_option = add_help_option + self._help_option = None + self.no_args_is_help = no_args_is_help + self.hidden = hidden + self.deprecated = deprecated + + def to_info_dict(self, ctx: Context) -> dict[str, t.Any]: + return { + "name": self.name, + "params": [param.to_info_dict() for param in self.get_params(ctx)], + "help": self.help, + "epilog": self.epilog, + "short_help": self.short_help, + "hidden": self.hidden, + "deprecated": self.deprecated, + } + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.name}>" + + def get_usage(self, ctx: Context) -> str: + """Formats the usage line into a string and returns it. + + Calls :meth:`format_usage` internally. + """ + formatter = ctx.make_formatter() + self.format_usage(ctx, formatter) + return formatter.getvalue().rstrip("\n") + + def get_params(self, ctx: Context) -> list[Parameter]: + params = self.params + help_option = self.get_help_option(ctx) + + if help_option is not None: + params = [*params, help_option] + + if __debug__: + import warnings + + opts = [opt for param in params for opt in param.opts] + opts_counter = Counter(opts) + duplicate_opts = (opt for opt, count in opts_counter.items() if count > 1) + + for duplicate_opt in duplicate_opts: + warnings.warn( + ( + f"The parameter {duplicate_opt} is used more than once. " + "Remove its duplicate as parameters should be unique." + ), + stacklevel=3, + ) + + return params + + def format_usage(self, ctx: Context, formatter: HelpFormatter) -> None: + """Writes the usage line into the formatter. + + This is a low-level method called by :meth:`get_usage`. + """ + pieces = self.collect_usage_pieces(ctx) + formatter.write_usage(ctx.command_path, " ".join(pieces)) + + def collect_usage_pieces(self, ctx: Context) -> list[str]: + """Returns all the pieces that go into the usage line and returns + it as a list of strings. + """ + rv = [self.options_metavar] if self.options_metavar else [] + + for param in self.get_params(ctx): + rv.extend(param.get_usage_pieces(ctx)) + + return rv + + def get_help_option_names(self, ctx: Context) -> list[str]: + """Returns the names for the help option.""" + all_names = set(ctx.help_option_names) + for param in self.params: + all_names.difference_update(param.opts) + all_names.difference_update(param.secondary_opts) + return list(all_names) + + def get_help_option(self, ctx: Context) -> Option | None: + """Returns the help option object. + + Skipped if :attr:`add_help_option` is ``False``. + + .. versionchanged:: 8.1.8 + The help option is now cached to avoid creating it multiple times. + """ + help_option_names = self.get_help_option_names(ctx) + + if not help_option_names or not self.add_help_option: + return None + + # Cache the help option object in private _help_option attribute to + # avoid creating it multiple times. Not doing this will break the + # callback odering by iter_params_for_processing(), which relies on + # object comparison. + if self._help_option is None: + # Avoid circular import. + from .decorators import help_option + + # Apply help_option decorator and pop resulting option + help_option(*help_option_names)(self) + self._help_option = self.params.pop() # type: ignore[assignment] + + return self._help_option + + def make_parser(self, ctx: Context) -> _OptionParser: + """Creates the underlying option parser for this command.""" + parser = _OptionParser(ctx) + for param in self.get_params(ctx): + param.add_to_parser(parser, ctx) + return parser + + def get_help(self, ctx: Context) -> str: + """Formats the help into a string and returns it. + + Calls :meth:`format_help` internally. + """ + formatter = ctx.make_formatter() + self.format_help(ctx, formatter) + return formatter.getvalue().rstrip("\n") + + def get_short_help_str(self, limit: int = 45) -> str: + """Gets short help for the command or makes it by shortening the + long help string. + """ + if self.short_help: + text = inspect.cleandoc(self.short_help) + elif self.help: + text = make_default_short_help(self.help, limit) + else: + text = "" + + if self.deprecated: + deprecated_message = ( + f"(DEPRECATED: {self.deprecated})" + if isinstance(self.deprecated, str) + else "(DEPRECATED)" + ) + text = _("{text} {deprecated_message}").format( + text=text, deprecated_message=deprecated_message + ) + + return text.strip() + + def format_help(self, ctx: Context, formatter: HelpFormatter) -> None: + """Writes the help into the formatter if it exists. + + This is a low-level method called by :meth:`get_help`. + + This calls the following methods: + + - :meth:`format_usage` + - :meth:`format_help_text` + - :meth:`format_options` + - :meth:`format_epilog` + """ + self.format_usage(ctx, formatter) + self.format_help_text(ctx, formatter) + self.format_options(ctx, formatter) + self.format_epilog(ctx, formatter) + + def format_help_text(self, ctx: Context, formatter: HelpFormatter) -> None: + """Writes the help text to the formatter if it exists.""" + if self.help is not None: + # truncate the help text to the first form feed + text = inspect.cleandoc(self.help).partition("\f")[0] + else: + text = "" + + if self.deprecated: + deprecated_message = ( + f"(DEPRECATED: {self.deprecated})" + if isinstance(self.deprecated, str) + else "(DEPRECATED)" + ) + text = _("{text} {deprecated_message}").format( + text=text, deprecated_message=deprecated_message + ) + + if text: + formatter.write_paragraph() + + with formatter.indentation(): + formatter.write_text(text) + + def format_options(self, ctx: Context, formatter: HelpFormatter) -> None: + """Writes all the options into the formatter if they exist.""" + opts = [] + for param in self.get_params(ctx): + rv = param.get_help_record(ctx) + if rv is not None: + opts.append(rv) + + if opts: + with formatter.section(_("Options")): + formatter.write_dl(opts) + + def format_epilog(self, ctx: Context, formatter: HelpFormatter) -> None: + """Writes the epilog into the formatter if it exists.""" + if self.epilog: + epilog = inspect.cleandoc(self.epilog) + formatter.write_paragraph() + + with formatter.indentation(): + formatter.write_text(epilog) + + def make_context( + self, + info_name: str | None, + args: list[str], + parent: Context | None = None, + **extra: t.Any, + ) -> Context: + """This function when given an info name and arguments will kick + off the parsing and create a new :class:`Context`. It does not + invoke the actual command callback though. + + To quickly customize the context class used without overriding + this method, set the :attr:`context_class` attribute. + + :param info_name: the info name for this invocation. Generally this + is the most descriptive name for the script or + command. For the toplevel script it's usually + the name of the script, for commands below it's + the name of the command. + :param args: the arguments to parse as list of strings. + :param parent: the parent context if available. + :param extra: extra keyword arguments forwarded to the context + constructor. + + .. versionchanged:: 8.0 + Added the :attr:`context_class` attribute. + """ + for key, value in self.context_settings.items(): + if key not in extra: + extra[key] = value + + ctx = self.context_class(self, info_name=info_name, parent=parent, **extra) + + with ctx.scope(cleanup=False): + self.parse_args(ctx, args) + return ctx + + def parse_args(self, ctx: Context, args: list[str]) -> list[str]: + if not args and self.no_args_is_help and not ctx.resilient_parsing: + raise NoArgsIsHelpError(ctx) + + parser = self.make_parser(ctx) + opts, args, param_order = parser.parse_args(args=args) + + for param in iter_params_for_processing(param_order, self.get_params(ctx)): + _, args = param.handle_parse_result(ctx, opts, args) + + if args and not ctx.allow_extra_args and not ctx.resilient_parsing: + ctx.fail( + ngettext( + "Got unexpected extra argument ({args})", + "Got unexpected extra arguments ({args})", + len(args), + ).format(args=" ".join(map(str, args))) + ) + + ctx.args = args + ctx._opt_prefixes.update(parser._opt_prefixes) + return args + + def invoke(self, ctx: Context) -> t.Any: + """Given a context, this invokes the attached callback (if it exists) + in the right way. + """ + if self.deprecated: + extra_message = ( + f" {self.deprecated}" if isinstance(self.deprecated, str) else "" + ) + message = _( + "DeprecationWarning: The command {name!r} is deprecated.{extra_message}" + ).format(name=self.name, extra_message=extra_message) + echo(style(message, fg="red"), err=True) + + if self.callback is not None: + return ctx.invoke(self.callback, **ctx.params) + + def shell_complete(self, ctx: Context, incomplete: str) -> list[CompletionItem]: + """Return a list of completions for the incomplete value. Looks + at the names of options and chained multi-commands. + + Any command could be part of a chained multi-command, so sibling + commands are valid at any point during command completion. + + :param ctx: Invocation context for this command. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + from click.shell_completion import CompletionItem + + results: list[CompletionItem] = [] + + if incomplete and not incomplete[0].isalnum(): + for param in self.get_params(ctx): + if ( + not isinstance(param, Option) + or param.hidden + or ( + not param.multiple + and ctx.get_parameter_source(param.name) # type: ignore + is ParameterSource.COMMANDLINE + ) + ): + continue + + results.extend( + CompletionItem(name, help=param.help) + for name in [*param.opts, *param.secondary_opts] + if name.startswith(incomplete) + ) + + while ctx.parent is not None: + ctx = ctx.parent + + if isinstance(ctx.command, Group) and ctx.command.chain: + results.extend( + CompletionItem(name, help=command.get_short_help_str()) + for name, command in _complete_visible_commands(ctx, incomplete) + if name not in ctx._protected_args + ) + + return results + + @t.overload + def main( + self, + args: cabc.Sequence[str] | None = None, + prog_name: str | None = None, + complete_var: str | None = None, + standalone_mode: t.Literal[True] = True, + **extra: t.Any, + ) -> t.NoReturn: ... + + @t.overload + def main( + self, + args: cabc.Sequence[str] | None = None, + prog_name: str | None = None, + complete_var: str | None = None, + standalone_mode: bool = ..., + **extra: t.Any, + ) -> t.Any: ... + + def main( + self, + args: cabc.Sequence[str] | None = None, + prog_name: str | None = None, + complete_var: str | None = None, + standalone_mode: bool = True, + windows_expand_args: bool = True, + **extra: t.Any, + ) -> t.Any: + """This is the way to invoke a script with all the bells and + whistles as a command line application. This will always terminate + the application after a call. If this is not wanted, ``SystemExit`` + needs to be caught. + + This method is also available by directly calling the instance of + a :class:`Command`. + + :param args: the arguments that should be used for parsing. If not + provided, ``sys.argv[1:]`` is used. + :param prog_name: the program name that should be used. By default + the program name is constructed by taking the file + name from ``sys.argv[0]``. + :param complete_var: the environment variable that controls the + bash completion support. The default is + ``"__COMPLETE"`` with prog_name in + uppercase. + :param standalone_mode: the default behavior is to invoke the script + in standalone mode. Click will then + handle exceptions and convert them into + error messages and the function will never + return but shut down the interpreter. If + this is set to `False` they will be + propagated to the caller and the return + value of this function is the return value + of :meth:`invoke`. + :param windows_expand_args: Expand glob patterns, user dir, and + env vars in command line args on Windows. + :param extra: extra keyword arguments are forwarded to the context + constructor. See :class:`Context` for more information. + + .. versionchanged:: 8.0.1 + Added the ``windows_expand_args`` parameter to allow + disabling command line arg expansion on Windows. + + .. versionchanged:: 8.0 + When taking arguments from ``sys.argv`` on Windows, glob + patterns, user dir, and env vars are expanded. + + .. versionchanged:: 3.0 + Added the ``standalone_mode`` parameter. + """ + if args is None: + args = sys.argv[1:] + + if os.name == "nt" and windows_expand_args: + args = _expand_args(args) + else: + args = list(args) + + if prog_name is None: + prog_name = _detect_program_name() + + # Process shell completion requests and exit early. + self._main_shell_completion(extra, prog_name, complete_var) + + try: + try: + with self.make_context(prog_name, args, **extra) as ctx: + rv = self.invoke(ctx) + if not standalone_mode: + return rv + # it's not safe to `ctx.exit(rv)` here! + # note that `rv` may actually contain data like "1" which + # has obvious effects + # more subtle case: `rv=[None, None]` can come out of + # chained commands which all returned `None` -- so it's not + # even always obvious that `rv` indicates success/failure + # by its truthiness/falsiness + ctx.exit() + except (EOFError, KeyboardInterrupt) as e: + echo(file=sys.stderr) + raise Abort() from e + except ClickException as e: + if not standalone_mode: + raise + e.show() + sys.exit(e.exit_code) + except OSError as e: + if e.errno == errno.EPIPE: + sys.stdout = t.cast(t.TextIO, PacifyFlushWrapper(sys.stdout)) + sys.stderr = t.cast(t.TextIO, PacifyFlushWrapper(sys.stderr)) + sys.exit(1) + else: + raise + except Exit as e: + if standalone_mode: + sys.exit(e.exit_code) + else: + # in non-standalone mode, return the exit code + # note that this is only reached if `self.invoke` above raises + # an Exit explicitly -- thus bypassing the check there which + # would return its result + # the results of non-standalone execution may therefore be + # somewhat ambiguous: if there are codepaths which lead to + # `ctx.exit(1)` and to `return 1`, the caller won't be able to + # tell the difference between the two + return e.exit_code + except Abort: + if not standalone_mode: + raise + echo(_("Aborted!"), file=sys.stderr) + sys.exit(1) + + def _main_shell_completion( + self, + ctx_args: cabc.MutableMapping[str, t.Any], + prog_name: str, + complete_var: str | None = None, + ) -> None: + """Check if the shell is asking for tab completion, process + that, then exit early. Called from :meth:`main` before the + program is invoked. + + :param prog_name: Name of the executable in the shell. + :param complete_var: Name of the environment variable that holds + the completion instruction. Defaults to + ``_{PROG_NAME}_COMPLETE``. + + .. versionchanged:: 8.2.0 + Dots (``.``) in ``prog_name`` are replaced with underscores (``_``). + """ + if complete_var is None: + complete_name = prog_name.replace("-", "_").replace(".", "_") + complete_var = f"_{complete_name}_COMPLETE".upper() + + instruction = os.environ.get(complete_var) + + if not instruction: + return + + from .shell_completion import shell_complete + + rv = shell_complete(self, ctx_args, prog_name, complete_var, instruction) + sys.exit(rv) + + def __call__(self, *args: t.Any, **kwargs: t.Any) -> t.Any: + """Alias for :meth:`main`.""" + return self.main(*args, **kwargs) + + +class _FakeSubclassCheck(type): + def __subclasscheck__(cls, subclass: type) -> bool: + return issubclass(subclass, cls.__bases__[0]) + + def __instancecheck__(cls, instance: t.Any) -> bool: + return isinstance(instance, cls.__bases__[0]) + + +class _BaseCommand(Command, metaclass=_FakeSubclassCheck): + """ + .. deprecated:: 8.2 + Will be removed in Click 9.0. Use ``Command`` instead. + """ + + +class Group(Command): + """A group is a command that nests other commands (or more groups). + + :param name: The name of the group command. + :param commands: Map names to :class:`Command` objects. Can be a list, which + will use :attr:`Command.name` as the keys. + :param invoke_without_command: Invoke the group's callback even if a + subcommand is not given. + :param no_args_is_help: If no arguments are given, show the group's help and + exit. Defaults to the opposite of ``invoke_without_command``. + :param subcommand_metavar: How to represent the subcommand argument in help. + The default will represent whether ``chain`` is set or not. + :param chain: Allow passing more than one subcommand argument. After parsing + a command's arguments, if any arguments remain another command will be + matched, and so on. + :param result_callback: A function to call after the group's and + subcommand's callbacks. The value returned by the subcommand is passed. + If ``chain`` is enabled, the value will be a list of values returned by + all the commands. If ``invoke_without_command`` is enabled, the value + will be the value returned by the group's callback, or an empty list if + ``chain`` is enabled. + :param kwargs: Other arguments passed to :class:`Command`. + + .. versionchanged:: 8.0 + The ``commands`` argument can be a list of command objects. + + .. versionchanged:: 8.2 + Merged with and replaces the ``MultiCommand`` base class. + """ + + allow_extra_args = True + allow_interspersed_args = False + + #: If set, this is used by the group's :meth:`command` decorator + #: as the default :class:`Command` class. This is useful to make all + #: subcommands use a custom command class. + #: + #: .. versionadded:: 8.0 + command_class: type[Command] | None = None + + #: If set, this is used by the group's :meth:`group` decorator + #: as the default :class:`Group` class. This is useful to make all + #: subgroups use a custom group class. + #: + #: If set to the special value :class:`type` (literally + #: ``group_class = type``), this group's class will be used as the + #: default class. This makes a custom group class continue to make + #: custom groups. + #: + #: .. versionadded:: 8.0 + group_class: type[Group] | type[type] | None = None + # Literal[type] isn't valid, so use Type[type] + + def __init__( + self, + name: str | None = None, + commands: cabc.MutableMapping[str, Command] + | cabc.Sequence[Command] + | None = None, + invoke_without_command: bool = False, + no_args_is_help: bool | None = None, + subcommand_metavar: str | None = None, + chain: bool = False, + result_callback: t.Callable[..., t.Any] | None = None, + **kwargs: t.Any, + ) -> None: + super().__init__(name, **kwargs) + + if commands is None: + commands = {} + elif isinstance(commands, abc.Sequence): + commands = {c.name: c for c in commands if c.name is not None} + + #: The registered subcommands by their exported names. + self.commands: cabc.MutableMapping[str, Command] = commands + + if no_args_is_help is None: + no_args_is_help = not invoke_without_command + + self.no_args_is_help = no_args_is_help + self.invoke_without_command = invoke_without_command + + if subcommand_metavar is None: + if chain: + subcommand_metavar = "COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]..." + else: + subcommand_metavar = "COMMAND [ARGS]..." + + self.subcommand_metavar = subcommand_metavar + self.chain = chain + # The result callback that is stored. This can be set or + # overridden with the :func:`result_callback` decorator. + self._result_callback = result_callback + + if self.chain: + for param in self.params: + if isinstance(param, Argument) and not param.required: + raise RuntimeError( + "A group in chain mode cannot have optional arguments." + ) + + def to_info_dict(self, ctx: Context) -> dict[str, t.Any]: + info_dict = super().to_info_dict(ctx) + commands = {} + + for name in self.list_commands(ctx): + command = self.get_command(ctx, name) + + if command is None: + continue + + sub_ctx = ctx._make_sub_context(command) + + with sub_ctx.scope(cleanup=False): + commands[name] = command.to_info_dict(sub_ctx) + + info_dict.update(commands=commands, chain=self.chain) + return info_dict + + def add_command(self, cmd: Command, name: str | None = None) -> None: + """Registers another :class:`Command` with this group. If the name + is not provided, the name of the command is used. + """ + name = name or cmd.name + if name is None: + raise TypeError("Command has no name.") + _check_nested_chain(self, name, cmd, register=True) + self.commands[name] = cmd + + @t.overload + def command(self, __func: t.Callable[..., t.Any]) -> Command: ... + + @t.overload + def command( + self, *args: t.Any, **kwargs: t.Any + ) -> t.Callable[[t.Callable[..., t.Any]], Command]: ... + + def command( + self, *args: t.Any, **kwargs: t.Any + ) -> t.Callable[[t.Callable[..., t.Any]], Command] | Command: + """A shortcut decorator for declaring and attaching a command to + the group. This takes the same arguments as :func:`command` and + immediately registers the created command with this group by + calling :meth:`add_command`. + + To customize the command class used, set the + :attr:`command_class` attribute. + + .. versionchanged:: 8.1 + This decorator can be applied without parentheses. + + .. versionchanged:: 8.0 + Added the :attr:`command_class` attribute. + """ + from .decorators import command + + func: t.Callable[..., t.Any] | None = None + + if args and callable(args[0]): + assert len(args) == 1 and not kwargs, ( + "Use 'command(**kwargs)(callable)' to provide arguments." + ) + (func,) = args + args = () + + if self.command_class and kwargs.get("cls") is None: + kwargs["cls"] = self.command_class + + def decorator(f: t.Callable[..., t.Any]) -> Command: + cmd: Command = command(*args, **kwargs)(f) + self.add_command(cmd) + return cmd + + if func is not None: + return decorator(func) + + return decorator + + @t.overload + def group(self, __func: t.Callable[..., t.Any]) -> Group: ... + + @t.overload + def group( + self, *args: t.Any, **kwargs: t.Any + ) -> t.Callable[[t.Callable[..., t.Any]], Group]: ... + + def group( + self, *args: t.Any, **kwargs: t.Any + ) -> t.Callable[[t.Callable[..., t.Any]], Group] | Group: + """A shortcut decorator for declaring and attaching a group to + the group. This takes the same arguments as :func:`group` and + immediately registers the created group with this group by + calling :meth:`add_command`. + + To customize the group class used, set the :attr:`group_class` + attribute. + + .. versionchanged:: 8.1 + This decorator can be applied without parentheses. + + .. versionchanged:: 8.0 + Added the :attr:`group_class` attribute. + """ + from .decorators import group + + func: t.Callable[..., t.Any] | None = None + + if args and callable(args[0]): + assert len(args) == 1 and not kwargs, ( + "Use 'group(**kwargs)(callable)' to provide arguments." + ) + (func,) = args + args = () + + if self.group_class is not None and kwargs.get("cls") is None: + if self.group_class is type: + kwargs["cls"] = type(self) + else: + kwargs["cls"] = self.group_class + + def decorator(f: t.Callable[..., t.Any]) -> Group: + cmd: Group = group(*args, **kwargs)(f) + self.add_command(cmd) + return cmd + + if func is not None: + return decorator(func) + + return decorator + + def result_callback(self, replace: bool = False) -> t.Callable[[F], F]: + """Adds a result callback to the command. By default if a + result callback is already registered this will chain them but + this can be disabled with the `replace` parameter. The result + callback is invoked with the return value of the subcommand + (or the list of return values from all subcommands if chaining + is enabled) as well as the parameters as they would be passed + to the main callback. + + Example:: + + @click.group() + @click.option('-i', '--input', default=23) + def cli(input): + return 42 + + @cli.result_callback() + def process_result(result, input): + return result + input + + :param replace: if set to `True` an already existing result + callback will be removed. + + .. versionchanged:: 8.0 + Renamed from ``resultcallback``. + + .. versionadded:: 3.0 + """ + + def decorator(f: F) -> F: + old_callback = self._result_callback + + if old_callback is None or replace: + self._result_callback = f + return f + + def function(value: t.Any, /, *args: t.Any, **kwargs: t.Any) -> t.Any: + inner = old_callback(value, *args, **kwargs) + return f(inner, *args, **kwargs) + + self._result_callback = rv = update_wrapper(t.cast(F, function), f) + return rv # type: ignore[return-value] + + return decorator + + def get_command(self, ctx: Context, cmd_name: str) -> Command | None: + """Given a context and a command name, this returns a :class:`Command` + object if it exists or returns ``None``. + """ + return self.commands.get(cmd_name) + + def list_commands(self, ctx: Context) -> list[str]: + """Returns a list of subcommand names in the order they should appear.""" + return sorted(self.commands) + + def collect_usage_pieces(self, ctx: Context) -> list[str]: + rv = super().collect_usage_pieces(ctx) + rv.append(self.subcommand_metavar) + return rv + + def format_options(self, ctx: Context, formatter: HelpFormatter) -> None: + super().format_options(ctx, formatter) + self.format_commands(ctx, formatter) + + def format_commands(self, ctx: Context, formatter: HelpFormatter) -> None: + """Extra format methods for multi methods that adds all the commands + after the options. + """ + commands = [] + for subcommand in self.list_commands(ctx): + cmd = self.get_command(ctx, subcommand) + # What is this, the tool lied about a command. Ignore it + if cmd is None: + continue + if cmd.hidden: + continue + + commands.append((subcommand, cmd)) + + # allow for 3 times the default spacing + if len(commands): + limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands) + + rows = [] + for subcommand, cmd in commands: + help = cmd.get_short_help_str(limit) + rows.append((subcommand, help)) + + if rows: + with formatter.section(_("Commands")): + formatter.write_dl(rows) + + def parse_args(self, ctx: Context, args: list[str]) -> list[str]: + if not args and self.no_args_is_help and not ctx.resilient_parsing: + raise NoArgsIsHelpError(ctx) + + rest = super().parse_args(ctx, args) + + if self.chain: + ctx._protected_args = rest + ctx.args = [] + elif rest: + ctx._protected_args, ctx.args = rest[:1], rest[1:] + + return ctx.args + + def invoke(self, ctx: Context) -> t.Any: + def _process_result(value: t.Any) -> t.Any: + if self._result_callback is not None: + value = ctx.invoke(self._result_callback, value, **ctx.params) + return value + + if not ctx._protected_args: + if self.invoke_without_command: + # No subcommand was invoked, so the result callback is + # invoked with the group return value for regular + # groups, or an empty list for chained groups. + with ctx: + rv = super().invoke(ctx) + return _process_result([] if self.chain else rv) + ctx.fail(_("Missing command.")) + + # Fetch args back out + args = [*ctx._protected_args, *ctx.args] + ctx.args = [] + ctx._protected_args = [] + + # If we're not in chain mode, we only allow the invocation of a + # single command but we also inform the current context about the + # name of the command to invoke. + if not self.chain: + # Make sure the context is entered so we do not clean up + # resources until the result processor has worked. + with ctx: + cmd_name, cmd, args = self.resolve_command(ctx, args) + assert cmd is not None + ctx.invoked_subcommand = cmd_name + super().invoke(ctx) + sub_ctx = cmd.make_context(cmd_name, args, parent=ctx) + with sub_ctx: + return _process_result(sub_ctx.command.invoke(sub_ctx)) + + # In chain mode we create the contexts step by step, but after the + # base command has been invoked. Because at that point we do not + # know the subcommands yet, the invoked subcommand attribute is + # set to ``*`` to inform the command that subcommands are executed + # but nothing else. + with ctx: + ctx.invoked_subcommand = "*" if args else None + super().invoke(ctx) + + # Otherwise we make every single context and invoke them in a + # chain. In that case the return value to the result processor + # is the list of all invoked subcommand's results. + contexts = [] + while args: + cmd_name, cmd, args = self.resolve_command(ctx, args) + assert cmd is not None + sub_ctx = cmd.make_context( + cmd_name, + args, + parent=ctx, + allow_extra_args=True, + allow_interspersed_args=False, + ) + contexts.append(sub_ctx) + args, sub_ctx.args = sub_ctx.args, [] + + rv = [] + for sub_ctx in contexts: + with sub_ctx: + rv.append(sub_ctx.command.invoke(sub_ctx)) + return _process_result(rv) + + def resolve_command( + self, ctx: Context, args: list[str] + ) -> tuple[str | None, Command | None, list[str]]: + cmd_name = make_str(args[0]) + original_cmd_name = cmd_name + + # Get the command + cmd = self.get_command(ctx, cmd_name) + + # If we can't find the command but there is a normalization + # function available, we try with that one. + if cmd is None and ctx.token_normalize_func is not None: + cmd_name = ctx.token_normalize_func(cmd_name) + cmd = self.get_command(ctx, cmd_name) + + # If we don't find the command we want to show an error message + # to the user that it was not provided. However, there is + # something else we should do: if the first argument looks like + # an option we want to kick off parsing again for arguments to + # resolve things like --help which now should go to the main + # place. + if cmd is None and not ctx.resilient_parsing: + if _split_opt(cmd_name)[0]: + self.parse_args(ctx, args) + ctx.fail(_("No such command {name!r}.").format(name=original_cmd_name)) + return cmd_name if cmd else None, cmd, args[1:] + + def shell_complete(self, ctx: Context, incomplete: str) -> list[CompletionItem]: + """Return a list of completions for the incomplete value. Looks + at the names of options, subcommands, and chained + multi-commands. + + :param ctx: Invocation context for this command. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + from click.shell_completion import CompletionItem + + results = [ + CompletionItem(name, help=command.get_short_help_str()) + for name, command in _complete_visible_commands(ctx, incomplete) + ] + results.extend(super().shell_complete(ctx, incomplete)) + return results + + +class _MultiCommand(Group, metaclass=_FakeSubclassCheck): + """ + .. deprecated:: 8.2 + Will be removed in Click 9.0. Use ``Group`` instead. + """ + + +class CommandCollection(Group): + """A :class:`Group` that looks up subcommands on other groups. If a command + is not found on this group, each registered source is checked in order. + Parameters on a source are not added to this group, and a source's callback + is not invoked when invoking its commands. In other words, this "flattens" + commands in many groups into this one group. + + :param name: The name of the group command. + :param sources: A list of :class:`Group` objects to look up commands from. + :param kwargs: Other arguments passed to :class:`Group`. + + .. versionchanged:: 8.2 + This is a subclass of ``Group``. Commands are looked up first on this + group, then each of its sources. + """ + + def __init__( + self, + name: str | None = None, + sources: list[Group] | None = None, + **kwargs: t.Any, + ) -> None: + super().__init__(name, **kwargs) + #: The list of registered groups. + self.sources: list[Group] = sources or [] + + def add_source(self, group: Group) -> None: + """Add a group as a source of commands.""" + self.sources.append(group) + + def get_command(self, ctx: Context, cmd_name: str) -> Command | None: + rv = super().get_command(ctx, cmd_name) + + if rv is not None: + return rv + + for source in self.sources: + rv = source.get_command(ctx, cmd_name) + + if rv is not None: + if self.chain: + _check_nested_chain(self, cmd_name, rv) + + return rv + + return None + + def list_commands(self, ctx: Context) -> list[str]: + rv: set[str] = set(super().list_commands(ctx)) + + for source in self.sources: + rv.update(source.list_commands(ctx)) + + return sorted(rv) + + +def _check_iter(value: t.Any) -> cabc.Iterator[t.Any]: + """Check if the value is iterable but not a string. Raises a type + error, or return an iterator over the value. + """ + if isinstance(value, str): + raise TypeError + + return iter(value) + + +class Parameter: + r"""A parameter to a command comes in two versions: they are either + :class:`Option`\s or :class:`Argument`\s. Other subclasses are currently + not supported by design as some of the internals for parsing are + intentionally not finalized. + + Some settings are supported by both options and arguments. + + :param param_decls: the parameter declarations for this option or + argument. This is a list of flags or argument + names. + :param type: the type that should be used. Either a :class:`ParamType` + or a Python type. The latter is converted into the former + automatically if supported. + :param required: controls if this is optional or not. + :param default: the default value if omitted. This can also be a callable, + in which case it's invoked when the default is needed + without any arguments. + :param callback: A function to further process or validate the value + after type conversion. It is called as ``f(ctx, param, value)`` + and must return the value. It is called for all sources, + including prompts. + :param nargs: the number of arguments to match. If not ``1`` the return + value is a tuple instead of single value. The default for + nargs is ``1`` (except if the type is a tuple, then it's + the arity of the tuple). If ``nargs=-1``, all remaining + parameters are collected. + :param metavar: how the value is represented in the help page. + :param expose_value: if this is `True` then the value is passed onwards + to the command callback and stored on the context, + otherwise it's skipped. + :param is_eager: eager values are processed before non eager ones. This + should not be set for arguments or it will inverse the + order of processing. + :param envvar: environment variable(s) that are used to provide a default value for + this parameter. This can be a string or a sequence of strings. If a sequence is + given, only the first non-empty environment variable is used for the parameter. + :param shell_complete: A function that returns custom shell + completions. Used instead of the param's type completion if + given. Takes ``ctx, param, incomplete`` and must return a list + of :class:`~click.shell_completion.CompletionItem` or a list of + strings. + :param deprecated: If ``True`` or non-empty string, issues a message + indicating that the argument is deprecated and highlights + its deprecation in --help. The message can be customized + by using a string as the value. A deprecated parameter + cannot be required, a ValueError will be raised otherwise. + + .. versionchanged:: 8.2.0 + Introduction of ``deprecated``. + + .. versionchanged:: 8.2 + Adding duplicate parameter names to a :class:`~click.core.Command` will + result in a ``UserWarning`` being shown. + + .. versionchanged:: 8.2 + Adding duplicate parameter names to a :class:`~click.core.Command` will + result in a ``UserWarning`` being shown. + + .. versionchanged:: 8.0 + ``process_value`` validates required parameters and bounded + ``nargs``, and invokes the parameter callback before returning + the value. This allows the callback to validate prompts. + ``full_process_value`` is removed. + + .. versionchanged:: 8.0 + ``autocompletion`` is renamed to ``shell_complete`` and has new + semantics described above. The old name is deprecated and will + be removed in 8.1, until then it will be wrapped to match the + new requirements. + + .. versionchanged:: 8.0 + For ``multiple=True, nargs>1``, the default must be a list of + tuples. + + .. versionchanged:: 8.0 + Setting a default is no longer required for ``nargs>1``, it will + default to ``None``. ``multiple=True`` or ``nargs=-1`` will + default to ``()``. + + .. versionchanged:: 7.1 + Empty environment variables are ignored rather than taking the + empty string value. This makes it possible for scripts to clear + variables if they can't unset them. + + .. versionchanged:: 2.0 + Changed signature for parameter callback to also be passed the + parameter. The old callback format will still work, but it will + raise a warning to give you a chance to migrate the code easier. + """ + + param_type_name = "parameter" + + def __init__( + self, + param_decls: cabc.Sequence[str] | None = None, + type: types.ParamType | t.Any | None = None, + required: bool = False, + # XXX The default historically embed two concepts: + # - the declaration of a Parameter object carrying the default (handy to + # arbitrage the default value of coupled Parameters sharing the same + # self.name, like flag options), + # - and the actual value of the default. + # It is confusing and is the source of many issues discussed in: + # https://github.com/pallets/click/pull/3030 + # In the future, we might think of splitting it in two, not unlike + # Option.is_flag and Option.flag_value: we could have something like + # Parameter.is_default and Parameter.default_value. + default: t.Any | t.Callable[[], t.Any] | None = UNSET, + callback: t.Callable[[Context, Parameter, t.Any], t.Any] | None = None, + nargs: int | None = None, + multiple: bool = False, + metavar: str | None = None, + expose_value: bool = True, + is_eager: bool = False, + envvar: str | cabc.Sequence[str] | None = None, + shell_complete: t.Callable[ + [Context, Parameter, str], list[CompletionItem] | list[str] + ] + | None = None, + deprecated: bool | str = False, + ) -> None: + self.name: str | None + self.opts: list[str] + self.secondary_opts: list[str] + self.name, self.opts, self.secondary_opts = self._parse_decls( + param_decls or (), expose_value + ) + self.type: types.ParamType = types.convert_type(type, default) + + # Default nargs to what the type tells us if we have that + # information available. + if nargs is None: + if self.type.is_composite: + nargs = self.type.arity + else: + nargs = 1 + + self.required = required + self.callback = callback + self.nargs = nargs + self.multiple = multiple + self.expose_value = expose_value + self.default = default + self.is_eager = is_eager + self.metavar = metavar + self.envvar = envvar + self._custom_shell_complete = shell_complete + self.deprecated = deprecated + + if __debug__: + if self.type.is_composite and nargs != self.type.arity: + raise ValueError( + f"'nargs' must be {self.type.arity} (or None) for" + f" type {self.type!r}, but it was {nargs}." + ) + + if required and deprecated: + raise ValueError( + f"The {self.param_type_name} '{self.human_readable_name}' " + "is deprecated and still required. A deprecated " + f"{self.param_type_name} cannot be required." + ) + + def to_info_dict(self) -> dict[str, t.Any]: + """Gather information that could be useful for a tool generating + user-facing documentation. + + Use :meth:`click.Context.to_info_dict` to traverse the entire + CLI structure. + + .. versionchanged:: 8.3.0 + Returns ``None`` for the :attr:`default` if it was not set. + + .. versionadded:: 8.0 + """ + return { + "name": self.name, + "param_type_name": self.param_type_name, + "opts": self.opts, + "secondary_opts": self.secondary_opts, + "type": self.type.to_info_dict(), + "required": self.required, + "nargs": self.nargs, + "multiple": self.multiple, + # We explicitly hide the :attr:`UNSET` value to the user, as we choose to + # make it an implementation detail. And because ``to_info_dict`` has been + # designed for documentation purposes, we return ``None`` instead. + "default": self.default if self.default is not UNSET else None, + "envvar": self.envvar, + } + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.name}>" + + def _parse_decls( + self, decls: cabc.Sequence[str], expose_value: bool + ) -> tuple[str | None, list[str], list[str]]: + raise NotImplementedError() + + @property + def human_readable_name(self) -> str: + """Returns the human readable name of this parameter. This is the + same as the name for options, but the metavar for arguments. + """ + return self.name # type: ignore + + def make_metavar(self, ctx: Context) -> str: + if self.metavar is not None: + return self.metavar + + metavar = self.type.get_metavar(param=self, ctx=ctx) + + if metavar is None: + metavar = self.type.name.upper() + + if self.nargs != 1: + metavar += "..." + + return metavar + + @t.overload + def get_default( + self, ctx: Context, call: t.Literal[True] = True + ) -> t.Any | None: ... + + @t.overload + def get_default( + self, ctx: Context, call: bool = ... + ) -> t.Any | t.Callable[[], t.Any] | None: ... + + def get_default( + self, ctx: Context, call: bool = True + ) -> t.Any | t.Callable[[], t.Any] | None: + """Get the default for the parameter. Tries + :meth:`Context.lookup_default` first, then the local default. + + :param ctx: Current context. + :param call: If the default is a callable, call it. Disable to + return the callable instead. + + .. versionchanged:: 8.0.2 + Type casting is no longer performed when getting a default. + + .. versionchanged:: 8.0.1 + Type casting can fail in resilient parsing mode. Invalid + defaults will not prevent showing help text. + + .. versionchanged:: 8.0 + Looks at ``ctx.default_map`` first. + + .. versionchanged:: 8.0 + Added the ``call`` parameter. + """ + value = ctx.lookup_default(self.name, call=False) # type: ignore + + if value is UNSET: + value = self.default + + if call and callable(value): + value = value() + + return value + + def add_to_parser(self, parser: _OptionParser, ctx: Context) -> None: + raise NotImplementedError() + + def consume_value( + self, ctx: Context, opts: cabc.Mapping[str, t.Any] + ) -> tuple[t.Any, ParameterSource]: + """Returns the parameter value produced by the parser. + + If the parser did not produce a value from user input, the value is either + sourced from the environment variable, the default map, or the parameter's + default value. In that order of precedence. + + If no value is found, an internal sentinel value is returned. + + :meta private: + """ + # Collect from the parse the value passed by the user to the CLI. + value = opts.get(self.name, UNSET) # type: ignore + # If the value is set, it means it was sourced from the command line by the + # parser, otherwise it left unset by default. + source = ( + ParameterSource.COMMANDLINE + if value is not UNSET + else ParameterSource.DEFAULT + ) + + if value is UNSET: + envvar_value = self.value_from_envvar(ctx) + if envvar_value is not None: + value = envvar_value + source = ParameterSource.ENVIRONMENT + + if value is UNSET: + default_map_value = ctx.lookup_default(self.name) # type: ignore + if default_map_value is not UNSET: + value = default_map_value + source = ParameterSource.DEFAULT_MAP + + if value is UNSET: + default_value = self.get_default(ctx) + if default_value is not UNSET: + value = default_value + source = ParameterSource.DEFAULT + + return value, source + + def type_cast_value(self, ctx: Context, value: t.Any) -> t.Any: + """Convert and validate a value against the parameter's + :attr:`type`, :attr:`multiple`, and :attr:`nargs`. + """ + if value in (None, UNSET): + if self.multiple or self.nargs == -1: + return () + else: + return value + + def check_iter(value: t.Any) -> cabc.Iterator[t.Any]: + try: + return _check_iter(value) + except TypeError: + # This should only happen when passing in args manually, + # the parser should construct an iterable when parsing + # the command line. + raise BadParameter( + _("Value must be an iterable."), ctx=ctx, param=self + ) from None + + # Define the conversion function based on nargs and type. + + if self.nargs == 1 or self.type.is_composite: + + def convert(value: t.Any) -> t.Any: + return self.type(value, param=self, ctx=ctx) + + elif self.nargs == -1: + + def convert(value: t.Any) -> t.Any: # tuple[t.Any, ...] + return tuple(self.type(x, self, ctx) for x in check_iter(value)) + + else: # nargs > 1 + + def convert(value: t.Any) -> t.Any: # tuple[t.Any, ...] + value = tuple(check_iter(value)) + + if len(value) != self.nargs: + raise BadParameter( + ngettext( + "Takes {nargs} values but 1 was given.", + "Takes {nargs} values but {len} were given.", + len(value), + ).format(nargs=self.nargs, len=len(value)), + ctx=ctx, + param=self, + ) + + return tuple(self.type(x, self, ctx) for x in value) + + if self.multiple: + return tuple(convert(x) for x in check_iter(value)) + + return convert(value) + + def value_is_missing(self, value: t.Any) -> bool: + """A value is considered missing if: + + - it is :attr:`UNSET`, + - or if it is an empty sequence while the parameter is suppose to have + non-single value (i.e. :attr:`nargs` is not ``1`` or :attr:`multiple` is + set). + + :meta private: + """ + if value is UNSET: + return True + + if (self.nargs != 1 or self.multiple) and value == (): + return True + + return False + + def process_value(self, ctx: Context, value: t.Any) -> t.Any: + """Process the value of this parameter: + + 1. Type cast the value using :meth:`type_cast_value`. + 2. Check if the value is missing (see: :meth:`value_is_missing`), and raise + :exc:`MissingParameter` if it is required. + 3. If a :attr:`callback` is set, call it to have the value replaced by the + result of the callback. If the value was not set, the callback receive + ``None``. This keep the legacy behavior as it was before the introduction of + the :attr:`UNSET` sentinel. + + :meta private: + """ + value = self.type_cast_value(ctx, value) + + if self.required and self.value_is_missing(value): + raise MissingParameter(ctx=ctx, param=self) + + if self.callback is not None: + # Legacy case: UNSET is not exposed directly to the callback, but converted + # to None. + if value is UNSET: + value = None + value = self.callback(ctx, self, value) + + return value + + def resolve_envvar_value(self, ctx: Context) -> str | None: + """Returns the value found in the environment variable(s) attached to this + parameter. + + Environment variables values are `always returned as strings + `_. + + This method returns ``None`` if: + + - the :attr:`envvar` property is not set on the :class:`Parameter`, + - the environment variable is not found in the environment, + - the variable is found in the environment but its value is empty (i.e. the + environment variable is present but has an empty string). + + If :attr:`envvar` is setup with multiple environment variables, + then only the first non-empty value is returned. + + .. caution:: + + The raw value extracted from the environment is not normalized and is + returned as-is. Any normalization or reconciliation is performed later by + the :class:`Parameter`'s :attr:`type`. + + :meta private: + """ + if not self.envvar: + return None + + if isinstance(self.envvar, str): + rv = os.environ.get(self.envvar) + + if rv: + return rv + else: + for envvar in self.envvar: + rv = os.environ.get(envvar) + + # Return the first non-empty value of the list of environment variables. + if rv: + return rv + # Else, absence of value is interpreted as an environment variable that + # is not set, so proceed to the next one. + + return None + + def value_from_envvar(self, ctx: Context) -> str | cabc.Sequence[str] | None: + """Process the raw environment variable string for this parameter. + + Returns the string as-is or splits it into a sequence of strings if the + parameter is expecting multiple values (i.e. its :attr:`nargs` property is set + to a value other than ``1``). + + :meta private: + """ + rv = self.resolve_envvar_value(ctx) + + if rv is not None and self.nargs != 1: + return self.type.split_envvar_value(rv) + + return rv + + def handle_parse_result( + self, ctx: Context, opts: cabc.Mapping[str, t.Any], args: list[str] + ) -> tuple[t.Any, list[str]]: + """Process the value produced by the parser from user input. + + Always process the value through the Parameter's :attr:`type`, wherever it + comes from. + + If the parameter is deprecated, this method warn the user about it. But only if + the value has been explicitly set by the user (and as such, is not coming from + a default). + + :meta private: + """ + with augment_usage_errors(ctx, param=self): + value, source = self.consume_value(ctx, opts) + + ctx.set_parameter_source(self.name, source) # type: ignore + + # Display a deprecation warning if necessary. + if ( + self.deprecated + and value is not UNSET + and source not in (ParameterSource.DEFAULT, ParameterSource.DEFAULT_MAP) + ): + extra_message = ( + f" {self.deprecated}" if isinstance(self.deprecated, str) else "" + ) + message = _( + "DeprecationWarning: The {param_type} {name!r} is deprecated." + "{extra_message}" + ).format( + param_type=self.param_type_name, + name=self.human_readable_name, + extra_message=extra_message, + ) + echo(style(message, fg="red"), err=True) + + # Process the value through the parameter's type. + try: + value = self.process_value(ctx, value) + except Exception: + if not ctx.resilient_parsing: + raise + # In resilient parsing mode, we do not want to fail the command if the + # value is incompatible with the parameter type, so we reset the value + # to UNSET, which will be interpreted as a missing value. + value = UNSET + + # Add parameter's value to the context. + if ( + self.expose_value + # We skip adding the value if it was previously set by another parameter + # targeting the same variable name. This prevents parameters competing for + # the same name to override each other. + and self.name not in ctx.params + ): + # Click is logically enforcing that the name is None if the parameter is + # not to be exposed. We still assert it here to please the type checker. + assert self.name is not None, ( + f"{self!r} parameter's name should not be None when exposing value." + ) + # Normalize UNSET values to None, as we're about to pass them to the + # command function and move them to the pure-Python realm of user-written + # code. + ctx.params[self.name] = value if value is not UNSET else None + + return value, args + + def get_help_record(self, ctx: Context) -> tuple[str, str] | None: + pass + + def get_usage_pieces(self, ctx: Context) -> list[str]: + return [] + + def get_error_hint(self, ctx: Context) -> str: + """Get a stringified version of the param for use in error messages to + indicate which param caused the error. + """ + hint_list = self.opts or [self.human_readable_name] + return " / ".join(f"'{x}'" for x in hint_list) + + def shell_complete(self, ctx: Context, incomplete: str) -> list[CompletionItem]: + """Return a list of completions for the incomplete value. If a + ``shell_complete`` function was given during init, it is used. + Otherwise, the :attr:`type` + :meth:`~click.types.ParamType.shell_complete` function is used. + + :param ctx: Invocation context for this command. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + if self._custom_shell_complete is not None: + results = self._custom_shell_complete(ctx, self, incomplete) + + if results and isinstance(results[0], str): + from click.shell_completion import CompletionItem + + results = [CompletionItem(c) for c in results] + + return t.cast("list[CompletionItem]", results) + + return self.type.shell_complete(ctx, self, incomplete) + + +class Option(Parameter): + """Options are usually optional values on the command line and + have some extra features that arguments don't have. + + All other parameters are passed onwards to the parameter constructor. + + :param show_default: Show the default value for this option in its + help text. Values are not shown by default, unless + :attr:`Context.show_default` is ``True``. If this value is a + string, it shows that string in parentheses instead of the + actual value. This is particularly useful for dynamic options. + For single option boolean flags, the default remains hidden if + its value is ``False``. + :param show_envvar: Controls if an environment variable should be + shown on the help page and error messages. + Normally, environment variables are not shown. + :param prompt: If set to ``True`` or a non empty string then the + user will be prompted for input. If set to ``True`` the prompt + will be the option name capitalized. A deprecated option cannot be + prompted. + :param confirmation_prompt: Prompt a second time to confirm the + value if it was prompted for. Can be set to a string instead of + ``True`` to customize the message. + :param prompt_required: If set to ``False``, the user will be + prompted for input only when the option was specified as a flag + without a value. + :param hide_input: If this is ``True`` then the input on the prompt + will be hidden from the user. This is useful for password input. + :param is_flag: forces this option to act as a flag. The default is + auto detection. + :param flag_value: which value should be used for this flag if it's + enabled. This is set to a boolean automatically if + the option string contains a slash to mark two options. + :param multiple: if this is set to `True` then the argument is accepted + multiple times and recorded. This is similar to ``nargs`` + in how it works but supports arbitrary number of + arguments. + :param count: this flag makes an option increment an integer. + :param allow_from_autoenv: if this is enabled then the value of this + parameter will be pulled from an environment + variable in case a prefix is defined on the + context. + :param help: the help string. + :param hidden: hide this option from help outputs. + :param attrs: Other command arguments described in :class:`Parameter`. + + .. versionchanged:: 8.2 + ``envvar`` used with ``flag_value`` will always use the ``flag_value``, + previously it would use the value of the environment variable. + + .. versionchanged:: 8.1 + Help text indentation is cleaned here instead of only in the + ``@option`` decorator. + + .. versionchanged:: 8.1 + The ``show_default`` parameter overrides + ``Context.show_default``. + + .. versionchanged:: 8.1 + The default of a single option boolean flag is not shown if the + default value is ``False``. + + .. versionchanged:: 8.0.1 + ``type`` is detected from ``flag_value`` if given. + """ + + param_type_name = "option" + + def __init__( + self, + param_decls: cabc.Sequence[str] | None = None, + show_default: bool | str | None = None, + prompt: bool | str = False, + confirmation_prompt: bool | str = False, + prompt_required: bool = True, + hide_input: bool = False, + is_flag: bool | None = None, + flag_value: t.Any = UNSET, + multiple: bool = False, + count: bool = False, + allow_from_autoenv: bool = True, + type: types.ParamType | t.Any | None = None, + help: str | None = None, + hidden: bool = False, + show_choices: bool = True, + show_envvar: bool = False, + deprecated: bool | str = False, + **attrs: t.Any, + ) -> None: + if help: + help = inspect.cleandoc(help) + + super().__init__( + param_decls, type=type, multiple=multiple, deprecated=deprecated, **attrs + ) + + if prompt is True: + if self.name is None: + raise TypeError("'name' is required with 'prompt=True'.") + + prompt_text: str | None = self.name.replace("_", " ").capitalize() + elif prompt is False: + prompt_text = None + else: + prompt_text = prompt + + if deprecated: + deprecated_message = ( + f"(DEPRECATED: {deprecated})" + if isinstance(deprecated, str) + else "(DEPRECATED)" + ) + help = help + deprecated_message if help is not None else deprecated_message + + self.prompt = prompt_text + self.confirmation_prompt = confirmation_prompt + self.prompt_required = prompt_required + self.hide_input = hide_input + self.hidden = hidden + + # The _flag_needs_value property tells the parser that this option is a flag + # that cannot be used standalone and needs a value. With this information, the + # parser can determine whether to consider the next user-provided argument in + # the CLI as a value for this flag or as a new option. + # If prompt is enabled but not required, then it opens the possibility for the + # option to gets its value from the user. + self._flag_needs_value = self.prompt is not None and not self.prompt_required + + # Auto-detect if this is a flag or not. + if is_flag is None: + # Implicitly a flag because flag_value was set. + if flag_value is not UNSET: + is_flag = True + # Not a flag, but when used as a flag it shows a prompt. + elif self._flag_needs_value: + is_flag = False + # Implicitly a flag because secondary options names were given. + elif self.secondary_opts: + is_flag = True + # The option is explicitly not a flag. But we do not know yet if it needs a + # value or not. So we look at the default value to determine it. + elif is_flag is False and not self._flag_needs_value: + self._flag_needs_value = self.default is UNSET + + if is_flag: + # Set missing default for flags if not explicitly required or prompted. + if self.default is UNSET and not self.required and not self.prompt: + if multiple: + self.default = () + + # Auto-detect the type of the flag based on the flag_value. + if type is None: + # A flag without a flag_value is a boolean flag. + if flag_value is UNSET: + self.type = types.BoolParamType() + # If the flag value is a boolean, use BoolParamType. + elif isinstance(flag_value, bool): + self.type = types.BoolParamType() + # Otherwise, guess the type from the flag value. + else: + self.type = types.convert_type(None, flag_value) + + self.is_flag: bool = bool(is_flag) + self.is_bool_flag: bool = bool( + is_flag and isinstance(self.type, types.BoolParamType) + ) + self.flag_value: t.Any = flag_value + + # Set boolean flag default to False if unset and not required. + if self.is_bool_flag: + if self.default is UNSET and not self.required: + self.default = False + + # Support the special case of aligning the default value with the flag_value + # for flags whose default is explicitly set to True. Note that as long as we + # have this condition, there is no way a flag can have a default set to True, + # and a flag_value set to something else. Refs: + # https://github.com/pallets/click/issues/3024#issuecomment-3146199461 + # https://github.com/pallets/click/pull/3030/commits/06847da + if self.default is True and self.flag_value is not UNSET: + self.default = self.flag_value + + # Set the default flag_value if it is not set. + if self.flag_value is UNSET: + if self.is_flag: + self.flag_value = True + else: + self.flag_value = None + + # Counting. + self.count = count + if count: + if type is None: + self.type = types.IntRange(min=0) + if self.default is UNSET: + self.default = 0 + + self.allow_from_autoenv = allow_from_autoenv + self.help = help + self.show_default = show_default + self.show_choices = show_choices + self.show_envvar = show_envvar + + if __debug__: + if deprecated and prompt: + raise ValueError("`deprecated` options cannot use `prompt`.") + + if self.nargs == -1: + raise TypeError("nargs=-1 is not supported for options.") + + if not self.is_bool_flag and self.secondary_opts: + raise TypeError("Secondary flag is not valid for non-boolean flag.") + + if self.is_bool_flag and self.hide_input and self.prompt is not None: + raise TypeError( + "'prompt' with 'hide_input' is not valid for boolean flag." + ) + + if self.count: + if self.multiple: + raise TypeError("'count' is not valid with 'multiple'.") + + if self.is_flag: + raise TypeError("'count' is not valid with 'is_flag'.") + + def to_info_dict(self) -> dict[str, t.Any]: + """ + .. versionchanged:: 8.3.0 + Returns ``None`` for the :attr:`flag_value` if it was not set. + """ + info_dict = super().to_info_dict() + info_dict.update( + help=self.help, + prompt=self.prompt, + is_flag=self.is_flag, + # We explicitly hide the :attr:`UNSET` value to the user, as we choose to + # make it an implementation detail. And because ``to_info_dict`` has been + # designed for documentation purposes, we return ``None`` instead. + flag_value=self.flag_value if self.flag_value is not UNSET else None, + count=self.count, + hidden=self.hidden, + ) + return info_dict + + def get_error_hint(self, ctx: Context) -> str: + result = super().get_error_hint(ctx) + if self.show_envvar and self.envvar is not None: + result += f" (env var: '{self.envvar}')" + return result + + def _parse_decls( + self, decls: cabc.Sequence[str], expose_value: bool + ) -> tuple[str | None, list[str], list[str]]: + opts = [] + secondary_opts = [] + name = None + possible_names = [] + + for decl in decls: + if decl.isidentifier(): + if name is not None: + raise TypeError(f"Name '{name}' defined twice") + name = decl + else: + split_char = ";" if decl[:1] == "/" else "/" + if split_char in decl: + first, second = decl.split(split_char, 1) + first = first.rstrip() + if first: + possible_names.append(_split_opt(first)) + opts.append(first) + second = second.lstrip() + if second: + secondary_opts.append(second.lstrip()) + if first == second: + raise ValueError( + f"Boolean option {decl!r} cannot use the" + " same flag for true/false." + ) + else: + possible_names.append(_split_opt(decl)) + opts.append(decl) + + if name is None and possible_names: + possible_names.sort(key=lambda x: -len(x[0])) # group long options first + name = possible_names[0][1].replace("-", "_").lower() + if not name.isidentifier(): + name = None + + if name is None: + if not expose_value: + return None, opts, secondary_opts + raise TypeError( + f"Could not determine name for option with declarations {decls!r}" + ) + + if not opts and not secondary_opts: + raise TypeError( + f"No options defined but a name was passed ({name})." + " Did you mean to declare an argument instead? Did" + f" you mean to pass '--{name}'?" + ) + + return name, opts, secondary_opts + + def add_to_parser(self, parser: _OptionParser, ctx: Context) -> None: + if self.multiple: + action = "append" + elif self.count: + action = "count" + else: + action = "store" + + if self.is_flag: + action = f"{action}_const" + + if self.is_bool_flag and self.secondary_opts: + parser.add_option( + obj=self, opts=self.opts, dest=self.name, action=action, const=True + ) + parser.add_option( + obj=self, + opts=self.secondary_opts, + dest=self.name, + action=action, + const=False, + ) + else: + parser.add_option( + obj=self, + opts=self.opts, + dest=self.name, + action=action, + const=self.flag_value, + ) + else: + parser.add_option( + obj=self, + opts=self.opts, + dest=self.name, + action=action, + nargs=self.nargs, + ) + + def get_help_record(self, ctx: Context) -> tuple[str, str] | None: + if self.hidden: + return None + + any_prefix_is_slash = False + + def _write_opts(opts: cabc.Sequence[str]) -> str: + nonlocal any_prefix_is_slash + + rv, any_slashes = join_options(opts) + + if any_slashes: + any_prefix_is_slash = True + + if not self.is_flag and not self.count: + rv += f" {self.make_metavar(ctx=ctx)}" + + return rv + + rv = [_write_opts(self.opts)] + + if self.secondary_opts: + rv.append(_write_opts(self.secondary_opts)) + + help = self.help or "" + + extra = self.get_help_extra(ctx) + extra_items = [] + if "envvars" in extra: + extra_items.append( + _("env var: {var}").format(var=", ".join(extra["envvars"])) + ) + if "default" in extra: + extra_items.append(_("default: {default}").format(default=extra["default"])) + if "range" in extra: + extra_items.append(extra["range"]) + if "required" in extra: + extra_items.append(_(extra["required"])) + + if extra_items: + extra_str = "; ".join(extra_items) + help = f"{help} [{extra_str}]" if help else f"[{extra_str}]" + + return ("; " if any_prefix_is_slash else " / ").join(rv), help + + def get_help_extra(self, ctx: Context) -> types.OptionHelpExtra: + extra: types.OptionHelpExtra = {} + + if self.show_envvar: + envvar = self.envvar + + if envvar is None: + if ( + self.allow_from_autoenv + and ctx.auto_envvar_prefix is not None + and self.name is not None + ): + envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}" + + if envvar is not None: + if isinstance(envvar, str): + extra["envvars"] = (envvar,) + else: + extra["envvars"] = tuple(str(d) for d in envvar) + + # Temporarily enable resilient parsing to avoid type casting + # failing for the default. Might be possible to extend this to + # help formatting in general. + resilient = ctx.resilient_parsing + ctx.resilient_parsing = True + + try: + default_value = self.get_default(ctx, call=False) + finally: + ctx.resilient_parsing = resilient + + show_default = False + show_default_is_str = False + + if self.show_default is not None: + if isinstance(self.show_default, str): + show_default_is_str = show_default = True + else: + show_default = self.show_default + elif ctx.show_default is not None: + show_default = ctx.show_default + + if show_default_is_str or ( + show_default and (default_value not in (None, UNSET)) + ): + if show_default_is_str: + default_string = f"({self.show_default})" + elif isinstance(default_value, (list, tuple)): + default_string = ", ".join(str(d) for d in default_value) + elif isinstance(default_value, enum.Enum): + default_string = default_value.name + elif inspect.isfunction(default_value): + default_string = _("(dynamic)") + elif self.is_bool_flag and self.secondary_opts: + # For boolean flags that have distinct True/False opts, + # use the opt without prefix instead of the value. + default_string = _split_opt( + (self.opts if default_value else self.secondary_opts)[0] + )[1] + elif self.is_bool_flag and not self.secondary_opts and not default_value: + default_string = "" + elif default_value == "": + default_string = '""' + else: + default_string = str(default_value) + + if default_string: + extra["default"] = default_string + + if ( + isinstance(self.type, types._NumberRangeBase) + # skip count with default range type + and not (self.count and self.type.min == 0 and self.type.max is None) + ): + range_str = self.type._describe_range() + + if range_str: + extra["range"] = range_str + + if self.required: + extra["required"] = "required" + + return extra + + def prompt_for_value(self, ctx: Context) -> t.Any: + """This is an alternative flow that can be activated in the full + value processing if a value does not exist. It will prompt the + user until a valid value exists and then returns the processed + value as result. + """ + assert self.prompt is not None + + # Calculate the default before prompting anything to lock in the value before + # attempting any user interaction. + default = self.get_default(ctx) + + # A boolean flag can use a simplified [y/n] confirmation prompt. + if self.is_bool_flag: + # If we have no boolean default, we force the user to explicitly provide + # one. + if default in (UNSET, None): + default = None + # Nothing prevent you to declare an option that is simultaneously: + # 1) auto-detected as a boolean flag, + # 2) allowed to prompt, and + # 3) still declare a non-boolean default. + # This forced casting into a boolean is necessary to align any non-boolean + # default to the prompt, which is going to be a [y/n]-style confirmation + # because the option is still a boolean flag. That way, instead of [y/n], + # we get [Y/n] or [y/N] depending on the truthy value of the default. + # Refs: https://github.com/pallets/click/pull/3030#discussion_r2289180249 + else: + default = bool(default) + return confirm(self.prompt, default) + + # If show_default is set to True/False, provide this to `prompt` as well. For + # non-bool values of `show_default`, we use `prompt`'s default behavior + prompt_kwargs: t.Any = {} + if isinstance(self.show_default, bool): + prompt_kwargs["show_default"] = self.show_default + + return prompt( + self.prompt, + # Use ``None`` to inform the prompt() function to reiterate until a valid + # value is provided by the user if we have no default. + default=None if default is UNSET else default, + type=self.type, + hide_input=self.hide_input, + show_choices=self.show_choices, + confirmation_prompt=self.confirmation_prompt, + value_proc=lambda x: self.process_value(ctx, x), + **prompt_kwargs, + ) + + def resolve_envvar_value(self, ctx: Context) -> str | None: + """:class:`Option` resolves its environment variable the same way as + :func:`Parameter.resolve_envvar_value`, but it also supports + :attr:`Context.auto_envvar_prefix`. If we could not find an environment from + the :attr:`envvar` property, we fallback on :attr:`Context.auto_envvar_prefix` + to build dynamiccaly the environment variable name using the + :python:`{ctx.auto_envvar_prefix}_{self.name.upper()}` template. + + :meta private: + """ + rv = super().resolve_envvar_value(ctx) + + if rv is not None: + return rv + + if ( + self.allow_from_autoenv + and ctx.auto_envvar_prefix is not None + and self.name is not None + ): + envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}" + rv = os.environ.get(envvar) + + if rv: + return rv + + return None + + def value_from_envvar(self, ctx: Context) -> t.Any: + """For :class:`Option`, this method processes the raw environment variable + string the same way as :func:`Parameter.value_from_envvar` does. + + But in the case of non-boolean flags, the value is analyzed to determine if the + flag is activated or not, and returns a boolean of its activation, or the + :attr:`flag_value` if the latter is set. + + This method also takes care of repeated options (i.e. options with + :attr:`multiple` set to ``True``). + + :meta private: + """ + rv = self.resolve_envvar_value(ctx) + + # Absent environment variable or an empty string is interpreted as unset. + if rv is None: + return None + + # Non-boolean flags are more liberal in what they accept. But a flag being a + # flag, its envvar value still needs to be analyzed to determine if the flag is + # activated or not. + if self.is_flag and not self.is_bool_flag: + # If the flag_value is set and match the envvar value, return it + # directly. + if self.flag_value is not UNSET and rv == self.flag_value: + return self.flag_value + # Analyze the envvar value as a boolean to know if the flag is + # activated or not. + return types.BoolParamType.str_to_bool(rv) + + # Split the envvar value if it is allowed to be repeated. + value_depth = (self.nargs != 1) + bool(self.multiple) + if value_depth > 0: + multi_rv = self.type.split_envvar_value(rv) + if self.multiple and self.nargs != 1: + multi_rv = batch(multi_rv, self.nargs) # type: ignore[assignment] + + return multi_rv + + return rv + + def consume_value( + self, ctx: Context, opts: cabc.Mapping[str, Parameter] + ) -> tuple[t.Any, ParameterSource]: + """For :class:`Option`, the value can be collected from an interactive prompt + if the option is a flag that needs a value (and the :attr:`prompt` property is + set). + + Additionally, this method handles flag option that are activated without a + value, in which case the :attr:`flag_value` is returned. + + :meta private: + """ + value, source = super().consume_value(ctx, opts) + + # The parser will emit a sentinel value if the option is allowed to as a flag + # without a value. + if value is FLAG_NEEDS_VALUE: + # If the option allows for a prompt, we start an interaction with the user. + if self.prompt is not None and not ctx.resilient_parsing: + value = self.prompt_for_value(ctx) + source = ParameterSource.PROMPT + # Else the flag takes its flag_value as value. + else: + value = self.flag_value + source = ParameterSource.COMMANDLINE + + # A flag which is activated always returns the flag value, unless the value + # comes from the explicitly sets default. + elif ( + self.is_flag + and value is True + and not self.is_bool_flag + and source not in (ParameterSource.DEFAULT, ParameterSource.DEFAULT_MAP) + ): + value = self.flag_value + + # Re-interpret a multiple option which has been sent as-is by the parser. + # Here we replace each occurrence of value-less flags (marked by the + # FLAG_NEEDS_VALUE sentinel) with the flag_value. + elif ( + self.multiple + and value is not UNSET + and source not in (ParameterSource.DEFAULT, ParameterSource.DEFAULT_MAP) + and any(v is FLAG_NEEDS_VALUE for v in value) + ): + value = [self.flag_value if v is FLAG_NEEDS_VALUE else v for v in value] + source = ParameterSource.COMMANDLINE + + # The value wasn't set, or used the param's default, prompt for one to the user + # if prompting is enabled. + elif ( + ( + value is UNSET + or source in (ParameterSource.DEFAULT, ParameterSource.DEFAULT_MAP) + ) + and self.prompt is not None + and (self.required or self.prompt_required) + and not ctx.resilient_parsing + ): + value = self.prompt_for_value(ctx) + source = ParameterSource.PROMPT + + return value, source + + def type_cast_value(self, ctx: Context, value: t.Any) -> t.Any: + if self.is_flag and not self.required: + if value is UNSET: + if self.is_bool_flag: + # If the flag is a boolean flag, we return False if it is not set. + value = False + return super().type_cast_value(ctx, value) + + +class Argument(Parameter): + """Arguments are positional parameters to a command. They generally + provide fewer features than options but can have infinite ``nargs`` + and are required by default. + + All parameters are passed onwards to the constructor of :class:`Parameter`. + """ + + param_type_name = "argument" + + def __init__( + self, + param_decls: cabc.Sequence[str], + required: bool | None = None, + **attrs: t.Any, + ) -> None: + # Auto-detect the requirement status of the argument if not explicitly set. + if required is None: + # The argument gets automatically required if it has no explicit default + # value set and is setup to match at least one value. + if attrs.get("default", UNSET) is UNSET: + required = attrs.get("nargs", 1) > 0 + # If the argument has a default value, it is not required. + else: + required = False + + if "multiple" in attrs: + raise TypeError("__init__() got an unexpected keyword argument 'multiple'.") + + super().__init__(param_decls, required=required, **attrs) + + @property + def human_readable_name(self) -> str: + if self.metavar is not None: + return self.metavar + return self.name.upper() # type: ignore + + def make_metavar(self, ctx: Context) -> str: + if self.metavar is not None: + return self.metavar + var = self.type.get_metavar(param=self, ctx=ctx) + if not var: + var = self.name.upper() # type: ignore + if self.deprecated: + var += "!" + if not self.required: + var = f"[{var}]" + if self.nargs != 1: + var += "..." + return var + + def _parse_decls( + self, decls: cabc.Sequence[str], expose_value: bool + ) -> tuple[str | None, list[str], list[str]]: + if not decls: + if not expose_value: + return None, [], [] + raise TypeError("Argument is marked as exposed, but does not have a name.") + if len(decls) == 1: + name = arg = decls[0] + name = name.replace("-", "_").lower() + else: + raise TypeError( + "Arguments take exactly one parameter declaration, got" + f" {len(decls)}: {decls}." + ) + return name, [arg], [] + + def get_usage_pieces(self, ctx: Context) -> list[str]: + return [self.make_metavar(ctx)] + + def get_error_hint(self, ctx: Context) -> str: + return f"'{self.make_metavar(ctx)}'" + + def add_to_parser(self, parser: _OptionParser, ctx: Context) -> None: + parser.add_argument(dest=self.name, nargs=self.nargs, obj=self) + + +def __getattr__(name: str) -> object: + import warnings + + if name == "BaseCommand": + warnings.warn( + "'BaseCommand' is deprecated and will be removed in Click 9.0. Use" + " 'Command' instead.", + DeprecationWarning, + stacklevel=2, + ) + return _BaseCommand + + if name == "MultiCommand": + warnings.warn( + "'MultiCommand' is deprecated and will be removed in Click 9.0. Use" + " 'Group' instead.", + DeprecationWarning, + stacklevel=2, + ) + return _MultiCommand + + raise AttributeError(name) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click/decorators.py b/tools/converter-generator/venv/lib/python3.11/site-packages/click/decorators.py new file mode 100644 index 0000000..21f4c34 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click/decorators.py @@ -0,0 +1,551 @@ +from __future__ import annotations + +import inspect +import typing as t +from functools import update_wrapper +from gettext import gettext as _ + +from .core import Argument +from .core import Command +from .core import Context +from .core import Group +from .core import Option +from .core import Parameter +from .globals import get_current_context +from .utils import echo + +if t.TYPE_CHECKING: + import typing_extensions as te + + P = te.ParamSpec("P") + +R = t.TypeVar("R") +T = t.TypeVar("T") +_AnyCallable = t.Callable[..., t.Any] +FC = t.TypeVar("FC", bound="_AnyCallable | Command") + + +def pass_context(f: t.Callable[te.Concatenate[Context, P], R]) -> t.Callable[P, R]: + """Marks a callback as wanting to receive the current context + object as first argument. + """ + + def new_func(*args: P.args, **kwargs: P.kwargs) -> R: + return f(get_current_context(), *args, **kwargs) + + return update_wrapper(new_func, f) + + +def pass_obj(f: t.Callable[te.Concatenate[T, P], R]) -> t.Callable[P, R]: + """Similar to :func:`pass_context`, but only pass the object on the + context onwards (:attr:`Context.obj`). This is useful if that object + represents the state of a nested system. + """ + + def new_func(*args: P.args, **kwargs: P.kwargs) -> R: + return f(get_current_context().obj, *args, **kwargs) + + return update_wrapper(new_func, f) + + +def make_pass_decorator( + object_type: type[T], ensure: bool = False +) -> t.Callable[[t.Callable[te.Concatenate[T, P], R]], t.Callable[P, R]]: + """Given an object type this creates a decorator that will work + similar to :func:`pass_obj` but instead of passing the object of the + current context, it will find the innermost context of type + :func:`object_type`. + + This generates a decorator that works roughly like this:: + + from functools import update_wrapper + + def decorator(f): + @pass_context + def new_func(ctx, *args, **kwargs): + obj = ctx.find_object(object_type) + return ctx.invoke(f, obj, *args, **kwargs) + return update_wrapper(new_func, f) + return decorator + + :param object_type: the type of the object to pass. + :param ensure: if set to `True`, a new object will be created and + remembered on the context if it's not there yet. + """ + + def decorator(f: t.Callable[te.Concatenate[T, P], R]) -> t.Callable[P, R]: + def new_func(*args: P.args, **kwargs: P.kwargs) -> R: + ctx = get_current_context() + + obj: T | None + if ensure: + obj = ctx.ensure_object(object_type) + else: + obj = ctx.find_object(object_type) + + if obj is None: + raise RuntimeError( + "Managed to invoke callback without a context" + f" object of type {object_type.__name__!r}" + " existing." + ) + + return ctx.invoke(f, obj, *args, **kwargs) + + return update_wrapper(new_func, f) + + return decorator + + +def pass_meta_key( + key: str, *, doc_description: str | None = None +) -> t.Callable[[t.Callable[te.Concatenate[T, P], R]], t.Callable[P, R]]: + """Create a decorator that passes a key from + :attr:`click.Context.meta` as the first argument to the decorated + function. + + :param key: Key in ``Context.meta`` to pass. + :param doc_description: Description of the object being passed, + inserted into the decorator's docstring. Defaults to "the 'key' + key from Context.meta". + + .. versionadded:: 8.0 + """ + + def decorator(f: t.Callable[te.Concatenate[T, P], R]) -> t.Callable[P, R]: + def new_func(*args: P.args, **kwargs: P.kwargs) -> R: + ctx = get_current_context() + obj = ctx.meta[key] + return ctx.invoke(f, obj, *args, **kwargs) + + return update_wrapper(new_func, f) + + if doc_description is None: + doc_description = f"the {key!r} key from :attr:`click.Context.meta`" + + decorator.__doc__ = ( + f"Decorator that passes {doc_description} as the first argument" + " to the decorated function." + ) + return decorator + + +CmdType = t.TypeVar("CmdType", bound=Command) + + +# variant: no call, directly as decorator for a function. +@t.overload +def command(name: _AnyCallable) -> Command: ... + + +# variant: with positional name and with positional or keyword cls argument: +# @command(namearg, CommandCls, ...) or @command(namearg, cls=CommandCls, ...) +@t.overload +def command( + name: str | None, + cls: type[CmdType], + **attrs: t.Any, +) -> t.Callable[[_AnyCallable], CmdType]: ... + + +# variant: name omitted, cls _must_ be a keyword argument, @command(cls=CommandCls, ...) +@t.overload +def command( + name: None = None, + *, + cls: type[CmdType], + **attrs: t.Any, +) -> t.Callable[[_AnyCallable], CmdType]: ... + + +# variant: with optional string name, no cls argument provided. +@t.overload +def command( + name: str | None = ..., cls: None = None, **attrs: t.Any +) -> t.Callable[[_AnyCallable], Command]: ... + + +def command( + name: str | _AnyCallable | None = None, + cls: type[CmdType] | None = None, + **attrs: t.Any, +) -> Command | t.Callable[[_AnyCallable], Command | CmdType]: + r"""Creates a new :class:`Command` and uses the decorated function as + callback. This will also automatically attach all decorated + :func:`option`\s and :func:`argument`\s as parameters to the command. + + The name of the command defaults to the name of the function, converted to + lowercase, with underscores ``_`` replaced by dashes ``-``, and the suffixes + ``_command``, ``_cmd``, ``_group``, and ``_grp`` are removed. For example, + ``init_data_command`` becomes ``init-data``. + + All keyword arguments are forwarded to the underlying command class. + For the ``params`` argument, any decorated params are appended to + the end of the list. + + Once decorated the function turns into a :class:`Command` instance + that can be invoked as a command line utility or be attached to a + command :class:`Group`. + + :param name: The name of the command. Defaults to modifying the function's + name as described above. + :param cls: The command class to create. Defaults to :class:`Command`. + + .. versionchanged:: 8.2 + The suffixes ``_command``, ``_cmd``, ``_group``, and ``_grp`` are + removed when generating the name. + + .. versionchanged:: 8.1 + This decorator can be applied without parentheses. + + .. versionchanged:: 8.1 + The ``params`` argument can be used. Decorated params are + appended to the end of the list. + """ + + func: t.Callable[[_AnyCallable], t.Any] | None = None + + if callable(name): + func = name + name = None + assert cls is None, "Use 'command(cls=cls)(callable)' to specify a class." + assert not attrs, "Use 'command(**kwargs)(callable)' to provide arguments." + + if cls is None: + cls = t.cast("type[CmdType]", Command) + + def decorator(f: _AnyCallable) -> CmdType: + if isinstance(f, Command): + raise TypeError("Attempted to convert a callback into a command twice.") + + attr_params = attrs.pop("params", None) + params = attr_params if attr_params is not None else [] + + try: + decorator_params = f.__click_params__ # type: ignore + except AttributeError: + pass + else: + del f.__click_params__ # type: ignore + params.extend(reversed(decorator_params)) + + if attrs.get("help") is None: + attrs["help"] = f.__doc__ + + if t.TYPE_CHECKING: + assert cls is not None + assert not callable(name) + + if name is not None: + cmd_name = name + else: + cmd_name = f.__name__.lower().replace("_", "-") + cmd_left, sep, suffix = cmd_name.rpartition("-") + + if sep and suffix in {"command", "cmd", "group", "grp"}: + cmd_name = cmd_left + + cmd = cls(name=cmd_name, callback=f, params=params, **attrs) + cmd.__doc__ = f.__doc__ + return cmd + + if func is not None: + return decorator(func) + + return decorator + + +GrpType = t.TypeVar("GrpType", bound=Group) + + +# variant: no call, directly as decorator for a function. +@t.overload +def group(name: _AnyCallable) -> Group: ... + + +# variant: with positional name and with positional or keyword cls argument: +# @group(namearg, GroupCls, ...) or @group(namearg, cls=GroupCls, ...) +@t.overload +def group( + name: str | None, + cls: type[GrpType], + **attrs: t.Any, +) -> t.Callable[[_AnyCallable], GrpType]: ... + + +# variant: name omitted, cls _must_ be a keyword argument, @group(cmd=GroupCls, ...) +@t.overload +def group( + name: None = None, + *, + cls: type[GrpType], + **attrs: t.Any, +) -> t.Callable[[_AnyCallable], GrpType]: ... + + +# variant: with optional string name, no cls argument provided. +@t.overload +def group( + name: str | None = ..., cls: None = None, **attrs: t.Any +) -> t.Callable[[_AnyCallable], Group]: ... + + +def group( + name: str | _AnyCallable | None = None, + cls: type[GrpType] | None = None, + **attrs: t.Any, +) -> Group | t.Callable[[_AnyCallable], Group | GrpType]: + """Creates a new :class:`Group` with a function as callback. This + works otherwise the same as :func:`command` just that the `cls` + parameter is set to :class:`Group`. + + .. versionchanged:: 8.1 + This decorator can be applied without parentheses. + """ + if cls is None: + cls = t.cast("type[GrpType]", Group) + + if callable(name): + return command(cls=cls, **attrs)(name) + + return command(name, cls, **attrs) + + +def _param_memo(f: t.Callable[..., t.Any], param: Parameter) -> None: + if isinstance(f, Command): + f.params.append(param) + else: + if not hasattr(f, "__click_params__"): + f.__click_params__ = [] # type: ignore + + f.__click_params__.append(param) # type: ignore + + +def argument( + *param_decls: str, cls: type[Argument] | None = None, **attrs: t.Any +) -> t.Callable[[FC], FC]: + """Attaches an argument to the command. All positional arguments are + passed as parameter declarations to :class:`Argument`; all keyword + arguments are forwarded unchanged (except ``cls``). + This is equivalent to creating an :class:`Argument` instance manually + and attaching it to the :attr:`Command.params` list. + + For the default argument class, refer to :class:`Argument` and + :class:`Parameter` for descriptions of parameters. + + :param cls: the argument class to instantiate. This defaults to + :class:`Argument`. + :param param_decls: Passed as positional arguments to the constructor of + ``cls``. + :param attrs: Passed as keyword arguments to the constructor of ``cls``. + """ + if cls is None: + cls = Argument + + def decorator(f: FC) -> FC: + _param_memo(f, cls(param_decls, **attrs)) + return f + + return decorator + + +def option( + *param_decls: str, cls: type[Option] | None = None, **attrs: t.Any +) -> t.Callable[[FC], FC]: + """Attaches an option to the command. All positional arguments are + passed as parameter declarations to :class:`Option`; all keyword + arguments are forwarded unchanged (except ``cls``). + This is equivalent to creating an :class:`Option` instance manually + and attaching it to the :attr:`Command.params` list. + + For the default option class, refer to :class:`Option` and + :class:`Parameter` for descriptions of parameters. + + :param cls: the option class to instantiate. This defaults to + :class:`Option`. + :param param_decls: Passed as positional arguments to the constructor of + ``cls``. + :param attrs: Passed as keyword arguments to the constructor of ``cls``. + """ + if cls is None: + cls = Option + + def decorator(f: FC) -> FC: + _param_memo(f, cls(param_decls, **attrs)) + return f + + return decorator + + +def confirmation_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]: + """Add a ``--yes`` option which shows a prompt before continuing if + not passed. If the prompt is declined, the program will exit. + + :param param_decls: One or more option names. Defaults to the single + value ``"--yes"``. + :param kwargs: Extra arguments are passed to :func:`option`. + """ + + def callback(ctx: Context, param: Parameter, value: bool) -> None: + if not value: + ctx.abort() + + if not param_decls: + param_decls = ("--yes",) + + kwargs.setdefault("is_flag", True) + kwargs.setdefault("callback", callback) + kwargs.setdefault("expose_value", False) + kwargs.setdefault("prompt", "Do you want to continue?") + kwargs.setdefault("help", "Confirm the action without prompting.") + return option(*param_decls, **kwargs) + + +def password_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]: + """Add a ``--password`` option which prompts for a password, hiding + input and asking to enter the value again for confirmation. + + :param param_decls: One or more option names. Defaults to the single + value ``"--password"``. + :param kwargs: Extra arguments are passed to :func:`option`. + """ + if not param_decls: + param_decls = ("--password",) + + kwargs.setdefault("prompt", True) + kwargs.setdefault("confirmation_prompt", True) + kwargs.setdefault("hide_input", True) + return option(*param_decls, **kwargs) + + +def version_option( + version: str | None = None, + *param_decls: str, + package_name: str | None = None, + prog_name: str | None = None, + message: str | None = None, + **kwargs: t.Any, +) -> t.Callable[[FC], FC]: + """Add a ``--version`` option which immediately prints the version + number and exits the program. + + If ``version`` is not provided, Click will try to detect it using + :func:`importlib.metadata.version` to get the version for the + ``package_name``. + + If ``package_name`` is not provided, Click will try to detect it by + inspecting the stack frames. This will be used to detect the + version, so it must match the name of the installed package. + + :param version: The version number to show. If not provided, Click + will try to detect it. + :param param_decls: One or more option names. Defaults to the single + value ``"--version"``. + :param package_name: The package name to detect the version from. If + not provided, Click will try to detect it. + :param prog_name: The name of the CLI to show in the message. If not + provided, it will be detected from the command. + :param message: The message to show. The values ``%(prog)s``, + ``%(package)s``, and ``%(version)s`` are available. Defaults to + ``"%(prog)s, version %(version)s"``. + :param kwargs: Extra arguments are passed to :func:`option`. + :raise RuntimeError: ``version`` could not be detected. + + .. versionchanged:: 8.0 + Add the ``package_name`` parameter, and the ``%(package)s`` + value for messages. + + .. versionchanged:: 8.0 + Use :mod:`importlib.metadata` instead of ``pkg_resources``. The + version is detected based on the package name, not the entry + point name. The Python package name must match the installed + package name, or be passed with ``package_name=``. + """ + if message is None: + message = _("%(prog)s, version %(version)s") + + if version is None and package_name is None: + frame = inspect.currentframe() + f_back = frame.f_back if frame is not None else None + f_globals = f_back.f_globals if f_back is not None else None + # break reference cycle + # https://docs.python.org/3/library/inspect.html#the-interpreter-stack + del frame + + if f_globals is not None: + package_name = f_globals.get("__name__") + + if package_name == "__main__": + package_name = f_globals.get("__package__") + + if package_name: + package_name = package_name.partition(".")[0] + + def callback(ctx: Context, param: Parameter, value: bool) -> None: + if not value or ctx.resilient_parsing: + return + + nonlocal prog_name + nonlocal version + + if prog_name is None: + prog_name = ctx.find_root().info_name + + if version is None and package_name is not None: + import importlib.metadata + + try: + version = importlib.metadata.version(package_name) + except importlib.metadata.PackageNotFoundError: + raise RuntimeError( + f"{package_name!r} is not installed. Try passing" + " 'package_name' instead." + ) from None + + if version is None: + raise RuntimeError( + f"Could not determine the version for {package_name!r} automatically." + ) + + echo( + message % {"prog": prog_name, "package": package_name, "version": version}, + color=ctx.color, + ) + ctx.exit() + + if not param_decls: + param_decls = ("--version",) + + kwargs.setdefault("is_flag", True) + kwargs.setdefault("expose_value", False) + kwargs.setdefault("is_eager", True) + kwargs.setdefault("help", _("Show the version and exit.")) + kwargs["callback"] = callback + return option(*param_decls, **kwargs) + + +def help_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]: + """Pre-configured ``--help`` option which immediately prints the help page + and exits the program. + + :param param_decls: One or more option names. Defaults to the single + value ``"--help"``. + :param kwargs: Extra arguments are passed to :func:`option`. + """ + + def show_help(ctx: Context, param: Parameter, value: bool) -> None: + """Callback that print the help page on ```` and exits.""" + if value and not ctx.resilient_parsing: + echo(ctx.get_help(), color=ctx.color) + ctx.exit() + + if not param_decls: + param_decls = ("--help",) + + kwargs.setdefault("is_flag", True) + kwargs.setdefault("expose_value", False) + kwargs.setdefault("is_eager", True) + kwargs.setdefault("help", _("Show this message and exit.")) + kwargs.setdefault("callback", show_help) + + return option(*param_decls, **kwargs) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click/exceptions.py b/tools/converter-generator/venv/lib/python3.11/site-packages/click/exceptions.py new file mode 100644 index 0000000..4d782ee --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click/exceptions.py @@ -0,0 +1,308 @@ +from __future__ import annotations + +import collections.abc as cabc +import typing as t +from gettext import gettext as _ +from gettext import ngettext + +from ._compat import get_text_stderr +from .globals import resolve_color_default +from .utils import echo +from .utils import format_filename + +if t.TYPE_CHECKING: + from .core import Command + from .core import Context + from .core import Parameter + + +def _join_param_hints(param_hint: cabc.Sequence[str] | str | None) -> str | None: + if param_hint is not None and not isinstance(param_hint, str): + return " / ".join(repr(x) for x in param_hint) + + return param_hint + + +class ClickException(Exception): + """An exception that Click can handle and show to the user.""" + + #: The exit code for this exception. + exit_code = 1 + + def __init__(self, message: str) -> None: + super().__init__(message) + # The context will be removed by the time we print the message, so cache + # the color settings here to be used later on (in `show`) + self.show_color: bool | None = resolve_color_default() + self.message = message + + def format_message(self) -> str: + return self.message + + def __str__(self) -> str: + return self.message + + def show(self, file: t.IO[t.Any] | None = None) -> None: + if file is None: + file = get_text_stderr() + + echo( + _("Error: {message}").format(message=self.format_message()), + file=file, + color=self.show_color, + ) + + +class UsageError(ClickException): + """An internal exception that signals a usage error. This typically + aborts any further handling. + + :param message: the error message to display. + :param ctx: optionally the context that caused this error. Click will + fill in the context automatically in some situations. + """ + + exit_code = 2 + + def __init__(self, message: str, ctx: Context | None = None) -> None: + super().__init__(message) + self.ctx = ctx + self.cmd: Command | None = self.ctx.command if self.ctx else None + + def show(self, file: t.IO[t.Any] | None = None) -> None: + if file is None: + file = get_text_stderr() + color = None + hint = "" + if ( + self.ctx is not None + and self.ctx.command.get_help_option(self.ctx) is not None + ): + hint = _("Try '{command} {option}' for help.").format( + command=self.ctx.command_path, option=self.ctx.help_option_names[0] + ) + hint = f"{hint}\n" + if self.ctx is not None: + color = self.ctx.color + echo(f"{self.ctx.get_usage()}\n{hint}", file=file, color=color) + echo( + _("Error: {message}").format(message=self.format_message()), + file=file, + color=color, + ) + + +class BadParameter(UsageError): + """An exception that formats out a standardized error message for a + bad parameter. This is useful when thrown from a callback or type as + Click will attach contextual information to it (for instance, which + parameter it is). + + .. versionadded:: 2.0 + + :param param: the parameter object that caused this error. This can + be left out, and Click will attach this info itself + if possible. + :param param_hint: a string that shows up as parameter name. This + can be used as alternative to `param` in cases + where custom validation should happen. If it is + a string it's used as such, if it's a list then + each item is quoted and separated. + """ + + def __init__( + self, + message: str, + ctx: Context | None = None, + param: Parameter | None = None, + param_hint: cabc.Sequence[str] | str | None = None, + ) -> None: + super().__init__(message, ctx) + self.param = param + self.param_hint = param_hint + + def format_message(self) -> str: + if self.param_hint is not None: + param_hint = self.param_hint + elif self.param is not None: + param_hint = self.param.get_error_hint(self.ctx) # type: ignore + else: + return _("Invalid value: {message}").format(message=self.message) + + return _("Invalid value for {param_hint}: {message}").format( + param_hint=_join_param_hints(param_hint), message=self.message + ) + + +class MissingParameter(BadParameter): + """Raised if click required an option or argument but it was not + provided when invoking the script. + + .. versionadded:: 4.0 + + :param param_type: a string that indicates the type of the parameter. + The default is to inherit the parameter type from + the given `param`. Valid values are ``'parameter'``, + ``'option'`` or ``'argument'``. + """ + + def __init__( + self, + message: str | None = None, + ctx: Context | None = None, + param: Parameter | None = None, + param_hint: cabc.Sequence[str] | str | None = None, + param_type: str | None = None, + ) -> None: + super().__init__(message or "", ctx, param, param_hint) + self.param_type = param_type + + def format_message(self) -> str: + if self.param_hint is not None: + param_hint: cabc.Sequence[str] | str | None = self.param_hint + elif self.param is not None: + param_hint = self.param.get_error_hint(self.ctx) # type: ignore + else: + param_hint = None + + param_hint = _join_param_hints(param_hint) + param_hint = f" {param_hint}" if param_hint else "" + + param_type = self.param_type + if param_type is None and self.param is not None: + param_type = self.param.param_type_name + + msg = self.message + if self.param is not None: + msg_extra = self.param.type.get_missing_message( + param=self.param, ctx=self.ctx + ) + if msg_extra: + if msg: + msg += f". {msg_extra}" + else: + msg = msg_extra + + msg = f" {msg}" if msg else "" + + # Translate param_type for known types. + if param_type == "argument": + missing = _("Missing argument") + elif param_type == "option": + missing = _("Missing option") + elif param_type == "parameter": + missing = _("Missing parameter") + else: + missing = _("Missing {param_type}").format(param_type=param_type) + + return f"{missing}{param_hint}.{msg}" + + def __str__(self) -> str: + if not self.message: + param_name = self.param.name if self.param else None + return _("Missing parameter: {param_name}").format(param_name=param_name) + else: + return self.message + + +class NoSuchOption(UsageError): + """Raised if click attempted to handle an option that does not + exist. + + .. versionadded:: 4.0 + """ + + def __init__( + self, + option_name: str, + message: str | None = None, + possibilities: cabc.Sequence[str] | None = None, + ctx: Context | None = None, + ) -> None: + if message is None: + message = _("No such option: {name}").format(name=option_name) + + super().__init__(message, ctx) + self.option_name = option_name + self.possibilities = possibilities + + def format_message(self) -> str: + if not self.possibilities: + return self.message + + possibility_str = ", ".join(sorted(self.possibilities)) + suggest = ngettext( + "Did you mean {possibility}?", + "(Possible options: {possibilities})", + len(self.possibilities), + ).format(possibility=possibility_str, possibilities=possibility_str) + return f"{self.message} {suggest}" + + +class BadOptionUsage(UsageError): + """Raised if an option is generally supplied but the use of the option + was incorrect. This is for instance raised if the number of arguments + for an option is not correct. + + .. versionadded:: 4.0 + + :param option_name: the name of the option being used incorrectly. + """ + + def __init__( + self, option_name: str, message: str, ctx: Context | None = None + ) -> None: + super().__init__(message, ctx) + self.option_name = option_name + + +class BadArgumentUsage(UsageError): + """Raised if an argument is generally supplied but the use of the argument + was incorrect. This is for instance raised if the number of values + for an argument is not correct. + + .. versionadded:: 6.0 + """ + + +class NoArgsIsHelpError(UsageError): + def __init__(self, ctx: Context) -> None: + self.ctx: Context + super().__init__(ctx.get_help(), ctx=ctx) + + def show(self, file: t.IO[t.Any] | None = None) -> None: + echo(self.format_message(), file=file, err=True, color=self.ctx.color) + + +class FileError(ClickException): + """Raised if a file cannot be opened.""" + + def __init__(self, filename: str, hint: str | None = None) -> None: + if hint is None: + hint = _("unknown error") + + super().__init__(hint) + self.ui_filename: str = format_filename(filename) + self.filename = filename + + def format_message(self) -> str: + return _("Could not open file {filename!r}: {message}").format( + filename=self.ui_filename, message=self.message + ) + + +class Abort(RuntimeError): + """An internal signalling exception that signals Click to abort.""" + + +class Exit(RuntimeError): + """An exception that indicates that the application should exit with some + status code. + + :param code: the status code to exit with. + """ + + __slots__ = ("exit_code",) + + def __init__(self, code: int = 0) -> None: + self.exit_code: int = code diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click/formatting.py b/tools/converter-generator/venv/lib/python3.11/site-packages/click/formatting.py new file mode 100644 index 0000000..0b64f83 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click/formatting.py @@ -0,0 +1,301 @@ +from __future__ import annotations + +import collections.abc as cabc +from contextlib import contextmanager +from gettext import gettext as _ + +from ._compat import term_len +from .parser import _split_opt + +# Can force a width. This is used by the test system +FORCED_WIDTH: int | None = None + + +def measure_table(rows: cabc.Iterable[tuple[str, str]]) -> tuple[int, ...]: + widths: dict[int, int] = {} + + for row in rows: + for idx, col in enumerate(row): + widths[idx] = max(widths.get(idx, 0), term_len(col)) + + return tuple(y for x, y in sorted(widths.items())) + + +def iter_rows( + rows: cabc.Iterable[tuple[str, str]], col_count: int +) -> cabc.Iterator[tuple[str, ...]]: + for row in rows: + yield row + ("",) * (col_count - len(row)) + + +def wrap_text( + text: str, + width: int = 78, + initial_indent: str = "", + subsequent_indent: str = "", + preserve_paragraphs: bool = False, +) -> str: + """A helper function that intelligently wraps text. By default, it + assumes that it operates on a single paragraph of text but if the + `preserve_paragraphs` parameter is provided it will intelligently + handle paragraphs (defined by two empty lines). + + If paragraphs are handled, a paragraph can be prefixed with an empty + line containing the ``\\b`` character (``\\x08``) to indicate that + no rewrapping should happen in that block. + + :param text: the text that should be rewrapped. + :param width: the maximum width for the text. + :param initial_indent: the initial indent that should be placed on the + first line as a string. + :param subsequent_indent: the indent string that should be placed on + each consecutive line. + :param preserve_paragraphs: if this flag is set then the wrapping will + intelligently handle paragraphs. + """ + from ._textwrap import TextWrapper + + text = text.expandtabs() + wrapper = TextWrapper( + width, + initial_indent=initial_indent, + subsequent_indent=subsequent_indent, + replace_whitespace=False, + ) + if not preserve_paragraphs: + return wrapper.fill(text) + + p: list[tuple[int, bool, str]] = [] + buf: list[str] = [] + indent = None + + def _flush_par() -> None: + if not buf: + return + if buf[0].strip() == "\b": + p.append((indent or 0, True, "\n".join(buf[1:]))) + else: + p.append((indent or 0, False, " ".join(buf))) + del buf[:] + + for line in text.splitlines(): + if not line: + _flush_par() + indent = None + else: + if indent is None: + orig_len = term_len(line) + line = line.lstrip() + indent = orig_len - term_len(line) + buf.append(line) + _flush_par() + + rv = [] + for indent, raw, text in p: + with wrapper.extra_indent(" " * indent): + if raw: + rv.append(wrapper.indent_only(text)) + else: + rv.append(wrapper.fill(text)) + + return "\n\n".join(rv) + + +class HelpFormatter: + """This class helps with formatting text-based help pages. It's + usually just needed for very special internal cases, but it's also + exposed so that developers can write their own fancy outputs. + + At present, it always writes into memory. + + :param indent_increment: the additional increment for each level. + :param width: the width for the text. This defaults to the terminal + width clamped to a maximum of 78. + """ + + def __init__( + self, + indent_increment: int = 2, + width: int | None = None, + max_width: int | None = None, + ) -> None: + self.indent_increment = indent_increment + if max_width is None: + max_width = 80 + if width is None: + import shutil + + width = FORCED_WIDTH + if width is None: + width = max(min(shutil.get_terminal_size().columns, max_width) - 2, 50) + self.width = width + self.current_indent: int = 0 + self.buffer: list[str] = [] + + def write(self, string: str) -> None: + """Writes a unicode string into the internal buffer.""" + self.buffer.append(string) + + def indent(self) -> None: + """Increases the indentation.""" + self.current_indent += self.indent_increment + + def dedent(self) -> None: + """Decreases the indentation.""" + self.current_indent -= self.indent_increment + + def write_usage(self, prog: str, args: str = "", prefix: str | None = None) -> None: + """Writes a usage line into the buffer. + + :param prog: the program name. + :param args: whitespace separated list of arguments. + :param prefix: The prefix for the first line. Defaults to + ``"Usage: "``. + """ + if prefix is None: + prefix = f"{_('Usage:')} " + + usage_prefix = f"{prefix:>{self.current_indent}}{prog} " + text_width = self.width - self.current_indent + + if text_width >= (term_len(usage_prefix) + 20): + # The arguments will fit to the right of the prefix. + indent = " " * term_len(usage_prefix) + self.write( + wrap_text( + args, + text_width, + initial_indent=usage_prefix, + subsequent_indent=indent, + ) + ) + else: + # The prefix is too long, put the arguments on the next line. + self.write(usage_prefix) + self.write("\n") + indent = " " * (max(self.current_indent, term_len(prefix)) + 4) + self.write( + wrap_text( + args, text_width, initial_indent=indent, subsequent_indent=indent + ) + ) + + self.write("\n") + + def write_heading(self, heading: str) -> None: + """Writes a heading into the buffer.""" + self.write(f"{'':>{self.current_indent}}{heading}:\n") + + def write_paragraph(self) -> None: + """Writes a paragraph into the buffer.""" + if self.buffer: + self.write("\n") + + def write_text(self, text: str) -> None: + """Writes re-indented text into the buffer. This rewraps and + preserves paragraphs. + """ + indent = " " * self.current_indent + self.write( + wrap_text( + text, + self.width, + initial_indent=indent, + subsequent_indent=indent, + preserve_paragraphs=True, + ) + ) + self.write("\n") + + def write_dl( + self, + rows: cabc.Sequence[tuple[str, str]], + col_max: int = 30, + col_spacing: int = 2, + ) -> None: + """Writes a definition list into the buffer. This is how options + and commands are usually formatted. + + :param rows: a list of two item tuples for the terms and values. + :param col_max: the maximum width of the first column. + :param col_spacing: the number of spaces between the first and + second column. + """ + rows = list(rows) + widths = measure_table(rows) + if len(widths) != 2: + raise TypeError("Expected two columns for definition list") + + first_col = min(widths[0], col_max) + col_spacing + + for first, second in iter_rows(rows, len(widths)): + self.write(f"{'':>{self.current_indent}}{first}") + if not second: + self.write("\n") + continue + if term_len(first) <= first_col - col_spacing: + self.write(" " * (first_col - term_len(first))) + else: + self.write("\n") + self.write(" " * (first_col + self.current_indent)) + + text_width = max(self.width - first_col - 2, 10) + wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True) + lines = wrapped_text.splitlines() + + if lines: + self.write(f"{lines[0]}\n") + + for line in lines[1:]: + self.write(f"{'':>{first_col + self.current_indent}}{line}\n") + else: + self.write("\n") + + @contextmanager + def section(self, name: str) -> cabc.Iterator[None]: + """Helpful context manager that writes a paragraph, a heading, + and the indents. + + :param name: the section name that is written as heading. + """ + self.write_paragraph() + self.write_heading(name) + self.indent() + try: + yield + finally: + self.dedent() + + @contextmanager + def indentation(self) -> cabc.Iterator[None]: + """A context manager that increases the indentation.""" + self.indent() + try: + yield + finally: + self.dedent() + + def getvalue(self) -> str: + """Returns the buffer contents.""" + return "".join(self.buffer) + + +def join_options(options: cabc.Sequence[str]) -> tuple[str, bool]: + """Given a list of option strings this joins them in the most appropriate + way and returns them in the form ``(formatted_string, + any_prefix_is_slash)`` where the second item in the tuple is a flag that + indicates if any of the option prefixes was a slash. + """ + rv = [] + any_prefix_is_slash = False + + for opt in options: + prefix = _split_opt(opt)[0] + + if prefix == "/": + any_prefix_is_slash = True + + rv.append((len(prefix), opt)) + + rv.sort(key=lambda x: x[0]) + return ", ".join(x[1] for x in rv), any_prefix_is_slash diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click/globals.py b/tools/converter-generator/venv/lib/python3.11/site-packages/click/globals.py new file mode 100644 index 0000000..a2f9172 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click/globals.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +import typing as t +from threading import local + +if t.TYPE_CHECKING: + from .core import Context + +_local = local() + + +@t.overload +def get_current_context(silent: t.Literal[False] = False) -> Context: ... + + +@t.overload +def get_current_context(silent: bool = ...) -> Context | None: ... + + +def get_current_context(silent: bool = False) -> Context | None: + """Returns the current click context. This can be used as a way to + access the current context object from anywhere. This is a more implicit + alternative to the :func:`pass_context` decorator. This function is + primarily useful for helpers such as :func:`echo` which might be + interested in changing its behavior based on the current context. + + To push the current context, :meth:`Context.scope` can be used. + + .. versionadded:: 5.0 + + :param silent: if set to `True` the return value is `None` if no context + is available. The default behavior is to raise a + :exc:`RuntimeError`. + """ + try: + return t.cast("Context", _local.stack[-1]) + except (AttributeError, IndexError) as e: + if not silent: + raise RuntimeError("There is no active click context.") from e + + return None + + +def push_context(ctx: Context) -> None: + """Pushes a new context to the current stack.""" + _local.__dict__.setdefault("stack", []).append(ctx) + + +def pop_context() -> None: + """Removes the top level from the stack.""" + _local.stack.pop() + + +def resolve_color_default(color: bool | None = None) -> bool | None: + """Internal helper to get the default value of the color flag. If a + value is passed it's returned unchanged, otherwise it's looked up from + the current context. + """ + if color is not None: + return color + + ctx = get_current_context(silent=True) + + if ctx is not None: + return ctx.color + + return None diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click/parser.py b/tools/converter-generator/venv/lib/python3.11/site-packages/click/parser.py new file mode 100644 index 0000000..1ea1f71 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click/parser.py @@ -0,0 +1,532 @@ +""" +This module started out as largely a copy paste from the stdlib's +optparse module with the features removed that we do not need from +optparse because we implement them in Click on a higher level (for +instance type handling, help formatting and a lot more). + +The plan is to remove more and more from here over time. + +The reason this is a different module and not optparse from the stdlib +is that there are differences in 2.x and 3.x about the error messages +generated and optparse in the stdlib uses gettext for no good reason +and might cause us issues. + +Click uses parts of optparse written by Gregory P. Ward and maintained +by the Python Software Foundation. This is limited to code in parser.py. + +Copyright 2001-2006 Gregory P. Ward. All rights reserved. +Copyright 2002-2006 Python Software Foundation. All rights reserved. +""" + +# This code uses parts of optparse written by Gregory P. Ward and +# maintained by the Python Software Foundation. +# Copyright 2001-2006 Gregory P. Ward +# Copyright 2002-2006 Python Software Foundation +from __future__ import annotations + +import collections.abc as cabc +import typing as t +from collections import deque +from gettext import gettext as _ +from gettext import ngettext + +from ._utils import FLAG_NEEDS_VALUE +from ._utils import UNSET +from .exceptions import BadArgumentUsage +from .exceptions import BadOptionUsage +from .exceptions import NoSuchOption +from .exceptions import UsageError + +if t.TYPE_CHECKING: + from ._utils import T_FLAG_NEEDS_VALUE + from ._utils import T_UNSET + from .core import Argument as CoreArgument + from .core import Context + from .core import Option as CoreOption + from .core import Parameter as CoreParameter + +V = t.TypeVar("V") + + +def _unpack_args( + args: cabc.Sequence[str], nargs_spec: cabc.Sequence[int] +) -> tuple[cabc.Sequence[str | cabc.Sequence[str | None] | None], list[str]]: + """Given an iterable of arguments and an iterable of nargs specifications, + it returns a tuple with all the unpacked arguments at the first index + and all remaining arguments as the second. + + The nargs specification is the number of arguments that should be consumed + or `-1` to indicate that this position should eat up all the remainders. + + Missing items are filled with ``UNSET``. + """ + args = deque(args) + nargs_spec = deque(nargs_spec) + rv: list[str | tuple[str | T_UNSET, ...] | T_UNSET] = [] + spos: int | None = None + + def _fetch(c: deque[V]) -> V | T_UNSET: + try: + if spos is None: + return c.popleft() + else: + return c.pop() + except IndexError: + return UNSET + + while nargs_spec: + nargs = _fetch(nargs_spec) + + if nargs is None: + continue + + if nargs == 1: + rv.append(_fetch(args)) # type: ignore[arg-type] + elif nargs > 1: + x = [_fetch(args) for _ in range(nargs)] + + # If we're reversed, we're pulling in the arguments in reverse, + # so we need to turn them around. + if spos is not None: + x.reverse() + + rv.append(tuple(x)) + elif nargs < 0: + if spos is not None: + raise TypeError("Cannot have two nargs < 0") + + spos = len(rv) + rv.append(UNSET) + + # spos is the position of the wildcard (star). If it's not `None`, + # we fill it with the remainder. + if spos is not None: + rv[spos] = tuple(args) + args = [] + rv[spos + 1 :] = reversed(rv[spos + 1 :]) + + return tuple(rv), list(args) + + +def _split_opt(opt: str) -> tuple[str, str]: + first = opt[:1] + if first.isalnum(): + return "", opt + if opt[1:2] == first: + return opt[:2], opt[2:] + return first, opt[1:] + + +def _normalize_opt(opt: str, ctx: Context | None) -> str: + if ctx is None or ctx.token_normalize_func is None: + return opt + prefix, opt = _split_opt(opt) + return f"{prefix}{ctx.token_normalize_func(opt)}" + + +class _Option: + def __init__( + self, + obj: CoreOption, + opts: cabc.Sequence[str], + dest: str | None, + action: str | None = None, + nargs: int = 1, + const: t.Any | None = None, + ): + self._short_opts = [] + self._long_opts = [] + self.prefixes: set[str] = set() + + for opt in opts: + prefix, value = _split_opt(opt) + if not prefix: + raise ValueError(f"Invalid start character for option ({opt})") + self.prefixes.add(prefix[0]) + if len(prefix) == 1 and len(value) == 1: + self._short_opts.append(opt) + else: + self._long_opts.append(opt) + self.prefixes.add(prefix) + + if action is None: + action = "store" + + self.dest = dest + self.action = action + self.nargs = nargs + self.const = const + self.obj = obj + + @property + def takes_value(self) -> bool: + return self.action in ("store", "append") + + def process(self, value: t.Any, state: _ParsingState) -> None: + if self.action == "store": + state.opts[self.dest] = value # type: ignore + elif self.action == "store_const": + state.opts[self.dest] = self.const # type: ignore + elif self.action == "append": + state.opts.setdefault(self.dest, []).append(value) # type: ignore + elif self.action == "append_const": + state.opts.setdefault(self.dest, []).append(self.const) # type: ignore + elif self.action == "count": + state.opts[self.dest] = state.opts.get(self.dest, 0) + 1 # type: ignore + else: + raise ValueError(f"unknown action '{self.action}'") + state.order.append(self.obj) + + +class _Argument: + def __init__(self, obj: CoreArgument, dest: str | None, nargs: int = 1): + self.dest = dest + self.nargs = nargs + self.obj = obj + + def process( + self, + value: str | cabc.Sequence[str | None] | None | T_UNSET, + state: _ParsingState, + ) -> None: + if self.nargs > 1: + assert isinstance(value, cabc.Sequence) + holes = sum(1 for x in value if x is UNSET) + if holes == len(value): + value = UNSET + elif holes != 0: + raise BadArgumentUsage( + _("Argument {name!r} takes {nargs} values.").format( + name=self.dest, nargs=self.nargs + ) + ) + + # We failed to collect any argument value so we consider the argument as unset. + if value == (): + value = UNSET + + state.opts[self.dest] = value # type: ignore + state.order.append(self.obj) + + +class _ParsingState: + def __init__(self, rargs: list[str]) -> None: + self.opts: dict[str, t.Any] = {} + self.largs: list[str] = [] + self.rargs = rargs + self.order: list[CoreParameter] = [] + + +class _OptionParser: + """The option parser is an internal class that is ultimately used to + parse options and arguments. It's modelled after optparse and brings + a similar but vastly simplified API. It should generally not be used + directly as the high level Click classes wrap it for you. + + It's not nearly as extensible as optparse or argparse as it does not + implement features that are implemented on a higher level (such as + types or defaults). + + :param ctx: optionally the :class:`~click.Context` where this parser + should go with. + + .. deprecated:: 8.2 + Will be removed in Click 9.0. + """ + + def __init__(self, ctx: Context | None = None) -> None: + #: The :class:`~click.Context` for this parser. This might be + #: `None` for some advanced use cases. + self.ctx = ctx + #: This controls how the parser deals with interspersed arguments. + #: If this is set to `False`, the parser will stop on the first + #: non-option. Click uses this to implement nested subcommands + #: safely. + self.allow_interspersed_args: bool = True + #: This tells the parser how to deal with unknown options. By + #: default it will error out (which is sensible), but there is a + #: second mode where it will ignore it and continue processing + #: after shifting all the unknown options into the resulting args. + self.ignore_unknown_options: bool = False + + if ctx is not None: + self.allow_interspersed_args = ctx.allow_interspersed_args + self.ignore_unknown_options = ctx.ignore_unknown_options + + self._short_opt: dict[str, _Option] = {} + self._long_opt: dict[str, _Option] = {} + self._opt_prefixes = {"-", "--"} + self._args: list[_Argument] = [] + + def add_option( + self, + obj: CoreOption, + opts: cabc.Sequence[str], + dest: str | None, + action: str | None = None, + nargs: int = 1, + const: t.Any | None = None, + ) -> None: + """Adds a new option named `dest` to the parser. The destination + is not inferred (unlike with optparse) and needs to be explicitly + provided. Action can be any of ``store``, ``store_const``, + ``append``, ``append_const`` or ``count``. + + The `obj` can be used to identify the option in the order list + that is returned from the parser. + """ + opts = [_normalize_opt(opt, self.ctx) for opt in opts] + option = _Option(obj, opts, dest, action=action, nargs=nargs, const=const) + self._opt_prefixes.update(option.prefixes) + for opt in option._short_opts: + self._short_opt[opt] = option + for opt in option._long_opts: + self._long_opt[opt] = option + + def add_argument(self, obj: CoreArgument, dest: str | None, nargs: int = 1) -> None: + """Adds a positional argument named `dest` to the parser. + + The `obj` can be used to identify the option in the order list + that is returned from the parser. + """ + self._args.append(_Argument(obj, dest=dest, nargs=nargs)) + + def parse_args( + self, args: list[str] + ) -> tuple[dict[str, t.Any], list[str], list[CoreParameter]]: + """Parses positional arguments and returns ``(values, args, order)`` + for the parsed options and arguments as well as the leftover + arguments if there are any. The order is a list of objects as they + appear on the command line. If arguments appear multiple times they + will be memorized multiple times as well. + """ + state = _ParsingState(args) + try: + self._process_args_for_options(state) + self._process_args_for_args(state) + except UsageError: + if self.ctx is None or not self.ctx.resilient_parsing: + raise + return state.opts, state.largs, state.order + + def _process_args_for_args(self, state: _ParsingState) -> None: + pargs, args = _unpack_args( + state.largs + state.rargs, [x.nargs for x in self._args] + ) + + for idx, arg in enumerate(self._args): + arg.process(pargs[idx], state) + + state.largs = args + state.rargs = [] + + def _process_args_for_options(self, state: _ParsingState) -> None: + while state.rargs: + arg = state.rargs.pop(0) + arglen = len(arg) + # Double dashes always handled explicitly regardless of what + # prefixes are valid. + if arg == "--": + return + elif arg[:1] in self._opt_prefixes and arglen > 1: + self._process_opts(arg, state) + elif self.allow_interspersed_args: + state.largs.append(arg) + else: + state.rargs.insert(0, arg) + return + + # Say this is the original argument list: + # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] + # ^ + # (we are about to process arg(i)). + # + # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of + # [arg0, ..., arg(i-1)] (any options and their arguments will have + # been removed from largs). + # + # The while loop will usually consume 1 or more arguments per pass. + # If it consumes 1 (eg. arg is an option that takes no arguments), + # then after _process_arg() is done the situation is: + # + # largs = subset of [arg0, ..., arg(i)] + # rargs = [arg(i+1), ..., arg(N-1)] + # + # If allow_interspersed_args is false, largs will always be + # *empty* -- still a subset of [arg0, ..., arg(i-1)], but + # not a very interesting subset! + + def _match_long_opt( + self, opt: str, explicit_value: str | None, state: _ParsingState + ) -> None: + if opt not in self._long_opt: + from difflib import get_close_matches + + possibilities = get_close_matches(opt, self._long_opt) + raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx) + + option = self._long_opt[opt] + if option.takes_value: + # At this point it's safe to modify rargs by injecting the + # explicit value, because no exception is raised in this + # branch. This means that the inserted value will be fully + # consumed. + if explicit_value is not None: + state.rargs.insert(0, explicit_value) + + value = self._get_value_from_state(opt, option, state) + + elif explicit_value is not None: + raise BadOptionUsage( + opt, _("Option {name!r} does not take a value.").format(name=opt) + ) + + else: + value = UNSET + + option.process(value, state) + + def _match_short_opt(self, arg: str, state: _ParsingState) -> None: + stop = False + i = 1 + prefix = arg[0] + unknown_options = [] + + for ch in arg[1:]: + opt = _normalize_opt(f"{prefix}{ch}", self.ctx) + option = self._short_opt.get(opt) + i += 1 + + if not option: + if self.ignore_unknown_options: + unknown_options.append(ch) + continue + raise NoSuchOption(opt, ctx=self.ctx) + if option.takes_value: + # Any characters left in arg? Pretend they're the + # next arg, and stop consuming characters of arg. + if i < len(arg): + state.rargs.insert(0, arg[i:]) + stop = True + + value = self._get_value_from_state(opt, option, state) + + else: + value = UNSET + + option.process(value, state) + + if stop: + break + + # If we got any unknown options we recombine the string of the + # remaining options and re-attach the prefix, then report that + # to the state as new larg. This way there is basic combinatorics + # that can be achieved while still ignoring unknown arguments. + if self.ignore_unknown_options and unknown_options: + state.largs.append(f"{prefix}{''.join(unknown_options)}") + + def _get_value_from_state( + self, option_name: str, option: _Option, state: _ParsingState + ) -> str | cabc.Sequence[str] | T_FLAG_NEEDS_VALUE: + nargs = option.nargs + + value: str | cabc.Sequence[str] | T_FLAG_NEEDS_VALUE + + if len(state.rargs) < nargs: + if option.obj._flag_needs_value: + # Option allows omitting the value. + value = FLAG_NEEDS_VALUE + else: + raise BadOptionUsage( + option_name, + ngettext( + "Option {name!r} requires an argument.", + "Option {name!r} requires {nargs} arguments.", + nargs, + ).format(name=option_name, nargs=nargs), + ) + elif nargs == 1: + next_rarg = state.rargs[0] + + if ( + option.obj._flag_needs_value + and isinstance(next_rarg, str) + and next_rarg[:1] in self._opt_prefixes + and len(next_rarg) > 1 + ): + # The next arg looks like the start of an option, don't + # use it as the value if omitting the value is allowed. + value = FLAG_NEEDS_VALUE + else: + value = state.rargs.pop(0) + else: + value = tuple(state.rargs[:nargs]) + del state.rargs[:nargs] + + return value + + def _process_opts(self, arg: str, state: _ParsingState) -> None: + explicit_value = None + # Long option handling happens in two parts. The first part is + # supporting explicitly attached values. In any case, we will try + # to long match the option first. + if "=" in arg: + long_opt, explicit_value = arg.split("=", 1) + else: + long_opt = arg + norm_long_opt = _normalize_opt(long_opt, self.ctx) + + # At this point we will match the (assumed) long option through + # the long option matching code. Note that this allows options + # like "-foo" to be matched as long options. + try: + self._match_long_opt(norm_long_opt, explicit_value, state) + except NoSuchOption: + # At this point the long option matching failed, and we need + # to try with short options. However there is a special rule + # which says, that if we have a two character options prefix + # (applies to "--foo" for instance), we do not dispatch to the + # short option code and will instead raise the no option + # error. + if arg[:2] not in self._opt_prefixes: + self._match_short_opt(arg, state) + return + + if not self.ignore_unknown_options: + raise + + state.largs.append(arg) + + +def __getattr__(name: str) -> object: + import warnings + + if name in { + "OptionParser", + "Argument", + "Option", + "split_opt", + "normalize_opt", + "ParsingState", + }: + warnings.warn( + f"'parser.{name}' is deprecated and will be removed in Click 9.0." + " The old parser is available in 'optparse'.", + DeprecationWarning, + stacklevel=2, + ) + return globals()[f"_{name}"] + + if name == "split_arg_string": + from .shell_completion import split_arg_string + + warnings.warn( + "Importing 'parser.split_arg_string' is deprecated, it will only be" + " available in 'shell_completion' in Click 9.0.", + DeprecationWarning, + stacklevel=2, + ) + return split_arg_string + + raise AttributeError(name) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click/py.typed b/tools/converter-generator/venv/lib/python3.11/site-packages/click/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click/shell_completion.py b/tools/converter-generator/venv/lib/python3.11/site-packages/click/shell_completion.py new file mode 100644 index 0000000..8f1564c --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click/shell_completion.py @@ -0,0 +1,667 @@ +from __future__ import annotations + +import collections.abc as cabc +import os +import re +import typing as t +from gettext import gettext as _ + +from .core import Argument +from .core import Command +from .core import Context +from .core import Group +from .core import Option +from .core import Parameter +from .core import ParameterSource +from .utils import echo + + +def shell_complete( + cli: Command, + ctx_args: cabc.MutableMapping[str, t.Any], + prog_name: str, + complete_var: str, + instruction: str, +) -> int: + """Perform shell completion for the given CLI program. + + :param cli: Command being called. + :param ctx_args: Extra arguments to pass to + ``cli.make_context``. + :param prog_name: Name of the executable in the shell. + :param complete_var: Name of the environment variable that holds + the completion instruction. + :param instruction: Value of ``complete_var`` with the completion + instruction and shell, in the form ``instruction_shell``. + :return: Status code to exit with. + """ + shell, _, instruction = instruction.partition("_") + comp_cls = get_completion_class(shell) + + if comp_cls is None: + return 1 + + comp = comp_cls(cli, ctx_args, prog_name, complete_var) + + if instruction == "source": + echo(comp.source()) + return 0 + + if instruction == "complete": + echo(comp.complete()) + return 0 + + return 1 + + +class CompletionItem: + """Represents a completion value and metadata about the value. The + default metadata is ``type`` to indicate special shell handling, + and ``help`` if a shell supports showing a help string next to the + value. + + Arbitrary parameters can be passed when creating the object, and + accessed using ``item.attr``. If an attribute wasn't passed, + accessing it returns ``None``. + + :param value: The completion suggestion. + :param type: Tells the shell script to provide special completion + support for the type. Click uses ``"dir"`` and ``"file"``. + :param help: String shown next to the value if supported. + :param kwargs: Arbitrary metadata. The built-in implementations + don't use this, but custom type completions paired with custom + shell support could use it. + """ + + __slots__ = ("value", "type", "help", "_info") + + def __init__( + self, + value: t.Any, + type: str = "plain", + help: str | None = None, + **kwargs: t.Any, + ) -> None: + self.value: t.Any = value + self.type: str = type + self.help: str | None = help + self._info = kwargs + + def __getattr__(self, name: str) -> t.Any: + return self._info.get(name) + + +# Only Bash >= 4.4 has the nosort option. +_SOURCE_BASH = """\ +%(complete_func)s() { + local IFS=$'\\n' + local response + + response=$(env COMP_WORDS="${COMP_WORDS[*]}" COMP_CWORD=$COMP_CWORD \ +%(complete_var)s=bash_complete $1) + + for completion in $response; do + IFS=',' read type value <<< "$completion" + + if [[ $type == 'dir' ]]; then + COMPREPLY=() + compopt -o dirnames + elif [[ $type == 'file' ]]; then + COMPREPLY=() + compopt -o default + elif [[ $type == 'plain' ]]; then + COMPREPLY+=($value) + fi + done + + return 0 +} + +%(complete_func)s_setup() { + complete -o nosort -F %(complete_func)s %(prog_name)s +} + +%(complete_func)s_setup; +""" + +# See ZshComplete.format_completion below, and issue #2703, before +# changing this script. +# +# (TL;DR: _describe is picky about the format, but this Zsh script snippet +# is already widely deployed. So freeze this script, and use clever-ish +# handling of colons in ZshComplet.format_completion.) +_SOURCE_ZSH = """\ +#compdef %(prog_name)s + +%(complete_func)s() { + local -a completions + local -a completions_with_descriptions + local -a response + (( ! $+commands[%(prog_name)s] )) && return 1 + + response=("${(@f)$(env COMP_WORDS="${words[*]}" COMP_CWORD=$((CURRENT-1)) \ +%(complete_var)s=zsh_complete %(prog_name)s)}") + + for type key descr in ${response}; do + if [[ "$type" == "plain" ]]; then + if [[ "$descr" == "_" ]]; then + completions+=("$key") + else + completions_with_descriptions+=("$key":"$descr") + fi + elif [[ "$type" == "dir" ]]; then + _path_files -/ + elif [[ "$type" == "file" ]]; then + _path_files -f + fi + done + + if [ -n "$completions_with_descriptions" ]; then + _describe -V unsorted completions_with_descriptions -U + fi + + if [ -n "$completions" ]; then + compadd -U -V unsorted -a completions + fi +} + +if [[ $zsh_eval_context[-1] == loadautofunc ]]; then + # autoload from fpath, call function directly + %(complete_func)s "$@" +else + # eval/source/. command, register function for later + compdef %(complete_func)s %(prog_name)s +fi +""" + +_SOURCE_FISH = """\ +function %(complete_func)s; + set -l response (env %(complete_var)s=fish_complete COMP_WORDS=(commandline -cp) \ +COMP_CWORD=(commandline -t) %(prog_name)s); + + for completion in $response; + set -l metadata (string split "," $completion); + + if test $metadata[1] = "dir"; + __fish_complete_directories $metadata[2]; + else if test $metadata[1] = "file"; + __fish_complete_path $metadata[2]; + else if test $metadata[1] = "plain"; + echo $metadata[2]; + end; + end; +end; + +complete --no-files --command %(prog_name)s --arguments \ +"(%(complete_func)s)"; +""" + + +class ShellComplete: + """Base class for providing shell completion support. A subclass for + a given shell will override attributes and methods to implement the + completion instructions (``source`` and ``complete``). + + :param cli: Command being called. + :param prog_name: Name of the executable in the shell. + :param complete_var: Name of the environment variable that holds + the completion instruction. + + .. versionadded:: 8.0 + """ + + name: t.ClassVar[str] + """Name to register the shell as with :func:`add_completion_class`. + This is used in completion instructions (``{name}_source`` and + ``{name}_complete``). + """ + + source_template: t.ClassVar[str] + """Completion script template formatted by :meth:`source`. This must + be provided by subclasses. + """ + + def __init__( + self, + cli: Command, + ctx_args: cabc.MutableMapping[str, t.Any], + prog_name: str, + complete_var: str, + ) -> None: + self.cli = cli + self.ctx_args = ctx_args + self.prog_name = prog_name + self.complete_var = complete_var + + @property + def func_name(self) -> str: + """The name of the shell function defined by the completion + script. + """ + safe_name = re.sub(r"\W*", "", self.prog_name.replace("-", "_"), flags=re.ASCII) + return f"_{safe_name}_completion" + + def source_vars(self) -> dict[str, t.Any]: + """Vars for formatting :attr:`source_template`. + + By default this provides ``complete_func``, ``complete_var``, + and ``prog_name``. + """ + return { + "complete_func": self.func_name, + "complete_var": self.complete_var, + "prog_name": self.prog_name, + } + + def source(self) -> str: + """Produce the shell script that defines the completion + function. By default this ``%``-style formats + :attr:`source_template` with the dict returned by + :meth:`source_vars`. + """ + return self.source_template % self.source_vars() + + def get_completion_args(self) -> tuple[list[str], str]: + """Use the env vars defined by the shell script to return a + tuple of ``args, incomplete``. This must be implemented by + subclasses. + """ + raise NotImplementedError + + def get_completions(self, args: list[str], incomplete: str) -> list[CompletionItem]: + """Determine the context and last complete command or parameter + from the complete args. Call that object's ``shell_complete`` + method to get the completions for the incomplete value. + + :param args: List of complete args before the incomplete value. + :param incomplete: Value being completed. May be empty. + """ + ctx = _resolve_context(self.cli, self.ctx_args, self.prog_name, args) + obj, incomplete = _resolve_incomplete(ctx, args, incomplete) + return obj.shell_complete(ctx, incomplete) + + def format_completion(self, item: CompletionItem) -> str: + """Format a completion item into the form recognized by the + shell script. This must be implemented by subclasses. + + :param item: Completion item to format. + """ + raise NotImplementedError + + def complete(self) -> str: + """Produce the completion data to send back to the shell. + + By default this calls :meth:`get_completion_args`, gets the + completions, then calls :meth:`format_completion` for each + completion. + """ + args, incomplete = self.get_completion_args() + completions = self.get_completions(args, incomplete) + out = [self.format_completion(item) for item in completions] + return "\n".join(out) + + +class BashComplete(ShellComplete): + """Shell completion for Bash.""" + + name = "bash" + source_template = _SOURCE_BASH + + @staticmethod + def _check_version() -> None: + import shutil + import subprocess + + bash_exe = shutil.which("bash") + + if bash_exe is None: + match = None + else: + output = subprocess.run( + [bash_exe, "--norc", "-c", 'echo "${BASH_VERSION}"'], + stdout=subprocess.PIPE, + ) + match = re.search(r"^(\d+)\.(\d+)\.\d+", output.stdout.decode()) + + if match is not None: + major, minor = match.groups() + + if major < "4" or major == "4" and minor < "4": + echo( + _( + "Shell completion is not supported for Bash" + " versions older than 4.4." + ), + err=True, + ) + else: + echo( + _("Couldn't detect Bash version, shell completion is not supported."), + err=True, + ) + + def source(self) -> str: + self._check_version() + return super().source() + + def get_completion_args(self) -> tuple[list[str], str]: + cwords = split_arg_string(os.environ["COMP_WORDS"]) + cword = int(os.environ["COMP_CWORD"]) + args = cwords[1:cword] + + try: + incomplete = cwords[cword] + except IndexError: + incomplete = "" + + return args, incomplete + + def format_completion(self, item: CompletionItem) -> str: + return f"{item.type},{item.value}" + + +class ZshComplete(ShellComplete): + """Shell completion for Zsh.""" + + name = "zsh" + source_template = _SOURCE_ZSH + + def get_completion_args(self) -> tuple[list[str], str]: + cwords = split_arg_string(os.environ["COMP_WORDS"]) + cword = int(os.environ["COMP_CWORD"]) + args = cwords[1:cword] + + try: + incomplete = cwords[cword] + except IndexError: + incomplete = "" + + return args, incomplete + + def format_completion(self, item: CompletionItem) -> str: + help_ = item.help or "_" + # The zsh completion script uses `_describe` on items with help + # texts (which splits the item help from the item value at the + # first unescaped colon) and `compadd` on items without help + # text (which uses the item value as-is and does not support + # colon escaping). So escape colons in the item value if and + # only if the item help is not the sentinel "_" value, as used + # by the completion script. + # + # (The zsh completion script is potentially widely deployed, and + # thus harder to fix than this method.) + # + # See issue #1812 and issue #2703 for further context. + value = item.value.replace(":", r"\:") if help_ != "_" else item.value + return f"{item.type}\n{value}\n{help_}" + + +class FishComplete(ShellComplete): + """Shell completion for Fish.""" + + name = "fish" + source_template = _SOURCE_FISH + + def get_completion_args(self) -> tuple[list[str], str]: + cwords = split_arg_string(os.environ["COMP_WORDS"]) + incomplete = os.environ["COMP_CWORD"] + if incomplete: + incomplete = split_arg_string(incomplete)[0] + args = cwords[1:] + + # Fish stores the partial word in both COMP_WORDS and + # COMP_CWORD, remove it from complete args. + if incomplete and args and args[-1] == incomplete: + args.pop() + + return args, incomplete + + def format_completion(self, item: CompletionItem) -> str: + if item.help: + return f"{item.type},{item.value}\t{item.help}" + + return f"{item.type},{item.value}" + + +ShellCompleteType = t.TypeVar("ShellCompleteType", bound="type[ShellComplete]") + + +_available_shells: dict[str, type[ShellComplete]] = { + "bash": BashComplete, + "fish": FishComplete, + "zsh": ZshComplete, +} + + +def add_completion_class( + cls: ShellCompleteType, name: str | None = None +) -> ShellCompleteType: + """Register a :class:`ShellComplete` subclass under the given name. + The name will be provided by the completion instruction environment + variable during completion. + + :param cls: The completion class that will handle completion for the + shell. + :param name: Name to register the class under. Defaults to the + class's ``name`` attribute. + """ + if name is None: + name = cls.name + + _available_shells[name] = cls + + return cls + + +def get_completion_class(shell: str) -> type[ShellComplete] | None: + """Look up a registered :class:`ShellComplete` subclass by the name + provided by the completion instruction environment variable. If the + name isn't registered, returns ``None``. + + :param shell: Name the class is registered under. + """ + return _available_shells.get(shell) + + +def split_arg_string(string: str) -> list[str]: + """Split an argument string as with :func:`shlex.split`, but don't + fail if the string is incomplete. Ignores a missing closing quote or + incomplete escape sequence and uses the partial token as-is. + + .. code-block:: python + + split_arg_string("example 'my file") + ["example", "my file"] + + split_arg_string("example my\\") + ["example", "my"] + + :param string: String to split. + + .. versionchanged:: 8.2 + Moved to ``shell_completion`` from ``parser``. + """ + import shlex + + lex = shlex.shlex(string, posix=True) + lex.whitespace_split = True + lex.commenters = "" + out = [] + + try: + for token in lex: + out.append(token) + except ValueError: + # Raised when end-of-string is reached in an invalid state. Use + # the partial token as-is. The quote or escape character is in + # lex.state, not lex.token. + out.append(lex.token) + + return out + + +def _is_incomplete_argument(ctx: Context, param: Parameter) -> bool: + """Determine if the given parameter is an argument that can still + accept values. + + :param ctx: Invocation context for the command represented by the + parsed complete args. + :param param: Argument object being checked. + """ + if not isinstance(param, Argument): + return False + + assert param.name is not None + # Will be None if expose_value is False. + value = ctx.params.get(param.name) + return ( + param.nargs == -1 + or ctx.get_parameter_source(param.name) is not ParameterSource.COMMANDLINE + or ( + param.nargs > 1 + and isinstance(value, (tuple, list)) + and len(value) < param.nargs + ) + ) + + +def _start_of_option(ctx: Context, value: str) -> bool: + """Check if the value looks like the start of an option.""" + if not value: + return False + + c = value[0] + return c in ctx._opt_prefixes + + +def _is_incomplete_option(ctx: Context, args: list[str], param: Parameter) -> bool: + """Determine if the given parameter is an option that needs a value. + + :param args: List of complete args before the incomplete value. + :param param: Option object being checked. + """ + if not isinstance(param, Option): + return False + + if param.is_flag or param.count: + return False + + last_option = None + + for index, arg in enumerate(reversed(args)): + if index + 1 > param.nargs: + break + + if _start_of_option(ctx, arg): + last_option = arg + break + + return last_option is not None and last_option in param.opts + + +def _resolve_context( + cli: Command, + ctx_args: cabc.MutableMapping[str, t.Any], + prog_name: str, + args: list[str], +) -> Context: + """Produce the context hierarchy starting with the command and + traversing the complete arguments. This only follows the commands, + it doesn't trigger input prompts or callbacks. + + :param cli: Command being called. + :param prog_name: Name of the executable in the shell. + :param args: List of complete args before the incomplete value. + """ + ctx_args["resilient_parsing"] = True + with cli.make_context(prog_name, args.copy(), **ctx_args) as ctx: + args = ctx._protected_args + ctx.args + + while args: + command = ctx.command + + if isinstance(command, Group): + if not command.chain: + name, cmd, args = command.resolve_command(ctx, args) + + if cmd is None: + return ctx + + with cmd.make_context( + name, args, parent=ctx, resilient_parsing=True + ) as sub_ctx: + ctx = sub_ctx + args = ctx._protected_args + ctx.args + else: + sub_ctx = ctx + + while args: + name, cmd, args = command.resolve_command(ctx, args) + + if cmd is None: + return ctx + + with cmd.make_context( + name, + args, + parent=ctx, + allow_extra_args=True, + allow_interspersed_args=False, + resilient_parsing=True, + ) as sub_sub_ctx: + sub_ctx = sub_sub_ctx + args = sub_ctx.args + + ctx = sub_ctx + args = [*sub_ctx._protected_args, *sub_ctx.args] + else: + break + + return ctx + + +def _resolve_incomplete( + ctx: Context, args: list[str], incomplete: str +) -> tuple[Command | Parameter, str]: + """Find the Click object that will handle the completion of the + incomplete value. Return the object and the incomplete value. + + :param ctx: Invocation context for the command represented by + the parsed complete args. + :param args: List of complete args before the incomplete value. + :param incomplete: Value being completed. May be empty. + """ + # Different shells treat an "=" between a long option name and + # value differently. Might keep the value joined, return the "=" + # as a separate item, or return the split name and value. Always + # split and discard the "=" to make completion easier. + if incomplete == "=": + incomplete = "" + elif "=" in incomplete and _start_of_option(ctx, incomplete): + name, _, incomplete = incomplete.partition("=") + args.append(name) + + # The "--" marker tells Click to stop treating values as options + # even if they start with the option character. If it hasn't been + # given and the incomplete arg looks like an option, the current + # command will provide option name completions. + if "--" not in args and _start_of_option(ctx, incomplete): + return ctx.command, incomplete + + params = ctx.command.get_params(ctx) + + # If the last complete arg is an option name with an incomplete + # value, the option will provide value completions. + for param in params: + if _is_incomplete_option(ctx, args, param): + return param, incomplete + + # It's not an option name or value. The first argument without a + # parsed value will provide value completions. + for param in params: + if _is_incomplete_argument(ctx, param): + return param, incomplete + + # There were no unparsed arguments, the command may be a group that + # will provide command name completions. + return ctx.command, incomplete diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click/termui.py b/tools/converter-generator/venv/lib/python3.11/site-packages/click/termui.py new file mode 100644 index 0000000..dcbb222 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click/termui.py @@ -0,0 +1,877 @@ +from __future__ import annotations + +import collections.abc as cabc +import inspect +import io +import itertools +import sys +import typing as t +from contextlib import AbstractContextManager +from gettext import gettext as _ + +from ._compat import isatty +from ._compat import strip_ansi +from .exceptions import Abort +from .exceptions import UsageError +from .globals import resolve_color_default +from .types import Choice +from .types import convert_type +from .types import ParamType +from .utils import echo +from .utils import LazyFile + +if t.TYPE_CHECKING: + from ._termui_impl import ProgressBar + +V = t.TypeVar("V") + +# The prompt functions to use. The doc tools currently override these +# functions to customize how they work. +visible_prompt_func: t.Callable[[str], str] = input + +_ansi_colors = { + "black": 30, + "red": 31, + "green": 32, + "yellow": 33, + "blue": 34, + "magenta": 35, + "cyan": 36, + "white": 37, + "reset": 39, + "bright_black": 90, + "bright_red": 91, + "bright_green": 92, + "bright_yellow": 93, + "bright_blue": 94, + "bright_magenta": 95, + "bright_cyan": 96, + "bright_white": 97, +} +_ansi_reset_all = "\033[0m" + + +def hidden_prompt_func(prompt: str) -> str: + import getpass + + return getpass.getpass(prompt) + + +def _build_prompt( + text: str, + suffix: str, + show_default: bool = False, + default: t.Any | None = None, + show_choices: bool = True, + type: ParamType | None = None, +) -> str: + prompt = text + if type is not None and show_choices and isinstance(type, Choice): + prompt += f" ({', '.join(map(str, type.choices))})" + if default is not None and show_default: + prompt = f"{prompt} [{_format_default(default)}]" + return f"{prompt}{suffix}" + + +def _format_default(default: t.Any) -> t.Any: + if isinstance(default, (io.IOBase, LazyFile)) and hasattr(default, "name"): + return default.name + + return default + + +def prompt( + text: str, + default: t.Any | None = None, + hide_input: bool = False, + confirmation_prompt: bool | str = False, + type: ParamType | t.Any | None = None, + value_proc: t.Callable[[str], t.Any] | None = None, + prompt_suffix: str = ": ", + show_default: bool = True, + err: bool = False, + show_choices: bool = True, +) -> t.Any: + """Prompts a user for input. This is a convenience function that can + be used to prompt a user for input later. + + If the user aborts the input by sending an interrupt signal, this + function will catch it and raise a :exc:`Abort` exception. + + :param text: the text to show for the prompt. + :param default: the default value to use if no input happens. If this + is not given it will prompt until it's aborted. + :param hide_input: if this is set to true then the input value will + be hidden. + :param confirmation_prompt: Prompt a second time to confirm the + value. Can be set to a string instead of ``True`` to customize + the message. + :param type: the type to use to check the value against. + :param value_proc: if this parameter is provided it's a function that + is invoked instead of the type conversion to + convert a value. + :param prompt_suffix: a suffix that should be added to the prompt. + :param show_default: shows or hides the default value in the prompt. + :param err: if set to true the file defaults to ``stderr`` instead of + ``stdout``, the same as with echo. + :param show_choices: Show or hide choices if the passed type is a Choice. + For example if type is a Choice of either day or week, + show_choices is true and text is "Group by" then the + prompt will be "Group by (day, week): ". + + .. versionadded:: 8.0 + ``confirmation_prompt`` can be a custom string. + + .. versionadded:: 7.0 + Added the ``show_choices`` parameter. + + .. versionadded:: 6.0 + Added unicode support for cmd.exe on Windows. + + .. versionadded:: 4.0 + Added the `err` parameter. + + """ + + def prompt_func(text: str) -> str: + f = hidden_prompt_func if hide_input else visible_prompt_func + try: + # Write the prompt separately so that we get nice + # coloring through colorama on Windows + echo(text.rstrip(" "), nl=False, err=err) + # Echo a space to stdout to work around an issue where + # readline causes backspace to clear the whole line. + return f(" ") + except (KeyboardInterrupt, EOFError): + # getpass doesn't print a newline if the user aborts input with ^C. + # Allegedly this behavior is inherited from getpass(3). + # A doc bug has been filed at https://bugs.python.org/issue24711 + if hide_input: + echo(None, err=err) + raise Abort() from None + + if value_proc is None: + value_proc = convert_type(type, default) + + prompt = _build_prompt( + text, prompt_suffix, show_default, default, show_choices, type + ) + + if confirmation_prompt: + if confirmation_prompt is True: + confirmation_prompt = _("Repeat for confirmation") + + confirmation_prompt = _build_prompt(confirmation_prompt, prompt_suffix) + + while True: + while True: + value = prompt_func(prompt) + if value: + break + elif default is not None: + value = default + break + try: + result = value_proc(value) + except UsageError as e: + if hide_input: + echo(_("Error: The value you entered was invalid."), err=err) + else: + echo(_("Error: {e.message}").format(e=e), err=err) + continue + if not confirmation_prompt: + return result + while True: + value2 = prompt_func(confirmation_prompt) + is_empty = not value and not value2 + if value2 or is_empty: + break + if value == value2: + return result + echo(_("Error: The two entered values do not match."), err=err) + + +def confirm( + text: str, + default: bool | None = False, + abort: bool = False, + prompt_suffix: str = ": ", + show_default: bool = True, + err: bool = False, +) -> bool: + """Prompts for confirmation (yes/no question). + + If the user aborts the input by sending a interrupt signal this + function will catch it and raise a :exc:`Abort` exception. + + :param text: the question to ask. + :param default: The default value to use when no input is given. If + ``None``, repeat until input is given. + :param abort: if this is set to `True` a negative answer aborts the + exception by raising :exc:`Abort`. + :param prompt_suffix: a suffix that should be added to the prompt. + :param show_default: shows or hides the default value in the prompt. + :param err: if set to true the file defaults to ``stderr`` instead of + ``stdout``, the same as with echo. + + .. versionchanged:: 8.0 + Repeat until input is given if ``default`` is ``None``. + + .. versionadded:: 4.0 + Added the ``err`` parameter. + """ + prompt = _build_prompt( + text, + prompt_suffix, + show_default, + "y/n" if default is None else ("Y/n" if default else "y/N"), + ) + + while True: + try: + # Write the prompt separately so that we get nice + # coloring through colorama on Windows + echo(prompt.rstrip(" "), nl=False, err=err) + # Echo a space to stdout to work around an issue where + # readline causes backspace to clear the whole line. + value = visible_prompt_func(" ").lower().strip() + except (KeyboardInterrupt, EOFError): + raise Abort() from None + if value in ("y", "yes"): + rv = True + elif value in ("n", "no"): + rv = False + elif default is not None and value == "": + rv = default + else: + echo(_("Error: invalid input"), err=err) + continue + break + if abort and not rv: + raise Abort() + return rv + + +def echo_via_pager( + text_or_generator: cabc.Iterable[str] | t.Callable[[], cabc.Iterable[str]] | str, + color: bool | None = None, +) -> None: + """This function takes a text and shows it via an environment specific + pager on stdout. + + .. versionchanged:: 3.0 + Added the `color` flag. + + :param text_or_generator: the text to page, or alternatively, a + generator emitting the text to page. + :param color: controls if the pager supports ANSI colors or not. The + default is autodetection. + """ + color = resolve_color_default(color) + + if inspect.isgeneratorfunction(text_or_generator): + i = t.cast("t.Callable[[], cabc.Iterable[str]]", text_or_generator)() + elif isinstance(text_or_generator, str): + i = [text_or_generator] + else: + i = iter(t.cast("cabc.Iterable[str]", text_or_generator)) + + # convert every element of i to a text type if necessary + text_generator = (el if isinstance(el, str) else str(el) for el in i) + + from ._termui_impl import pager + + return pager(itertools.chain(text_generator, "\n"), color) + + +@t.overload +def progressbar( + *, + length: int, + label: str | None = None, + hidden: bool = False, + show_eta: bool = True, + show_percent: bool | None = None, + show_pos: bool = False, + fill_char: str = "#", + empty_char: str = "-", + bar_template: str = "%(label)s [%(bar)s] %(info)s", + info_sep: str = " ", + width: int = 36, + file: t.TextIO | None = None, + color: bool | None = None, + update_min_steps: int = 1, +) -> ProgressBar[int]: ... + + +@t.overload +def progressbar( + iterable: cabc.Iterable[V] | None = None, + length: int | None = None, + label: str | None = None, + hidden: bool = False, + show_eta: bool = True, + show_percent: bool | None = None, + show_pos: bool = False, + item_show_func: t.Callable[[V | None], str | None] | None = None, + fill_char: str = "#", + empty_char: str = "-", + bar_template: str = "%(label)s [%(bar)s] %(info)s", + info_sep: str = " ", + width: int = 36, + file: t.TextIO | None = None, + color: bool | None = None, + update_min_steps: int = 1, +) -> ProgressBar[V]: ... + + +def progressbar( + iterable: cabc.Iterable[V] | None = None, + length: int | None = None, + label: str | None = None, + hidden: bool = False, + show_eta: bool = True, + show_percent: bool | None = None, + show_pos: bool = False, + item_show_func: t.Callable[[V | None], str | None] | None = None, + fill_char: str = "#", + empty_char: str = "-", + bar_template: str = "%(label)s [%(bar)s] %(info)s", + info_sep: str = " ", + width: int = 36, + file: t.TextIO | None = None, + color: bool | None = None, + update_min_steps: int = 1, +) -> ProgressBar[V]: + """This function creates an iterable context manager that can be used + to iterate over something while showing a progress bar. It will + either iterate over the `iterable` or `length` items (that are counted + up). While iteration happens, this function will print a rendered + progress bar to the given `file` (defaults to stdout) and will attempt + to calculate remaining time and more. By default, this progress bar + will not be rendered if the file is not a terminal. + + The context manager creates the progress bar. When the context + manager is entered the progress bar is already created. With every + iteration over the progress bar, the iterable passed to the bar is + advanced and the bar is updated. When the context manager exits, + a newline is printed and the progress bar is finalized on screen. + + Note: The progress bar is currently designed for use cases where the + total progress can be expected to take at least several seconds. + Because of this, the ProgressBar class object won't display + progress that is considered too fast, and progress where the time + between steps is less than a second. + + No printing must happen or the progress bar will be unintentionally + destroyed. + + Example usage:: + + with progressbar(items) as bar: + for item in bar: + do_something_with(item) + + Alternatively, if no iterable is specified, one can manually update the + progress bar through the `update()` method instead of directly + iterating over the progress bar. The update method accepts the number + of steps to increment the bar with:: + + with progressbar(length=chunks.total_bytes) as bar: + for chunk in chunks: + process_chunk(chunk) + bar.update(chunks.bytes) + + The ``update()`` method also takes an optional value specifying the + ``current_item`` at the new position. This is useful when used + together with ``item_show_func`` to customize the output for each + manual step:: + + with click.progressbar( + length=total_size, + label='Unzipping archive', + item_show_func=lambda a: a.filename + ) as bar: + for archive in zip_file: + archive.extract() + bar.update(archive.size, archive) + + :param iterable: an iterable to iterate over. If not provided the length + is required. + :param length: the number of items to iterate over. By default the + progressbar will attempt to ask the iterator about its + length, which might or might not work. If an iterable is + also provided this parameter can be used to override the + length. If an iterable is not provided the progress bar + will iterate over a range of that length. + :param label: the label to show next to the progress bar. + :param hidden: hide the progressbar. Defaults to ``False``. When no tty is + detected, it will only print the progressbar label. Setting this to + ``False`` also disables that. + :param show_eta: enables or disables the estimated time display. This is + automatically disabled if the length cannot be + determined. + :param show_percent: enables or disables the percentage display. The + default is `True` if the iterable has a length or + `False` if not. + :param show_pos: enables or disables the absolute position display. The + default is `False`. + :param item_show_func: A function called with the current item which + can return a string to show next to the progress bar. If the + function returns ``None`` nothing is shown. The current item can + be ``None``, such as when entering and exiting the bar. + :param fill_char: the character to use to show the filled part of the + progress bar. + :param empty_char: the character to use to show the non-filled part of + the progress bar. + :param bar_template: the format string to use as template for the bar. + The parameters in it are ``label`` for the label, + ``bar`` for the progress bar and ``info`` for the + info section. + :param info_sep: the separator between multiple info items (eta etc.) + :param width: the width of the progress bar in characters, 0 means full + terminal width + :param file: The file to write to. If this is not a terminal then + only the label is printed. + :param color: controls if the terminal supports ANSI colors or not. The + default is autodetection. This is only needed if ANSI + codes are included anywhere in the progress bar output + which is not the case by default. + :param update_min_steps: Render only when this many updates have + completed. This allows tuning for very fast iterators. + + .. versionadded:: 8.2 + The ``hidden`` argument. + + .. versionchanged:: 8.0 + Output is shown even if execution time is less than 0.5 seconds. + + .. versionchanged:: 8.0 + ``item_show_func`` shows the current item, not the previous one. + + .. versionchanged:: 8.0 + Labels are echoed if the output is not a TTY. Reverts a change + in 7.0 that removed all output. + + .. versionadded:: 8.0 + The ``update_min_steps`` parameter. + + .. versionadded:: 4.0 + The ``color`` parameter and ``update`` method. + + .. versionadded:: 2.0 + """ + from ._termui_impl import ProgressBar + + color = resolve_color_default(color) + return ProgressBar( + iterable=iterable, + length=length, + hidden=hidden, + show_eta=show_eta, + show_percent=show_percent, + show_pos=show_pos, + item_show_func=item_show_func, + fill_char=fill_char, + empty_char=empty_char, + bar_template=bar_template, + info_sep=info_sep, + file=file, + label=label, + width=width, + color=color, + update_min_steps=update_min_steps, + ) + + +def clear() -> None: + """Clears the terminal screen. This will have the effect of clearing + the whole visible space of the terminal and moving the cursor to the + top left. This does not do anything if not connected to a terminal. + + .. versionadded:: 2.0 + """ + if not isatty(sys.stdout): + return + + # ANSI escape \033[2J clears the screen, \033[1;1H moves the cursor + echo("\033[2J\033[1;1H", nl=False) + + +def _interpret_color(color: int | tuple[int, int, int] | str, offset: int = 0) -> str: + if isinstance(color, int): + return f"{38 + offset};5;{color:d}" + + if isinstance(color, (tuple, list)): + r, g, b = color + return f"{38 + offset};2;{r:d};{g:d};{b:d}" + + return str(_ansi_colors[color] + offset) + + +def style( + text: t.Any, + fg: int | tuple[int, int, int] | str | None = None, + bg: int | tuple[int, int, int] | str | None = None, + bold: bool | None = None, + dim: bool | None = None, + underline: bool | None = None, + overline: bool | None = None, + italic: bool | None = None, + blink: bool | None = None, + reverse: bool | None = None, + strikethrough: bool | None = None, + reset: bool = True, +) -> str: + """Styles a text with ANSI styles and returns the new string. By + default the styling is self contained which means that at the end + of the string a reset code is issued. This can be prevented by + passing ``reset=False``. + + Examples:: + + click.echo(click.style('Hello World!', fg='green')) + click.echo(click.style('ATTENTION!', blink=True)) + click.echo(click.style('Some things', reverse=True, fg='cyan')) + click.echo(click.style('More colors', fg=(255, 12, 128), bg=117)) + + Supported color names: + + * ``black`` (might be a gray) + * ``red`` + * ``green`` + * ``yellow`` (might be an orange) + * ``blue`` + * ``magenta`` + * ``cyan`` + * ``white`` (might be light gray) + * ``bright_black`` + * ``bright_red`` + * ``bright_green`` + * ``bright_yellow`` + * ``bright_blue`` + * ``bright_magenta`` + * ``bright_cyan`` + * ``bright_white`` + * ``reset`` (reset the color code only) + + If the terminal supports it, color may also be specified as: + + - An integer in the interval [0, 255]. The terminal must support + 8-bit/256-color mode. + - An RGB tuple of three integers in [0, 255]. The terminal must + support 24-bit/true-color mode. + + See https://en.wikipedia.org/wiki/ANSI_color and + https://gist.github.com/XVilka/8346728 for more information. + + :param text: the string to style with ansi codes. + :param fg: if provided this will become the foreground color. + :param bg: if provided this will become the background color. + :param bold: if provided this will enable or disable bold mode. + :param dim: if provided this will enable or disable dim mode. This is + badly supported. + :param underline: if provided this will enable or disable underline. + :param overline: if provided this will enable or disable overline. + :param italic: if provided this will enable or disable italic. + :param blink: if provided this will enable or disable blinking. + :param reverse: if provided this will enable or disable inverse + rendering (foreground becomes background and the + other way round). + :param strikethrough: if provided this will enable or disable + striking through text. + :param reset: by default a reset-all code is added at the end of the + string which means that styles do not carry over. This + can be disabled to compose styles. + + .. versionchanged:: 8.0 + A non-string ``message`` is converted to a string. + + .. versionchanged:: 8.0 + Added support for 256 and RGB color codes. + + .. versionchanged:: 8.0 + Added the ``strikethrough``, ``italic``, and ``overline`` + parameters. + + .. versionchanged:: 7.0 + Added support for bright colors. + + .. versionadded:: 2.0 + """ + if not isinstance(text, str): + text = str(text) + + bits = [] + + if fg: + try: + bits.append(f"\033[{_interpret_color(fg)}m") + except KeyError: + raise TypeError(f"Unknown color {fg!r}") from None + + if bg: + try: + bits.append(f"\033[{_interpret_color(bg, 10)}m") + except KeyError: + raise TypeError(f"Unknown color {bg!r}") from None + + if bold is not None: + bits.append(f"\033[{1 if bold else 22}m") + if dim is not None: + bits.append(f"\033[{2 if dim else 22}m") + if underline is not None: + bits.append(f"\033[{4 if underline else 24}m") + if overline is not None: + bits.append(f"\033[{53 if overline else 55}m") + if italic is not None: + bits.append(f"\033[{3 if italic else 23}m") + if blink is not None: + bits.append(f"\033[{5 if blink else 25}m") + if reverse is not None: + bits.append(f"\033[{7 if reverse else 27}m") + if strikethrough is not None: + bits.append(f"\033[{9 if strikethrough else 29}m") + bits.append(text) + if reset: + bits.append(_ansi_reset_all) + return "".join(bits) + + +def unstyle(text: str) -> str: + """Removes ANSI styling information from a string. Usually it's not + necessary to use this function as Click's echo function will + automatically remove styling if necessary. + + .. versionadded:: 2.0 + + :param text: the text to remove style information from. + """ + return strip_ansi(text) + + +def secho( + message: t.Any | None = None, + file: t.IO[t.AnyStr] | None = None, + nl: bool = True, + err: bool = False, + color: bool | None = None, + **styles: t.Any, +) -> None: + """This function combines :func:`echo` and :func:`style` into one + call. As such the following two calls are the same:: + + click.secho('Hello World!', fg='green') + click.echo(click.style('Hello World!', fg='green')) + + All keyword arguments are forwarded to the underlying functions + depending on which one they go with. + + Non-string types will be converted to :class:`str`. However, + :class:`bytes` are passed directly to :meth:`echo` without applying + style. If you want to style bytes that represent text, call + :meth:`bytes.decode` first. + + .. versionchanged:: 8.0 + A non-string ``message`` is converted to a string. Bytes are + passed through without style applied. + + .. versionadded:: 2.0 + """ + if message is not None and not isinstance(message, (bytes, bytearray)): + message = style(message, **styles) + + return echo(message, file=file, nl=nl, err=err, color=color) + + +@t.overload +def edit( + text: bytes | bytearray, + editor: str | None = None, + env: cabc.Mapping[str, str] | None = None, + require_save: bool = False, + extension: str = ".txt", +) -> bytes | None: ... + + +@t.overload +def edit( + text: str, + editor: str | None = None, + env: cabc.Mapping[str, str] | None = None, + require_save: bool = True, + extension: str = ".txt", +) -> str | None: ... + + +@t.overload +def edit( + text: None = None, + editor: str | None = None, + env: cabc.Mapping[str, str] | None = None, + require_save: bool = True, + extension: str = ".txt", + filename: str | cabc.Iterable[str] | None = None, +) -> None: ... + + +def edit( + text: str | bytes | bytearray | None = None, + editor: str | None = None, + env: cabc.Mapping[str, str] | None = None, + require_save: bool = True, + extension: str = ".txt", + filename: str | cabc.Iterable[str] | None = None, +) -> str | bytes | bytearray | None: + r"""Edits the given text in the defined editor. If an editor is given + (should be the full path to the executable but the regular operating + system search path is used for finding the executable) it overrides + the detected editor. Optionally, some environment variables can be + used. If the editor is closed without changes, `None` is returned. In + case a file is edited directly the return value is always `None` and + `require_save` and `extension` are ignored. + + If the editor cannot be opened a :exc:`UsageError` is raised. + + Note for Windows: to simplify cross-platform usage, the newlines are + automatically converted from POSIX to Windows and vice versa. As such, + the message here will have ``\n`` as newline markers. + + :param text: the text to edit. + :param editor: optionally the editor to use. Defaults to automatic + detection. + :param env: environment variables to forward to the editor. + :param require_save: if this is true, then not saving in the editor + will make the return value become `None`. + :param extension: the extension to tell the editor about. This defaults + to `.txt` but changing this might change syntax + highlighting. + :param filename: if provided it will edit this file instead of the + provided text contents. It will not use a temporary + file as an indirection in that case. If the editor supports + editing multiple files at once, a sequence of files may be + passed as well. Invoke `click.file` once per file instead + if multiple files cannot be managed at once or editing the + files serially is desired. + + .. versionchanged:: 8.2.0 + ``filename`` now accepts any ``Iterable[str]`` in addition to a ``str`` + if the ``editor`` supports editing multiple files at once. + + """ + from ._termui_impl import Editor + + ed = Editor(editor=editor, env=env, require_save=require_save, extension=extension) + + if filename is None: + return ed.edit(text) + + if isinstance(filename, str): + filename = (filename,) + + ed.edit_files(filenames=filename) + return None + + +def launch(url: str, wait: bool = False, locate: bool = False) -> int: + """This function launches the given URL (or filename) in the default + viewer application for this file type. If this is an executable, it + might launch the executable in a new session. The return value is + the exit code of the launched application. Usually, ``0`` indicates + success. + + Examples:: + + click.launch('https://click.palletsprojects.com/') + click.launch('/my/downloaded/file', locate=True) + + .. versionadded:: 2.0 + + :param url: URL or filename of the thing to launch. + :param wait: Wait for the program to exit before returning. This + only works if the launched program blocks. In particular, + ``xdg-open`` on Linux does not block. + :param locate: if this is set to `True` then instead of launching the + application associated with the URL it will attempt to + launch a file manager with the file located. This + might have weird effects if the URL does not point to + the filesystem. + """ + from ._termui_impl import open_url + + return open_url(url, wait=wait, locate=locate) + + +# If this is provided, getchar() calls into this instead. This is used +# for unittesting purposes. +_getchar: t.Callable[[bool], str] | None = None + + +def getchar(echo: bool = False) -> str: + """Fetches a single character from the terminal and returns it. This + will always return a unicode character and under certain rare + circumstances this might return more than one character. The + situations which more than one character is returned is when for + whatever reason multiple characters end up in the terminal buffer or + standard input was not actually a terminal. + + Note that this will always read from the terminal, even if something + is piped into the standard input. + + Note for Windows: in rare cases when typing non-ASCII characters, this + function might wait for a second character and then return both at once. + This is because certain Unicode characters look like special-key markers. + + .. versionadded:: 2.0 + + :param echo: if set to `True`, the character read will also show up on + the terminal. The default is to not show it. + """ + global _getchar + + if _getchar is None: + from ._termui_impl import getchar as f + + _getchar = f + + return _getchar(echo) + + +def raw_terminal() -> AbstractContextManager[int]: + from ._termui_impl import raw_terminal as f + + return f() + + +def pause(info: str | None = None, err: bool = False) -> None: + """This command stops execution and waits for the user to press any + key to continue. This is similar to the Windows batch "pause" + command. If the program is not run through a terminal, this command + will instead do nothing. + + .. versionadded:: 2.0 + + .. versionadded:: 4.0 + Added the `err` parameter. + + :param info: The message to print before pausing. Defaults to + ``"Press any key to continue..."``. + :param err: if set to message goes to ``stderr`` instead of + ``stdout``, the same as with echo. + """ + if not isatty(sys.stdin) or not isatty(sys.stdout): + return + + if info is None: + info = _("Press any key to continue...") + + try: + if info: + echo(info, nl=False, err=err) + try: + getchar() + except (KeyboardInterrupt, EOFError): + pass + finally: + if info: + echo(err=err) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click/testing.py b/tools/converter-generator/venv/lib/python3.11/site-packages/click/testing.py new file mode 100644 index 0000000..f6f60b8 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click/testing.py @@ -0,0 +1,577 @@ +from __future__ import annotations + +import collections.abc as cabc +import contextlib +import io +import os +import shlex +import sys +import tempfile +import typing as t +from types import TracebackType + +from . import _compat +from . import formatting +from . import termui +from . import utils +from ._compat import _find_binary_reader + +if t.TYPE_CHECKING: + from _typeshed import ReadableBuffer + + from .core import Command + + +class EchoingStdin: + def __init__(self, input: t.BinaryIO, output: t.BinaryIO) -> None: + self._input = input + self._output = output + self._paused = False + + def __getattr__(self, x: str) -> t.Any: + return getattr(self._input, x) + + def _echo(self, rv: bytes) -> bytes: + if not self._paused: + self._output.write(rv) + + return rv + + def read(self, n: int = -1) -> bytes: + return self._echo(self._input.read(n)) + + def read1(self, n: int = -1) -> bytes: + return self._echo(self._input.read1(n)) # type: ignore + + def readline(self, n: int = -1) -> bytes: + return self._echo(self._input.readline(n)) + + def readlines(self) -> list[bytes]: + return [self._echo(x) for x in self._input.readlines()] + + def __iter__(self) -> cabc.Iterator[bytes]: + return iter(self._echo(x) for x in self._input) + + def __repr__(self) -> str: + return repr(self._input) + + +@contextlib.contextmanager +def _pause_echo(stream: EchoingStdin | None) -> cabc.Iterator[None]: + if stream is None: + yield + else: + stream._paused = True + yield + stream._paused = False + + +class BytesIOCopy(io.BytesIO): + """Patch ``io.BytesIO`` to let the written stream be copied to another. + + .. versionadded:: 8.2 + """ + + def __init__(self, copy_to: io.BytesIO) -> None: + super().__init__() + self.copy_to = copy_to + + def flush(self) -> None: + super().flush() + self.copy_to.flush() + + def write(self, b: ReadableBuffer) -> int: + self.copy_to.write(b) + return super().write(b) + + +class StreamMixer: + """Mixes `` and `` streams. + + The result is available in the ``output`` attribute. + + .. versionadded:: 8.2 + """ + + def __init__(self) -> None: + self.output: io.BytesIO = io.BytesIO() + self.stdout: io.BytesIO = BytesIOCopy(copy_to=self.output) + self.stderr: io.BytesIO = BytesIOCopy(copy_to=self.output) + + def __del__(self) -> None: + """ + Guarantee that embedded file-like objects are closed in a + predictable order, protecting against races between + self.output being closed and other streams being flushed on close + + .. versionadded:: 8.2.2 + """ + self.stderr.close() + self.stdout.close() + self.output.close() + + +class _NamedTextIOWrapper(io.TextIOWrapper): + def __init__( + self, buffer: t.BinaryIO, name: str, mode: str, **kwargs: t.Any + ) -> None: + super().__init__(buffer, **kwargs) + self._name = name + self._mode = mode + + @property + def name(self) -> str: + return self._name + + @property + def mode(self) -> str: + return self._mode + + +def make_input_stream( + input: str | bytes | t.IO[t.Any] | None, charset: str +) -> t.BinaryIO: + # Is already an input stream. + if hasattr(input, "read"): + rv = _find_binary_reader(t.cast("t.IO[t.Any]", input)) + + if rv is not None: + return rv + + raise TypeError("Could not find binary reader for input stream.") + + if input is None: + input = b"" + elif isinstance(input, str): + input = input.encode(charset) + + return io.BytesIO(input) + + +class Result: + """Holds the captured result of an invoked CLI script. + + :param runner: The runner that created the result + :param stdout_bytes: The standard output as bytes. + :param stderr_bytes: The standard error as bytes. + :param output_bytes: A mix of ``stdout_bytes`` and ``stderr_bytes``, as the + user would see it in its terminal. + :param return_value: The value returned from the invoked command. + :param exit_code: The exit code as integer. + :param exception: The exception that happened if one did. + :param exc_info: Exception information (exception type, exception instance, + traceback type). + + .. versionchanged:: 8.2 + ``stderr_bytes`` no longer optional, ``output_bytes`` introduced and + ``mix_stderr`` has been removed. + + .. versionadded:: 8.0 + Added ``return_value``. + """ + + def __init__( + self, + runner: CliRunner, + stdout_bytes: bytes, + stderr_bytes: bytes, + output_bytes: bytes, + return_value: t.Any, + exit_code: int, + exception: BaseException | None, + exc_info: tuple[type[BaseException], BaseException, TracebackType] + | None = None, + ): + self.runner = runner + self.stdout_bytes = stdout_bytes + self.stderr_bytes = stderr_bytes + self.output_bytes = output_bytes + self.return_value = return_value + self.exit_code = exit_code + self.exception = exception + self.exc_info = exc_info + + @property + def output(self) -> str: + """The terminal output as unicode string, as the user would see it. + + .. versionchanged:: 8.2 + No longer a proxy for ``self.stdout``. Now has its own independent stream + that is mixing `` and ``, in the order they were written. + """ + return self.output_bytes.decode(self.runner.charset, "replace").replace( + "\r\n", "\n" + ) + + @property + def stdout(self) -> str: + """The standard output as unicode string.""" + return self.stdout_bytes.decode(self.runner.charset, "replace").replace( + "\r\n", "\n" + ) + + @property + def stderr(self) -> str: + """The standard error as unicode string. + + .. versionchanged:: 8.2 + No longer raise an exception, always returns the `` string. + """ + return self.stderr_bytes.decode(self.runner.charset, "replace").replace( + "\r\n", "\n" + ) + + def __repr__(self) -> str: + exc_str = repr(self.exception) if self.exception else "okay" + return f"<{type(self).__name__} {exc_str}>" + + +class CliRunner: + """The CLI runner provides functionality to invoke a Click command line + script for unittesting purposes in a isolated environment. This only + works in single-threaded systems without any concurrency as it changes the + global interpreter state. + + :param charset: the character set for the input and output data. + :param env: a dictionary with environment variables for overriding. + :param echo_stdin: if this is set to `True`, then reading from `` writes + to ``. This is useful for showing examples in + some circumstances. Note that regular prompts + will automatically echo the input. + :param catch_exceptions: Whether to catch any exceptions other than + ``SystemExit`` when running :meth:`~CliRunner.invoke`. + + .. versionchanged:: 8.2 + Added the ``catch_exceptions`` parameter. + + .. versionchanged:: 8.2 + ``mix_stderr`` parameter has been removed. + """ + + def __init__( + self, + charset: str = "utf-8", + env: cabc.Mapping[str, str | None] | None = None, + echo_stdin: bool = False, + catch_exceptions: bool = True, + ) -> None: + self.charset = charset + self.env: cabc.Mapping[str, str | None] = env or {} + self.echo_stdin = echo_stdin + self.catch_exceptions = catch_exceptions + + def get_default_prog_name(self, cli: Command) -> str: + """Given a command object it will return the default program name + for it. The default is the `name` attribute or ``"root"`` if not + set. + """ + return cli.name or "root" + + def make_env( + self, overrides: cabc.Mapping[str, str | None] | None = None + ) -> cabc.Mapping[str, str | None]: + """Returns the environment overrides for invoking a script.""" + rv = dict(self.env) + if overrides: + rv.update(overrides) + return rv + + @contextlib.contextmanager + def isolation( + self, + input: str | bytes | t.IO[t.Any] | None = None, + env: cabc.Mapping[str, str | None] | None = None, + color: bool = False, + ) -> cabc.Iterator[tuple[io.BytesIO, io.BytesIO, io.BytesIO]]: + """A context manager that sets up the isolation for invoking of a + command line tool. This sets up `` with the given input data + and `os.environ` with the overrides from the given dictionary. + This also rebinds some internals in Click to be mocked (like the + prompt functionality). + + This is automatically done in the :meth:`invoke` method. + + :param input: the input stream to put into `sys.stdin`. + :param env: the environment overrides as dictionary. + :param color: whether the output should contain color codes. The + application can still override this explicitly. + + .. versionadded:: 8.2 + An additional output stream is returned, which is a mix of + `` and `` streams. + + .. versionchanged:: 8.2 + Always returns the `` stream. + + .. versionchanged:: 8.0 + `` is opened with ``errors="backslashreplace"`` + instead of the default ``"strict"``. + + .. versionchanged:: 4.0 + Added the ``color`` parameter. + """ + bytes_input = make_input_stream(input, self.charset) + echo_input = None + + old_stdin = sys.stdin + old_stdout = sys.stdout + old_stderr = sys.stderr + old_forced_width = formatting.FORCED_WIDTH + formatting.FORCED_WIDTH = 80 + + env = self.make_env(env) + + stream_mixer = StreamMixer() + + if self.echo_stdin: + bytes_input = echo_input = t.cast( + t.BinaryIO, EchoingStdin(bytes_input, stream_mixer.stdout) + ) + + sys.stdin = text_input = _NamedTextIOWrapper( + bytes_input, encoding=self.charset, name="", mode="r" + ) + + if self.echo_stdin: + # Force unbuffered reads, otherwise TextIOWrapper reads a + # large chunk which is echoed early. + text_input._CHUNK_SIZE = 1 # type: ignore + + sys.stdout = _NamedTextIOWrapper( + stream_mixer.stdout, encoding=self.charset, name="", mode="w" + ) + + sys.stderr = _NamedTextIOWrapper( + stream_mixer.stderr, + encoding=self.charset, + name="", + mode="w", + errors="backslashreplace", + ) + + @_pause_echo(echo_input) # type: ignore + def visible_input(prompt: str | None = None) -> str: + sys.stdout.write(prompt or "") + try: + val = next(text_input).rstrip("\r\n") + except StopIteration as e: + raise EOFError() from e + sys.stdout.write(f"{val}\n") + sys.stdout.flush() + return val + + @_pause_echo(echo_input) # type: ignore + def hidden_input(prompt: str | None = None) -> str: + sys.stdout.write(f"{prompt or ''}\n") + sys.stdout.flush() + try: + return next(text_input).rstrip("\r\n") + except StopIteration as e: + raise EOFError() from e + + @_pause_echo(echo_input) # type: ignore + def _getchar(echo: bool) -> str: + char = sys.stdin.read(1) + + if echo: + sys.stdout.write(char) + + sys.stdout.flush() + return char + + default_color = color + + def should_strip_ansi( + stream: t.IO[t.Any] | None = None, color: bool | None = None + ) -> bool: + if color is None: + return not default_color + return not color + + old_visible_prompt_func = termui.visible_prompt_func + old_hidden_prompt_func = termui.hidden_prompt_func + old__getchar_func = termui._getchar + old_should_strip_ansi = utils.should_strip_ansi # type: ignore + old__compat_should_strip_ansi = _compat.should_strip_ansi + termui.visible_prompt_func = visible_input + termui.hidden_prompt_func = hidden_input + termui._getchar = _getchar + utils.should_strip_ansi = should_strip_ansi # type: ignore + _compat.should_strip_ansi = should_strip_ansi + + old_env = {} + try: + for key, value in env.items(): + old_env[key] = os.environ.get(key) + if value is None: + try: + del os.environ[key] + except Exception: + pass + else: + os.environ[key] = value + yield (stream_mixer.stdout, stream_mixer.stderr, stream_mixer.output) + finally: + for key, value in old_env.items(): + if value is None: + try: + del os.environ[key] + except Exception: + pass + else: + os.environ[key] = value + sys.stdout = old_stdout + sys.stderr = old_stderr + sys.stdin = old_stdin + termui.visible_prompt_func = old_visible_prompt_func + termui.hidden_prompt_func = old_hidden_prompt_func + termui._getchar = old__getchar_func + utils.should_strip_ansi = old_should_strip_ansi # type: ignore + _compat.should_strip_ansi = old__compat_should_strip_ansi + formatting.FORCED_WIDTH = old_forced_width + + def invoke( + self, + cli: Command, + args: str | cabc.Sequence[str] | None = None, + input: str | bytes | t.IO[t.Any] | None = None, + env: cabc.Mapping[str, str | None] | None = None, + catch_exceptions: bool | None = None, + color: bool = False, + **extra: t.Any, + ) -> Result: + """Invokes a command in an isolated environment. The arguments are + forwarded directly to the command line script, the `extra` keyword + arguments are passed to the :meth:`~clickpkg.Command.main` function of + the command. + + This returns a :class:`Result` object. + + :param cli: the command to invoke + :param args: the arguments to invoke. It may be given as an iterable + or a string. When given as string it will be interpreted + as a Unix shell command. More details at + :func:`shlex.split`. + :param input: the input data for `sys.stdin`. + :param env: the environment overrides. + :param catch_exceptions: Whether to catch any other exceptions than + ``SystemExit``. If :data:`None`, the value + from :class:`CliRunner` is used. + :param extra: the keyword arguments to pass to :meth:`main`. + :param color: whether the output should contain color codes. The + application can still override this explicitly. + + .. versionadded:: 8.2 + The result object has the ``output_bytes`` attribute with + the mix of ``stdout_bytes`` and ``stderr_bytes``, as the user would + see it in its terminal. + + .. versionchanged:: 8.2 + The result object always returns the ``stderr_bytes`` stream. + + .. versionchanged:: 8.0 + The result object has the ``return_value`` attribute with + the value returned from the invoked command. + + .. versionchanged:: 4.0 + Added the ``color`` parameter. + + .. versionchanged:: 3.0 + Added the ``catch_exceptions`` parameter. + + .. versionchanged:: 3.0 + The result object has the ``exc_info`` attribute with the + traceback if available. + """ + exc_info = None + if catch_exceptions is None: + catch_exceptions = self.catch_exceptions + + with self.isolation(input=input, env=env, color=color) as outstreams: + return_value = None + exception: BaseException | None = None + exit_code = 0 + + if isinstance(args, str): + args = shlex.split(args) + + try: + prog_name = extra.pop("prog_name") + except KeyError: + prog_name = self.get_default_prog_name(cli) + + try: + return_value = cli.main(args=args or (), prog_name=prog_name, **extra) + except SystemExit as e: + exc_info = sys.exc_info() + e_code = t.cast("int | t.Any | None", e.code) + + if e_code is None: + e_code = 0 + + if e_code != 0: + exception = e + + if not isinstance(e_code, int): + sys.stdout.write(str(e_code)) + sys.stdout.write("\n") + e_code = 1 + + exit_code = e_code + + except Exception as e: + if not catch_exceptions: + raise + exception = e + exit_code = 1 + exc_info = sys.exc_info() + finally: + sys.stdout.flush() + sys.stderr.flush() + stdout = outstreams[0].getvalue() + stderr = outstreams[1].getvalue() + output = outstreams[2].getvalue() + + return Result( + runner=self, + stdout_bytes=stdout, + stderr_bytes=stderr, + output_bytes=output, + return_value=return_value, + exit_code=exit_code, + exception=exception, + exc_info=exc_info, # type: ignore + ) + + @contextlib.contextmanager + def isolated_filesystem( + self, temp_dir: str | os.PathLike[str] | None = None + ) -> cabc.Iterator[str]: + """A context manager that creates a temporary directory and + changes the current working directory to it. This isolates tests + that affect the contents of the CWD to prevent them from + interfering with each other. + + :param temp_dir: Create the temporary directory under this + directory. If given, the created directory is not removed + when exiting. + + .. versionchanged:: 8.0 + Added the ``temp_dir`` parameter. + """ + cwd = os.getcwd() + dt = tempfile.mkdtemp(dir=temp_dir) + os.chdir(dt) + + try: + yield dt + finally: + os.chdir(cwd) + + if temp_dir is None: + import shutil + + try: + shutil.rmtree(dt) + except OSError: + pass diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click/types.py b/tools/converter-generator/venv/lib/python3.11/site-packages/click/types.py new file mode 100644 index 0000000..e71c1c2 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click/types.py @@ -0,0 +1,1209 @@ +from __future__ import annotations + +import collections.abc as cabc +import enum +import os +import stat +import sys +import typing as t +from datetime import datetime +from gettext import gettext as _ +from gettext import ngettext + +from ._compat import _get_argv_encoding +from ._compat import open_stream +from .exceptions import BadParameter +from .utils import format_filename +from .utils import LazyFile +from .utils import safecall + +if t.TYPE_CHECKING: + import typing_extensions as te + + from .core import Context + from .core import Parameter + from .shell_completion import CompletionItem + +ParamTypeValue = t.TypeVar("ParamTypeValue") + + +class ParamType: + """Represents the type of a parameter. Validates and converts values + from the command line or Python into the correct type. + + To implement a custom type, subclass and implement at least the + following: + + - The :attr:`name` class attribute must be set. + - Calling an instance of the type with ``None`` must return + ``None``. This is already implemented by default. + - :meth:`convert` must convert string values to the correct type. + - :meth:`convert` must accept values that are already the correct + type. + - It must be able to convert a value if the ``ctx`` and ``param`` + arguments are ``None``. This can occur when converting prompt + input. + """ + + is_composite: t.ClassVar[bool] = False + arity: t.ClassVar[int] = 1 + + #: the descriptive name of this type + name: str + + #: if a list of this type is expected and the value is pulled from a + #: string environment variable, this is what splits it up. `None` + #: means any whitespace. For all parameters the general rule is that + #: whitespace splits them up. The exception are paths and files which + #: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on + #: Windows). + envvar_list_splitter: t.ClassVar[str | None] = None + + def to_info_dict(self) -> dict[str, t.Any]: + """Gather information that could be useful for a tool generating + user-facing documentation. + + Use :meth:`click.Context.to_info_dict` to traverse the entire + CLI structure. + + .. versionadded:: 8.0 + """ + # The class name without the "ParamType" suffix. + param_type = type(self).__name__.partition("ParamType")[0] + param_type = param_type.partition("ParameterType")[0] + + # Custom subclasses might not remember to set a name. + if hasattr(self, "name"): + name = self.name + else: + name = param_type + + return {"param_type": param_type, "name": name} + + def __call__( + self, + value: t.Any, + param: Parameter | None = None, + ctx: Context | None = None, + ) -> t.Any: + if value is not None: + return self.convert(value, param, ctx) + + def get_metavar(self, param: Parameter, ctx: Context) -> str | None: + """Returns the metavar default for this param if it provides one.""" + + def get_missing_message(self, param: Parameter, ctx: Context | None) -> str | None: + """Optionally might return extra information about a missing + parameter. + + .. versionadded:: 2.0 + """ + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> t.Any: + """Convert the value to the correct type. This is not called if + the value is ``None`` (the missing value). + + This must accept string values from the command line, as well as + values that are already the correct type. It may also convert + other compatible types. + + The ``param`` and ``ctx`` arguments may be ``None`` in certain + situations, such as when converting prompt input. + + If the value cannot be converted, call :meth:`fail` with a + descriptive message. + + :param value: The value to convert. + :param param: The parameter that is using this type to convert + its value. May be ``None``. + :param ctx: The current context that arrived at this value. May + be ``None``. + """ + return value + + def split_envvar_value(self, rv: str) -> cabc.Sequence[str]: + """Given a value from an environment variable this splits it up + into small chunks depending on the defined envvar list splitter. + + If the splitter is set to `None`, which means that whitespace splits, + then leading and trailing whitespace is ignored. Otherwise, leading + and trailing splitters usually lead to empty items being included. + """ + return (rv or "").split(self.envvar_list_splitter) + + def fail( + self, + message: str, + param: Parameter | None = None, + ctx: Context | None = None, + ) -> t.NoReturn: + """Helper method to fail with an invalid value message.""" + raise BadParameter(message, ctx=ctx, param=param) + + def shell_complete( + self, ctx: Context, param: Parameter, incomplete: str + ) -> list[CompletionItem]: + """Return a list of + :class:`~click.shell_completion.CompletionItem` objects for the + incomplete value. Most types do not provide completions, but + some do, and this allows custom types to provide custom + completions as well. + + :param ctx: Invocation context for this command. + :param param: The parameter that is requesting completion. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + return [] + + +class CompositeParamType(ParamType): + is_composite = True + + @property + def arity(self) -> int: # type: ignore + raise NotImplementedError() + + +class FuncParamType(ParamType): + def __init__(self, func: t.Callable[[t.Any], t.Any]) -> None: + self.name: str = func.__name__ + self.func = func + + def to_info_dict(self) -> dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict["func"] = self.func + return info_dict + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> t.Any: + try: + return self.func(value) + except ValueError: + try: + value = str(value) + except UnicodeError: + value = value.decode("utf-8", "replace") + + self.fail(value, param, ctx) + + +class UnprocessedParamType(ParamType): + name = "text" + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> t.Any: + return value + + def __repr__(self) -> str: + return "UNPROCESSED" + + +class StringParamType(ParamType): + name = "text" + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> t.Any: + if isinstance(value, bytes): + enc = _get_argv_encoding() + try: + value = value.decode(enc) + except UnicodeError: + fs_enc = sys.getfilesystemencoding() + if fs_enc != enc: + try: + value = value.decode(fs_enc) + except UnicodeError: + value = value.decode("utf-8", "replace") + else: + value = value.decode("utf-8", "replace") + return value + return str(value) + + def __repr__(self) -> str: + return "STRING" + + +class Choice(ParamType, t.Generic[ParamTypeValue]): + """The choice type allows a value to be checked against a fixed set + of supported values. + + You may pass any iterable value which will be converted to a tuple + and thus will only be iterated once. + + The resulting value will always be one of the originally passed choices. + See :meth:`normalize_choice` for more info on the mapping of strings + to choices. See :ref:`choice-opts` for an example. + + :param case_sensitive: Set to false to make choices case + insensitive. Defaults to true. + + .. versionchanged:: 8.2.0 + Non-``str`` ``choices`` are now supported. It can additionally be any + iterable. Before you were not recommended to pass anything but a list or + tuple. + + .. versionadded:: 8.2.0 + Choice normalization can be overridden via :meth:`normalize_choice`. + """ + + name = "choice" + + def __init__( + self, choices: cabc.Iterable[ParamTypeValue], case_sensitive: bool = True + ) -> None: + self.choices: cabc.Sequence[ParamTypeValue] = tuple(choices) + self.case_sensitive = case_sensitive + + def to_info_dict(self) -> dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict["choices"] = self.choices + info_dict["case_sensitive"] = self.case_sensitive + return info_dict + + def _normalized_mapping( + self, ctx: Context | None = None + ) -> cabc.Mapping[ParamTypeValue, str]: + """ + Returns mapping where keys are the original choices and the values are + the normalized values that are accepted via the command line. + + This is a simple wrapper around :meth:`normalize_choice`, use that + instead which is supported. + """ + return { + choice: self.normalize_choice( + choice=choice, + ctx=ctx, + ) + for choice in self.choices + } + + def normalize_choice(self, choice: ParamTypeValue, ctx: Context | None) -> str: + """ + Normalize a choice value, used to map a passed string to a choice. + Each choice must have a unique normalized value. + + By default uses :meth:`Context.token_normalize_func` and if not case + sensitive, convert it to a casefolded value. + + .. versionadded:: 8.2.0 + """ + normed_value = choice.name if isinstance(choice, enum.Enum) else str(choice) + + if ctx is not None and ctx.token_normalize_func is not None: + normed_value = ctx.token_normalize_func(normed_value) + + if not self.case_sensitive: + normed_value = normed_value.casefold() + + return normed_value + + def get_metavar(self, param: Parameter, ctx: Context) -> str | None: + if param.param_type_name == "option" and not param.show_choices: # type: ignore + choice_metavars = [ + convert_type(type(choice)).name.upper() for choice in self.choices + ] + choices_str = "|".join([*dict.fromkeys(choice_metavars)]) + else: + choices_str = "|".join( + [str(i) for i in self._normalized_mapping(ctx=ctx).values()] + ) + + # Use curly braces to indicate a required argument. + if param.required and param.param_type_name == "argument": + return f"{{{choices_str}}}" + + # Use square braces to indicate an option or optional argument. + return f"[{choices_str}]" + + def get_missing_message(self, param: Parameter, ctx: Context | None) -> str: + """ + Message shown when no choice is passed. + + .. versionchanged:: 8.2.0 Added ``ctx`` argument. + """ + return _("Choose from:\n\t{choices}").format( + choices=",\n\t".join(self._normalized_mapping(ctx=ctx).values()) + ) + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> ParamTypeValue: + """ + For a given value from the parser, normalize it and find its + matching normalized value in the list of choices. Then return the + matched "original" choice. + """ + normed_value = self.normalize_choice(choice=value, ctx=ctx) + normalized_mapping = self._normalized_mapping(ctx=ctx) + + try: + return next( + original + for original, normalized in normalized_mapping.items() + if normalized == normed_value + ) + except StopIteration: + self.fail( + self.get_invalid_choice_message(value=value, ctx=ctx), + param=param, + ctx=ctx, + ) + + def get_invalid_choice_message(self, value: t.Any, ctx: Context | None) -> str: + """Get the error message when the given choice is invalid. + + :param value: The invalid value. + + .. versionadded:: 8.2 + """ + choices_str = ", ".join(map(repr, self._normalized_mapping(ctx=ctx).values())) + return ngettext( + "{value!r} is not {choice}.", + "{value!r} is not one of {choices}.", + len(self.choices), + ).format(value=value, choice=choices_str, choices=choices_str) + + def __repr__(self) -> str: + return f"Choice({list(self.choices)})" + + def shell_complete( + self, ctx: Context, param: Parameter, incomplete: str + ) -> list[CompletionItem]: + """Complete choices that start with the incomplete value. + + :param ctx: Invocation context for this command. + :param param: The parameter that is requesting completion. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + from click.shell_completion import CompletionItem + + str_choices = map(str, self.choices) + + if self.case_sensitive: + matched = (c for c in str_choices if c.startswith(incomplete)) + else: + incomplete = incomplete.lower() + matched = (c for c in str_choices if c.lower().startswith(incomplete)) + + return [CompletionItem(c) for c in matched] + + +class DateTime(ParamType): + """The DateTime type converts date strings into `datetime` objects. + + The format strings which are checked are configurable, but default to some + common (non-timezone aware) ISO 8601 formats. + + When specifying *DateTime* formats, you should only pass a list or a tuple. + Other iterables, like generators, may lead to surprising results. + + The format strings are processed using ``datetime.strptime``, and this + consequently defines the format strings which are allowed. + + Parsing is tried using each format, in order, and the first format which + parses successfully is used. + + :param formats: A list or tuple of date format strings, in the order in + which they should be tried. Defaults to + ``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``, + ``'%Y-%m-%d %H:%M:%S'``. + """ + + name = "datetime" + + def __init__(self, formats: cabc.Sequence[str] | None = None): + self.formats: cabc.Sequence[str] = formats or [ + "%Y-%m-%d", + "%Y-%m-%dT%H:%M:%S", + "%Y-%m-%d %H:%M:%S", + ] + + def to_info_dict(self) -> dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict["formats"] = self.formats + return info_dict + + def get_metavar(self, param: Parameter, ctx: Context) -> str | None: + return f"[{'|'.join(self.formats)}]" + + def _try_to_convert_date(self, value: t.Any, format: str) -> datetime | None: + try: + return datetime.strptime(value, format) + except ValueError: + return None + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> t.Any: + if isinstance(value, datetime): + return value + + for format in self.formats: + converted = self._try_to_convert_date(value, format) + + if converted is not None: + return converted + + formats_str = ", ".join(map(repr, self.formats)) + self.fail( + ngettext( + "{value!r} does not match the format {format}.", + "{value!r} does not match the formats {formats}.", + len(self.formats), + ).format(value=value, format=formats_str, formats=formats_str), + param, + ctx, + ) + + def __repr__(self) -> str: + return "DateTime" + + +class _NumberParamTypeBase(ParamType): + _number_class: t.ClassVar[type[t.Any]] + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> t.Any: + try: + return self._number_class(value) + except ValueError: + self.fail( + _("{value!r} is not a valid {number_type}.").format( + value=value, number_type=self.name + ), + param, + ctx, + ) + + +class _NumberRangeBase(_NumberParamTypeBase): + def __init__( + self, + min: float | None = None, + max: float | None = None, + min_open: bool = False, + max_open: bool = False, + clamp: bool = False, + ) -> None: + self.min = min + self.max = max + self.min_open = min_open + self.max_open = max_open + self.clamp = clamp + + def to_info_dict(self) -> dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict.update( + min=self.min, + max=self.max, + min_open=self.min_open, + max_open=self.max_open, + clamp=self.clamp, + ) + return info_dict + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> t.Any: + import operator + + rv = super().convert(value, param, ctx) + lt_min: bool = self.min is not None and ( + operator.le if self.min_open else operator.lt + )(rv, self.min) + gt_max: bool = self.max is not None and ( + operator.ge if self.max_open else operator.gt + )(rv, self.max) + + if self.clamp: + if lt_min: + return self._clamp(self.min, 1, self.min_open) # type: ignore + + if gt_max: + return self._clamp(self.max, -1, self.max_open) # type: ignore + + if lt_min or gt_max: + self.fail( + _("{value} is not in the range {range}.").format( + value=rv, range=self._describe_range() + ), + param, + ctx, + ) + + return rv + + def _clamp(self, bound: float, dir: t.Literal[1, -1], open: bool) -> float: + """Find the valid value to clamp to bound in the given + direction. + + :param bound: The boundary value. + :param dir: 1 or -1 indicating the direction to move. + :param open: If true, the range does not include the bound. + """ + raise NotImplementedError + + def _describe_range(self) -> str: + """Describe the range for use in help text.""" + if self.min is None: + op = "<" if self.max_open else "<=" + return f"x{op}{self.max}" + + if self.max is None: + op = ">" if self.min_open else ">=" + return f"x{op}{self.min}" + + lop = "<" if self.min_open else "<=" + rop = "<" if self.max_open else "<=" + return f"{self.min}{lop}x{rop}{self.max}" + + def __repr__(self) -> str: + clamp = " clamped" if self.clamp else "" + return f"<{type(self).__name__} {self._describe_range()}{clamp}>" + + +class IntParamType(_NumberParamTypeBase): + name = "integer" + _number_class = int + + def __repr__(self) -> str: + return "INT" + + +class IntRange(_NumberRangeBase, IntParamType): + """Restrict an :data:`click.INT` value to a range of accepted + values. See :ref:`ranges`. + + If ``min`` or ``max`` are not passed, any value is accepted in that + direction. If ``min_open`` or ``max_open`` are enabled, the + corresponding boundary is not included in the range. + + If ``clamp`` is enabled, a value outside the range is clamped to the + boundary instead of failing. + + .. versionchanged:: 8.0 + Added the ``min_open`` and ``max_open`` parameters. + """ + + name = "integer range" + + def _clamp( # type: ignore + self, bound: int, dir: t.Literal[1, -1], open: bool + ) -> int: + if not open: + return bound + + return bound + dir + + +class FloatParamType(_NumberParamTypeBase): + name = "float" + _number_class = float + + def __repr__(self) -> str: + return "FLOAT" + + +class FloatRange(_NumberRangeBase, FloatParamType): + """Restrict a :data:`click.FLOAT` value to a range of accepted + values. See :ref:`ranges`. + + If ``min`` or ``max`` are not passed, any value is accepted in that + direction. If ``min_open`` or ``max_open`` are enabled, the + corresponding boundary is not included in the range. + + If ``clamp`` is enabled, a value outside the range is clamped to the + boundary instead of failing. This is not supported if either + boundary is marked ``open``. + + .. versionchanged:: 8.0 + Added the ``min_open`` and ``max_open`` parameters. + """ + + name = "float range" + + def __init__( + self, + min: float | None = None, + max: float | None = None, + min_open: bool = False, + max_open: bool = False, + clamp: bool = False, + ) -> None: + super().__init__( + min=min, max=max, min_open=min_open, max_open=max_open, clamp=clamp + ) + + if (min_open or max_open) and clamp: + raise TypeError("Clamping is not supported for open bounds.") + + def _clamp(self, bound: float, dir: t.Literal[1, -1], open: bool) -> float: + if not open: + return bound + + # Could use math.nextafter here, but clamping an + # open float range doesn't seem to be particularly useful. It's + # left up to the user to write a callback to do it if needed. + raise RuntimeError("Clamping is not supported for open bounds.") + + +class BoolParamType(ParamType): + name = "boolean" + + bool_states: dict[str, bool] = { + "1": True, + "0": False, + "yes": True, + "no": False, + "true": True, + "false": False, + "on": True, + "off": False, + "t": True, + "f": False, + "y": True, + "n": False, + # Absence of value is considered False. + "": False, + } + """A mapping of string values to boolean states. + + Mapping is inspired by :py:attr:`configparser.ConfigParser.BOOLEAN_STATES` + and extends it. + + .. caution:: + String values are lower-cased, as the ``str_to_bool`` comparison function + below is case-insensitive. + + .. warning:: + The mapping is not exhaustive, and does not cover all possible boolean strings + representations. It will remains as it is to avoid endless bikeshedding. + + Future work my be considered to make this mapping user-configurable from public + API. + """ + + @staticmethod + def str_to_bool(value: str | bool) -> bool | None: + """Convert a string to a boolean value. + + If the value is already a boolean, it is returned as-is. If the value is a + string, it is stripped of whitespaces and lower-cased, then checked against + the known boolean states pre-defined in the `BoolParamType.bool_states` mapping + above. + + Returns `None` if the value does not match any known boolean state. + """ + if isinstance(value, bool): + return value + return BoolParamType.bool_states.get(value.strip().lower()) + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> bool: + normalized = self.str_to_bool(value) + if normalized is None: + self.fail( + _( + "{value!r} is not a valid boolean. Recognized values: {states}" + ).format(value=value, states=", ".join(sorted(self.bool_states))), + param, + ctx, + ) + return normalized + + def __repr__(self) -> str: + return "BOOL" + + +class UUIDParameterType(ParamType): + name = "uuid" + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> t.Any: + import uuid + + if isinstance(value, uuid.UUID): + return value + + value = value.strip() + + try: + return uuid.UUID(value) + except ValueError: + self.fail( + _("{value!r} is not a valid UUID.").format(value=value), param, ctx + ) + + def __repr__(self) -> str: + return "UUID" + + +class File(ParamType): + """Declares a parameter to be a file for reading or writing. The file + is automatically closed once the context tears down (after the command + finished working). + + Files can be opened for reading or writing. The special value ``-`` + indicates stdin or stdout depending on the mode. + + By default, the file is opened for reading text data, but it can also be + opened in binary mode or for writing. The encoding parameter can be used + to force a specific encoding. + + The `lazy` flag controls if the file should be opened immediately or upon + first IO. The default is to be non-lazy for standard input and output + streams as well as files opened for reading, `lazy` otherwise. When opening a + file lazily for reading, it is still opened temporarily for validation, but + will not be held open until first IO. lazy is mainly useful when opening + for writing to avoid creating the file until it is needed. + + Files can also be opened atomically in which case all writes go into a + separate file in the same folder and upon completion the file will + be moved over to the original location. This is useful if a file + regularly read by other users is modified. + + See :ref:`file-args` for more information. + + .. versionchanged:: 2.0 + Added the ``atomic`` parameter. + """ + + name = "filename" + envvar_list_splitter: t.ClassVar[str] = os.path.pathsep + + def __init__( + self, + mode: str = "r", + encoding: str | None = None, + errors: str | None = "strict", + lazy: bool | None = None, + atomic: bool = False, + ) -> None: + self.mode = mode + self.encoding = encoding + self.errors = errors + self.lazy = lazy + self.atomic = atomic + + def to_info_dict(self) -> dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict.update(mode=self.mode, encoding=self.encoding) + return info_dict + + def resolve_lazy_flag(self, value: str | os.PathLike[str]) -> bool: + if self.lazy is not None: + return self.lazy + if os.fspath(value) == "-": + return False + elif "w" in self.mode: + return True + return False + + def convert( + self, + value: str | os.PathLike[str] | t.IO[t.Any], + param: Parameter | None, + ctx: Context | None, + ) -> t.IO[t.Any]: + if _is_file_like(value): + return value + + value = t.cast("str | os.PathLike[str]", value) + + try: + lazy = self.resolve_lazy_flag(value) + + if lazy: + lf = LazyFile( + value, self.mode, self.encoding, self.errors, atomic=self.atomic + ) + + if ctx is not None: + ctx.call_on_close(lf.close_intelligently) + + return t.cast("t.IO[t.Any]", lf) + + f, should_close = open_stream( + value, self.mode, self.encoding, self.errors, atomic=self.atomic + ) + + # If a context is provided, we automatically close the file + # at the end of the context execution (or flush out). If a + # context does not exist, it's the caller's responsibility to + # properly close the file. This for instance happens when the + # type is used with prompts. + if ctx is not None: + if should_close: + ctx.call_on_close(safecall(f.close)) + else: + ctx.call_on_close(safecall(f.flush)) + + return f + except OSError as e: + self.fail(f"'{format_filename(value)}': {e.strerror}", param, ctx) + + def shell_complete( + self, ctx: Context, param: Parameter, incomplete: str + ) -> list[CompletionItem]: + """Return a special completion marker that tells the completion + system to use the shell to provide file path completions. + + :param ctx: Invocation context for this command. + :param param: The parameter that is requesting completion. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + from click.shell_completion import CompletionItem + + return [CompletionItem(incomplete, type="file")] + + +def _is_file_like(value: t.Any) -> te.TypeGuard[t.IO[t.Any]]: + return hasattr(value, "read") or hasattr(value, "write") + + +class Path(ParamType): + """The ``Path`` type is similar to the :class:`File` type, but + returns the filename instead of an open file. Various checks can be + enabled to validate the type of file and permissions. + + :param exists: The file or directory needs to exist for the value to + be valid. If this is not set to ``True``, and the file does not + exist, then all further checks are silently skipped. + :param file_okay: Allow a file as a value. + :param dir_okay: Allow a directory as a value. + :param readable: if true, a readable check is performed. + :param writable: if true, a writable check is performed. + :param executable: if true, an executable check is performed. + :param resolve_path: Make the value absolute and resolve any + symlinks. A ``~`` is not expanded, as this is supposed to be + done by the shell only. + :param allow_dash: Allow a single dash as a value, which indicates + a standard stream (but does not open it). Use + :func:`~click.open_file` to handle opening this value. + :param path_type: Convert the incoming path value to this type. If + ``None``, keep Python's default, which is ``str``. Useful to + convert to :class:`pathlib.Path`. + + .. versionchanged:: 8.1 + Added the ``executable`` parameter. + + .. versionchanged:: 8.0 + Allow passing ``path_type=pathlib.Path``. + + .. versionchanged:: 6.0 + Added the ``allow_dash`` parameter. + """ + + envvar_list_splitter: t.ClassVar[str] = os.path.pathsep + + def __init__( + self, + exists: bool = False, + file_okay: bool = True, + dir_okay: bool = True, + writable: bool = False, + readable: bool = True, + resolve_path: bool = False, + allow_dash: bool = False, + path_type: type[t.Any] | None = None, + executable: bool = False, + ): + self.exists = exists + self.file_okay = file_okay + self.dir_okay = dir_okay + self.readable = readable + self.writable = writable + self.executable = executable + self.resolve_path = resolve_path + self.allow_dash = allow_dash + self.type = path_type + + if self.file_okay and not self.dir_okay: + self.name: str = _("file") + elif self.dir_okay and not self.file_okay: + self.name = _("directory") + else: + self.name = _("path") + + def to_info_dict(self) -> dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict.update( + exists=self.exists, + file_okay=self.file_okay, + dir_okay=self.dir_okay, + writable=self.writable, + readable=self.readable, + allow_dash=self.allow_dash, + ) + return info_dict + + def coerce_path_result( + self, value: str | os.PathLike[str] + ) -> str | bytes | os.PathLike[str]: + if self.type is not None and not isinstance(value, self.type): + if self.type is str: + return os.fsdecode(value) + elif self.type is bytes: + return os.fsencode(value) + else: + return t.cast("os.PathLike[str]", self.type(value)) + + return value + + def convert( + self, + value: str | os.PathLike[str], + param: Parameter | None, + ctx: Context | None, + ) -> str | bytes | os.PathLike[str]: + rv = value + + is_dash = self.file_okay and self.allow_dash and rv in (b"-", "-") + + if not is_dash: + if self.resolve_path: + rv = os.path.realpath(rv) + + try: + st = os.stat(rv) + except OSError: + if not self.exists: + return self.coerce_path_result(rv) + self.fail( + _("{name} {filename!r} does not exist.").format( + name=self.name.title(), filename=format_filename(value) + ), + param, + ctx, + ) + + if not self.file_okay and stat.S_ISREG(st.st_mode): + self.fail( + _("{name} {filename!r} is a file.").format( + name=self.name.title(), filename=format_filename(value) + ), + param, + ctx, + ) + if not self.dir_okay and stat.S_ISDIR(st.st_mode): + self.fail( + _("{name} {filename!r} is a directory.").format( + name=self.name.title(), filename=format_filename(value) + ), + param, + ctx, + ) + + if self.readable and not os.access(rv, os.R_OK): + self.fail( + _("{name} {filename!r} is not readable.").format( + name=self.name.title(), filename=format_filename(value) + ), + param, + ctx, + ) + + if self.writable and not os.access(rv, os.W_OK): + self.fail( + _("{name} {filename!r} is not writable.").format( + name=self.name.title(), filename=format_filename(value) + ), + param, + ctx, + ) + + if self.executable and not os.access(value, os.X_OK): + self.fail( + _("{name} {filename!r} is not executable.").format( + name=self.name.title(), filename=format_filename(value) + ), + param, + ctx, + ) + + return self.coerce_path_result(rv) + + def shell_complete( + self, ctx: Context, param: Parameter, incomplete: str + ) -> list[CompletionItem]: + """Return a special completion marker that tells the completion + system to use the shell to provide path completions for only + directories or any paths. + + :param ctx: Invocation context for this command. + :param param: The parameter that is requesting completion. + :param incomplete: Value being completed. May be empty. + + .. versionadded:: 8.0 + """ + from click.shell_completion import CompletionItem + + type = "dir" if self.dir_okay and not self.file_okay else "file" + return [CompletionItem(incomplete, type=type)] + + +class Tuple(CompositeParamType): + """The default behavior of Click is to apply a type on a value directly. + This works well in most cases, except for when `nargs` is set to a fixed + count and different types should be used for different items. In this + case the :class:`Tuple` type can be used. This type can only be used + if `nargs` is set to a fixed number. + + For more information see :ref:`tuple-type`. + + This can be selected by using a Python tuple literal as a type. + + :param types: a list of types that should be used for the tuple items. + """ + + def __init__(self, types: cabc.Sequence[type[t.Any] | ParamType]) -> None: + self.types: cabc.Sequence[ParamType] = [convert_type(ty) for ty in types] + + def to_info_dict(self) -> dict[str, t.Any]: + info_dict = super().to_info_dict() + info_dict["types"] = [t.to_info_dict() for t in self.types] + return info_dict + + @property + def name(self) -> str: # type: ignore + return f"<{' '.join(ty.name for ty in self.types)}>" + + @property + def arity(self) -> int: # type: ignore + return len(self.types) + + def convert( + self, value: t.Any, param: Parameter | None, ctx: Context | None + ) -> t.Any: + len_type = len(self.types) + len_value = len(value) + + if len_value != len_type: + self.fail( + ngettext( + "{len_type} values are required, but {len_value} was given.", + "{len_type} values are required, but {len_value} were given.", + len_value, + ).format(len_type=len_type, len_value=len_value), + param=param, + ctx=ctx, + ) + + return tuple( + ty(x, param, ctx) for ty, x in zip(self.types, value, strict=False) + ) + + +def convert_type(ty: t.Any | None, default: t.Any | None = None) -> ParamType: + """Find the most appropriate :class:`ParamType` for the given Python + type. If the type isn't provided, it can be inferred from a default + value. + """ + guessed_type = False + + if ty is None and default is not None: + if isinstance(default, (tuple, list)): + # If the default is empty, ty will remain None and will + # return STRING. + if default: + item = default[0] + + # A tuple of tuples needs to detect the inner types. + # Can't call convert recursively because that would + # incorrectly unwind the tuple to a single type. + if isinstance(item, (tuple, list)): + ty = tuple(map(type, item)) + else: + ty = type(item) + else: + ty = type(default) + + guessed_type = True + + if isinstance(ty, tuple): + return Tuple(ty) + + if isinstance(ty, ParamType): + return ty + + if ty is str or ty is None: + return STRING + + if ty is int: + return INT + + if ty is float: + return FLOAT + + if ty is bool: + return BOOL + + if guessed_type: + return STRING + + if __debug__: + try: + if issubclass(ty, ParamType): + raise AssertionError( + f"Attempted to use an uninstantiated parameter type ({ty})." + ) + except TypeError: + # ty is an instance (correct), so issubclass fails. + pass + + return FuncParamType(ty) + + +#: A dummy parameter type that just does nothing. From a user's +#: perspective this appears to just be the same as `STRING` but +#: internally no string conversion takes place if the input was bytes. +#: This is usually useful when working with file paths as they can +#: appear in bytes and unicode. +#: +#: For path related uses the :class:`Path` type is a better choice but +#: there are situations where an unprocessed type is useful which is why +#: it is is provided. +#: +#: .. versionadded:: 4.0 +UNPROCESSED = UnprocessedParamType() + +#: A unicode string parameter type which is the implicit default. This +#: can also be selected by using ``str`` as type. +STRING = StringParamType() + +#: An integer parameter. This can also be selected by using ``int`` as +#: type. +INT = IntParamType() + +#: A floating point value parameter. This can also be selected by using +#: ``float`` as type. +FLOAT = FloatParamType() + +#: A boolean parameter. This is the default for boolean flags. This can +#: also be selected by using ``bool`` as a type. +BOOL = BoolParamType() + +#: A UUID parameter. +UUID = UUIDParameterType() + + +class OptionHelpExtra(t.TypedDict, total=False): + envvars: tuple[str, ...] + default: str + range: str + required: str diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/click/utils.py b/tools/converter-generator/venv/lib/python3.11/site-packages/click/utils.py new file mode 100644 index 0000000..beae26f --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/click/utils.py @@ -0,0 +1,627 @@ +from __future__ import annotations + +import collections.abc as cabc +import os +import re +import sys +import typing as t +from functools import update_wrapper +from types import ModuleType +from types import TracebackType + +from ._compat import _default_text_stderr +from ._compat import _default_text_stdout +from ._compat import _find_binary_writer +from ._compat import auto_wrap_for_ansi +from ._compat import binary_streams +from ._compat import open_stream +from ._compat import should_strip_ansi +from ._compat import strip_ansi +from ._compat import text_streams +from ._compat import WIN +from .globals import resolve_color_default + +if t.TYPE_CHECKING: + import typing_extensions as te + + P = te.ParamSpec("P") + +R = t.TypeVar("R") + + +def _posixify(name: str) -> str: + return "-".join(name.split()).lower() + + +def safecall(func: t.Callable[P, R]) -> t.Callable[P, R | None]: + """Wraps a function so that it swallows exceptions.""" + + def wrapper(*args: P.args, **kwargs: P.kwargs) -> R | None: + try: + return func(*args, **kwargs) + except Exception: + pass + return None + + return update_wrapper(wrapper, func) + + +def make_str(value: t.Any) -> str: + """Converts a value into a valid string.""" + if isinstance(value, bytes): + try: + return value.decode(sys.getfilesystemencoding()) + except UnicodeError: + return value.decode("utf-8", "replace") + return str(value) + + +def make_default_short_help(help: str, max_length: int = 45) -> str: + """Returns a condensed version of help string.""" + # Consider only the first paragraph. + paragraph_end = help.find("\n\n") + + if paragraph_end != -1: + help = help[:paragraph_end] + + # Collapse newlines, tabs, and spaces. + words = help.split() + + if not words: + return "" + + # The first paragraph started with a "no rewrap" marker, ignore it. + if words[0] == "\b": + words = words[1:] + + total_length = 0 + last_index = len(words) - 1 + + for i, word in enumerate(words): + total_length += len(word) + (i > 0) + + if total_length > max_length: # too long, truncate + break + + if word[-1] == ".": # sentence end, truncate without "..." + return " ".join(words[: i + 1]) + + if total_length == max_length and i != last_index: + break # not at sentence end, truncate with "..." + else: + return " ".join(words) # no truncation needed + + # Account for the length of the suffix. + total_length += len("...") + + # remove words until the length is short enough + while i > 0: + total_length -= len(words[i]) + (i > 0) + + if total_length <= max_length: + break + + i -= 1 + + return " ".join(words[:i]) + "..." + + +class LazyFile: + """A lazy file works like a regular file but it does not fully open + the file but it does perform some basic checks early to see if the + filename parameter does make sense. This is useful for safely opening + files for writing. + """ + + def __init__( + self, + filename: str | os.PathLike[str], + mode: str = "r", + encoding: str | None = None, + errors: str | None = "strict", + atomic: bool = False, + ): + self.name: str = os.fspath(filename) + self.mode = mode + self.encoding = encoding + self.errors = errors + self.atomic = atomic + self._f: t.IO[t.Any] | None + self.should_close: bool + + if self.name == "-": + self._f, self.should_close = open_stream(filename, mode, encoding, errors) + else: + if "r" in mode: + # Open and close the file in case we're opening it for + # reading so that we can catch at least some errors in + # some cases early. + open(filename, mode).close() + self._f = None + self.should_close = True + + def __getattr__(self, name: str) -> t.Any: + return getattr(self.open(), name) + + def __repr__(self) -> str: + if self._f is not None: + return repr(self._f) + return f"" + + def open(self) -> t.IO[t.Any]: + """Opens the file if it's not yet open. This call might fail with + a :exc:`FileError`. Not handling this error will produce an error + that Click shows. + """ + if self._f is not None: + return self._f + try: + rv, self.should_close = open_stream( + self.name, self.mode, self.encoding, self.errors, atomic=self.atomic + ) + except OSError as e: + from .exceptions import FileError + + raise FileError(self.name, hint=e.strerror) from e + self._f = rv + return rv + + def close(self) -> None: + """Closes the underlying file, no matter what.""" + if self._f is not None: + self._f.close() + + def close_intelligently(self) -> None: + """This function only closes the file if it was opened by the lazy + file wrapper. For instance this will never close stdin. + """ + if self.should_close: + self.close() + + def __enter__(self) -> LazyFile: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + tb: TracebackType | None, + ) -> None: + self.close_intelligently() + + def __iter__(self) -> cabc.Iterator[t.AnyStr]: + self.open() + return iter(self._f) # type: ignore + + +class KeepOpenFile: + def __init__(self, file: t.IO[t.Any]) -> None: + self._file: t.IO[t.Any] = file + + def __getattr__(self, name: str) -> t.Any: + return getattr(self._file, name) + + def __enter__(self) -> KeepOpenFile: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + tb: TracebackType | None, + ) -> None: + pass + + def __repr__(self) -> str: + return repr(self._file) + + def __iter__(self) -> cabc.Iterator[t.AnyStr]: + return iter(self._file) + + +def echo( + message: t.Any | None = None, + file: t.IO[t.Any] | None = None, + nl: bool = True, + err: bool = False, + color: bool | None = None, +) -> None: + """Print a message and newline to stdout or a file. This should be + used instead of :func:`print` because it provides better support + for different data, files, and environments. + + Compared to :func:`print`, this does the following: + + - Ensures that the output encoding is not misconfigured on Linux. + - Supports Unicode in the Windows console. + - Supports writing to binary outputs, and supports writing bytes + to text outputs. + - Supports colors and styles on Windows. + - Removes ANSI color and style codes if the output does not look + like an interactive terminal. + - Always flushes the output. + + :param message: The string or bytes to output. Other objects are + converted to strings. + :param file: The file to write to. Defaults to ``stdout``. + :param err: Write to ``stderr`` instead of ``stdout``. + :param nl: Print a newline after the message. Enabled by default. + :param color: Force showing or hiding colors and other styles. By + default Click will remove color if the output does not look like + an interactive terminal. + + .. versionchanged:: 6.0 + Support Unicode output on the Windows console. Click does not + modify ``sys.stdout``, so ``sys.stdout.write()`` and ``print()`` + will still not support Unicode. + + .. versionchanged:: 4.0 + Added the ``color`` parameter. + + .. versionadded:: 3.0 + Added the ``err`` parameter. + + .. versionchanged:: 2.0 + Support colors on Windows if colorama is installed. + """ + if file is None: + if err: + file = _default_text_stderr() + else: + file = _default_text_stdout() + + # There are no standard streams attached to write to. For example, + # pythonw on Windows. + if file is None: + return + + # Convert non bytes/text into the native string type. + if message is not None and not isinstance(message, (str, bytes, bytearray)): + out: str | bytes | bytearray | None = str(message) + else: + out = message + + if nl: + out = out or "" + if isinstance(out, str): + out += "\n" + else: + out += b"\n" + + if not out: + file.flush() + return + + # If there is a message and the value looks like bytes, we manually + # need to find the binary stream and write the message in there. + # This is done separately so that most stream types will work as you + # would expect. Eg: you can write to StringIO for other cases. + if isinstance(out, (bytes, bytearray)): + binary_file = _find_binary_writer(file) + + if binary_file is not None: + file.flush() + binary_file.write(out) + binary_file.flush() + return + + # ANSI style code support. For no message or bytes, nothing happens. + # When outputting to a file instead of a terminal, strip codes. + else: + color = resolve_color_default(color) + + if should_strip_ansi(file, color): + out = strip_ansi(out) + elif WIN: + if auto_wrap_for_ansi is not None: + file = auto_wrap_for_ansi(file, color) # type: ignore + elif not color: + out = strip_ansi(out) + + file.write(out) # type: ignore + file.flush() + + +def get_binary_stream(name: t.Literal["stdin", "stdout", "stderr"]) -> t.BinaryIO: + """Returns a system stream for byte processing. + + :param name: the name of the stream to open. Valid names are ``'stdin'``, + ``'stdout'`` and ``'stderr'`` + """ + opener = binary_streams.get(name) + if opener is None: + raise TypeError(f"Unknown standard stream '{name}'") + return opener() + + +def get_text_stream( + name: t.Literal["stdin", "stdout", "stderr"], + encoding: str | None = None, + errors: str | None = "strict", +) -> t.TextIO: + """Returns a system stream for text processing. This usually returns + a wrapped stream around a binary stream returned from + :func:`get_binary_stream` but it also can take shortcuts for already + correctly configured streams. + + :param name: the name of the stream to open. Valid names are ``'stdin'``, + ``'stdout'`` and ``'stderr'`` + :param encoding: overrides the detected default encoding. + :param errors: overrides the default error mode. + """ + opener = text_streams.get(name) + if opener is None: + raise TypeError(f"Unknown standard stream '{name}'") + return opener(encoding, errors) + + +def open_file( + filename: str | os.PathLike[str], + mode: str = "r", + encoding: str | None = None, + errors: str | None = "strict", + lazy: bool = False, + atomic: bool = False, +) -> t.IO[t.Any]: + """Open a file, with extra behavior to handle ``'-'`` to indicate + a standard stream, lazy open on write, and atomic write. Similar to + the behavior of the :class:`~click.File` param type. + + If ``'-'`` is given to open ``stdout`` or ``stdin``, the stream is + wrapped so that using it in a context manager will not close it. + This makes it possible to use the function without accidentally + closing a standard stream: + + .. code-block:: python + + with open_file(filename) as f: + ... + + :param filename: The name or Path of the file to open, or ``'-'`` for + ``stdin``/``stdout``. + :param mode: The mode in which to open the file. + :param encoding: The encoding to decode or encode a file opened in + text mode. + :param errors: The error handling mode. + :param lazy: Wait to open the file until it is accessed. For read + mode, the file is temporarily opened to raise access errors + early, then closed until it is read again. + :param atomic: Write to a temporary file and replace the given file + on close. + + .. versionadded:: 3.0 + """ + if lazy: + return t.cast( + "t.IO[t.Any]", LazyFile(filename, mode, encoding, errors, atomic=atomic) + ) + + f, should_close = open_stream(filename, mode, encoding, errors, atomic=atomic) + + if not should_close: + f = t.cast("t.IO[t.Any]", KeepOpenFile(f)) + + return f + + +def format_filename( + filename: str | bytes | os.PathLike[str] | os.PathLike[bytes], + shorten: bool = False, +) -> str: + """Format a filename as a string for display. Ensures the filename can be + displayed by replacing any invalid bytes or surrogate escapes in the name + with the replacement character ``�``. + + Invalid bytes or surrogate escapes will raise an error when written to a + stream with ``errors="strict"``. This will typically happen with ``stdout`` + when the locale is something like ``en_GB.UTF-8``. + + Many scenarios *are* safe to write surrogates though, due to PEP 538 and + PEP 540, including: + + - Writing to ``stderr``, which uses ``errors="backslashreplace"``. + - The system has ``LANG=C.UTF-8``, ``C``, or ``POSIX``. Python opens + stdout and stderr with ``errors="surrogateescape"``. + - None of ``LANG/LC_*`` are set. Python assumes ``LANG=C.UTF-8``. + - Python is started in UTF-8 mode with ``PYTHONUTF8=1`` or ``-X utf8``. + Python opens stdout and stderr with ``errors="surrogateescape"``. + + :param filename: formats a filename for UI display. This will also convert + the filename into unicode without failing. + :param shorten: this optionally shortens the filename to strip of the + path that leads up to it. + """ + if shorten: + filename = os.path.basename(filename) + else: + filename = os.fspath(filename) + + if isinstance(filename, bytes): + filename = filename.decode(sys.getfilesystemencoding(), "replace") + else: + filename = filename.encode("utf-8", "surrogateescape").decode( + "utf-8", "replace" + ) + + return filename + + +def get_app_dir(app_name: str, roaming: bool = True, force_posix: bool = False) -> str: + r"""Returns the config folder for the application. The default behavior + is to return whatever is most appropriate for the operating system. + + To give you an idea, for an app called ``"Foo Bar"``, something like + the following folders could be returned: + + Mac OS X: + ``~/Library/Application Support/Foo Bar`` + Mac OS X (POSIX): + ``~/.foo-bar`` + Unix: + ``~/.config/foo-bar`` + Unix (POSIX): + ``~/.foo-bar`` + Windows (roaming): + ``C:\Users\\AppData\Roaming\Foo Bar`` + Windows (not roaming): + ``C:\Users\\AppData\Local\Foo Bar`` + + .. versionadded:: 2.0 + + :param app_name: the application name. This should be properly capitalized + and can contain whitespace. + :param roaming: controls if the folder should be roaming or not on Windows. + Has no effect otherwise. + :param force_posix: if this is set to `True` then on any POSIX system the + folder will be stored in the home folder with a leading + dot instead of the XDG config home or darwin's + application support folder. + """ + if WIN: + key = "APPDATA" if roaming else "LOCALAPPDATA" + folder = os.environ.get(key) + if folder is None: + folder = os.path.expanduser("~") + return os.path.join(folder, app_name) + if force_posix: + return os.path.join(os.path.expanduser(f"~/.{_posixify(app_name)}")) + if sys.platform == "darwin": + return os.path.join( + os.path.expanduser("~/Library/Application Support"), app_name + ) + return os.path.join( + os.environ.get("XDG_CONFIG_HOME", os.path.expanduser("~/.config")), + _posixify(app_name), + ) + + +class PacifyFlushWrapper: + """This wrapper is used to catch and suppress BrokenPipeErrors resulting + from ``.flush()`` being called on broken pipe during the shutdown/final-GC + of the Python interpreter. Notably ``.flush()`` is always called on + ``sys.stdout`` and ``sys.stderr``. So as to have minimal impact on any + other cleanup code, and the case where the underlying file is not a broken + pipe, all calls and attributes are proxied. + """ + + def __init__(self, wrapped: t.IO[t.Any]) -> None: + self.wrapped = wrapped + + def flush(self) -> None: + try: + self.wrapped.flush() + except OSError as e: + import errno + + if e.errno != errno.EPIPE: + raise + + def __getattr__(self, attr: str) -> t.Any: + return getattr(self.wrapped, attr) + + +def _detect_program_name( + path: str | None = None, _main: ModuleType | None = None +) -> str: + """Determine the command used to run the program, for use in help + text. If a file or entry point was executed, the file name is + returned. If ``python -m`` was used to execute a module or package, + ``python -m name`` is returned. + + This doesn't try to be too precise, the goal is to give a concise + name for help text. Files are only shown as their name without the + path. ``python`` is only shown for modules, and the full path to + ``sys.executable`` is not shown. + + :param path: The Python file being executed. Python puts this in + ``sys.argv[0]``, which is used by default. + :param _main: The ``__main__`` module. This should only be passed + during internal testing. + + .. versionadded:: 8.0 + Based on command args detection in the Werkzeug reloader. + + :meta private: + """ + if _main is None: + _main = sys.modules["__main__"] + + if not path: + path = sys.argv[0] + + # The value of __package__ indicates how Python was called. It may + # not exist if a setuptools script is installed as an egg. It may be + # set incorrectly for entry points created with pip on Windows. + # It is set to "" inside a Shiv or PEX zipapp. + if getattr(_main, "__package__", None) in {None, ""} or ( + os.name == "nt" + and _main.__package__ == "" + and not os.path.exists(path) + and os.path.exists(f"{path}.exe") + ): + # Executed a file, like "python app.py". + return os.path.basename(path) + + # Executed a module, like "python -m example". + # Rewritten by Python from "-m script" to "/path/to/script.py". + # Need to look at main module to determine how it was executed. + py_module = t.cast(str, _main.__package__) + name = os.path.splitext(os.path.basename(path))[0] + + # A submodule like "example.cli". + if name != "__main__": + py_module = f"{py_module}.{name}" + + return f"python -m {py_module.lstrip('.')}" + + +def _expand_args( + args: cabc.Iterable[str], + *, + user: bool = True, + env: bool = True, + glob_recursive: bool = True, +) -> list[str]: + """Simulate Unix shell expansion with Python functions. + + See :func:`glob.glob`, :func:`os.path.expanduser`, and + :func:`os.path.expandvars`. + + This is intended for use on Windows, where the shell does not do any + expansion. It may not exactly match what a Unix shell would do. + + :param args: List of command line arguments to expand. + :param user: Expand user home directory. + :param env: Expand environment variables. + :param glob_recursive: ``**`` matches directories recursively. + + .. versionchanged:: 8.1 + Invalid glob patterns are treated as empty expansions rather + than raising an error. + + .. versionadded:: 8.0 + + :meta private: + """ + from glob import glob + + out = [] + + for arg in args: + if user: + arg = os.path.expanduser(arg) + + if env: + arg = os.path.expandvars(arg) + + try: + matches = glob(arg, recursive=glob_recursive) + except re.error: + matches = [] + + if not matches: + out.append(arg) + else: + out.extend(matches) + + return out diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/INSTALLER b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/METADATA b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/METADATA new file mode 100644 index 0000000..757481a --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/METADATA @@ -0,0 +1,221 @@ +Metadata-Version: 2.4 +Name: coverage +Version: 7.11.3 +Summary: Code coverage measurement for Python +Home-page: https://github.com/coveragepy/coveragepy +Author: Ned Batchelder and 244 others +Author-email: ned@nedbatchelder.com +License: Apache-2.0 +Project-URL: Documentation, https://coverage.readthedocs.io/en/7.11.3 +Project-URL: Funding, https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=pypi +Project-URL: Issues, https://github.com/coveragepy/coveragepy/issues +Project-URL: Mastodon, https://hachyderm.io/@coveragepy +Project-URL: Mastodon (nedbat), https://hachyderm.io/@nedbat +Keywords: code coverage testing +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Programming Language :: Python :: 3.15 +Classifier: Programming Language :: Python :: Free Threading :: 3 - Stable +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Quality Assurance +Classifier: Topic :: Software Development :: Testing +Classifier: Development Status :: 5 - Production/Stable +Requires-Python: >=3.10 +Description-Content-Type: text/x-rst +License-File: LICENSE.txt +Provides-Extra: toml +Requires-Dist: tomli; python_full_version <= "3.11.0a6" and extra == "toml" +Dynamic: author +Dynamic: author-email +Dynamic: classifier +Dynamic: description +Dynamic: description-content-type +Dynamic: home-page +Dynamic: keywords +Dynamic: license +Dynamic: license-file +Dynamic: project-url +Dynamic: provides-extra +Dynamic: requires-python +Dynamic: summary + +.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +.. For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +=========== +Coverage.py +=========== + +Code coverage measurement for Python. + +.. image:: https://raw.githubusercontent.com/vshymanskyy/StandWithUkraine/main/banner2-direct.svg + :target: https://vshymanskyy.github.io/StandWithUkraine + :alt: Stand with Ukraine + +------------- + +| |kit| |license| |versions| +| |test-status| |quality-status| |docs| |metacov| +| |tidelift| |sponsor| |stars| |mastodon-coveragepy| |mastodon-nedbat| + |bluesky-nedbat| + +Coverage.py measures code coverage, typically during test execution. It uses +the code analysis tools and tracing hooks provided in the Python standard +library to determine which lines are executable, and which have been executed. + +Coverage.py runs on these versions of Python: + +.. PYVERSIONS + +* Python 3.10 through 3.15 alpha, including free-threading. +* PyPy3 versions 3.10 and 3.11. + +Documentation is on `Read the Docs`_. Code repository and issue tracker are on +`GitHub`_. + +.. _Read the Docs: https://coverage.readthedocs.io/en/7.11.3/ +.. _GitHub: https://github.com/coveragepy/coveragepy + +**New in 7.x:** +``[run] patch`` setting; +``--save-signal`` option; +``[run] core`` setting; +``[run] source_dirs`` setting; +``Coverage.branch_stats()``; +multi-line exclusion patterns; +function/class reporting; +experimental support for sys.monitoring; +dropped support for Python up to 3.9; +added ``Coverage.collect()`` context manager; +improved data combining; +``[run] exclude_also`` setting; +``report --format=``; +type annotations. + +**New in 6.x:** +dropped support for Python 2.7, 3.5, and 3.6; +write data on SIGTERM; +added support for 3.10 match/case statements. + + +For Enterprise +-------------- + +.. |tideliftlogo| image:: https://nedbatchelder.com/pix/Tidelift_Logo_small.png + :alt: Tidelift + :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme + +.. list-table:: + :widths: 10 100 + + * - |tideliftlogo| + - `Available as part of the Tidelift Subscription. `_ + Coverage and thousands of other packages are working with + Tidelift to deliver one enterprise subscription that covers all of the open + source you use. If you want the flexibility of open source and the confidence + of commercial-grade software, this is for you. + `Learn more. `_ + + +Getting Started +--------------- + +Looking to run ``coverage`` on your test suite? See the `Quick Start section`_ +of the docs. + +.. _Quick Start section: https://coverage.readthedocs.io/en/7.11.3/#quick-start + + +Change history +-------------- + +The complete history of changes is on the `change history page`_. + +.. _change history page: https://coverage.readthedocs.io/en/7.11.3/changes.html + + +Code of Conduct +--------------- + +Everyone participating in the coverage.py project is expected to treat other +people with respect and to follow the guidelines articulated in the `Python +Community Code of Conduct`_. + +.. _Python Community Code of Conduct: https://www.python.org/psf/codeofconduct/ + + +Contributing +------------ + +Found a bug? Want to help improve the code or documentation? See the +`Contributing section`_ of the docs. + +.. _Contributing section: https://coverage.readthedocs.io/en/7.11.3/contributing.html + + +Security +-------- + +To report a security vulnerability, please use the `Tidelift security +contact`_. Tidelift will coordinate the fix and disclosure. + +.. _Tidelift security contact: https://tidelift.com/security + + +License +------- + +Licensed under the `Apache 2.0 License`_. For details, see `NOTICE.txt`_. + +.. _Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 +.. _NOTICE.txt: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + + +.. |test-status| image:: https://github.com/coveragepy/coveragepy/actions/workflows/testsuite.yml/badge.svg?branch=main&event=push + :target: https://github.com/coveragepy/coveragepy/actions/workflows/testsuite.yml + :alt: Test suite status +.. |quality-status| image:: https://github.com/coveragepy/coveragepy/actions/workflows/quality.yml/badge.svg?branch=main&event=push + :target: https://github.com/coveragepy/coveragepy/actions/workflows/quality.yml + :alt: Quality check status +.. |docs| image:: https://readthedocs.org/projects/coverage/badge/?version=latest&style=flat + :target: https://coverage.readthedocs.io/en/7.11.3/ + :alt: Documentation +.. |kit| image:: https://img.shields.io/pypi/v/coverage + :target: https://pypi.org/project/coverage/ + :alt: PyPI status +.. |versions| image:: https://img.shields.io/pypi/pyversions/coverage.svg?logo=python&logoColor=FBE072 + :target: https://pypi.org/project/coverage/ + :alt: Python versions supported +.. |license| image:: https://img.shields.io/pypi/l/coverage.svg + :target: https://github.com/coveragepy/coveragepy/blob/main/LICENSE.txt + :alt: License +.. |metacov| image:: https://img.shields.io/endpoint?url=https://gist.githubusercontent.com/nedbat/8c6980f77988a327348f9b02bbaf67f5/raw/metacov.json + :target: https://coveragepy.github.io/metacov-reports/latest.html + :alt: Coverage reports +.. |tidelift| image:: https://tidelift.com/badges/package/pypi/coverage + :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme + :alt: Tidelift +.. |stars| image:: https://img.shields.io/github/stars/coveragepy/coveragepy.svg?logo=github&style=flat + :target: https://github.com/coveragepy/coveragepy/stargazers + :alt: GitHub stars +.. |mastodon-nedbat| image:: https://img.shields.io/badge/dynamic/json?style=flat&labelColor=450657&logo=mastodon&logoColor=ffffff&label=@nedbat&query=followers_count&url=https%3A%2F%2Fhachyderm.io%2Fapi%2Fv1%2Faccounts%2Flookup%3Facct=nedbat + :target: https://hachyderm.io/@nedbat + :alt: nedbat on Mastodon +.. |mastodon-coveragepy| image:: https://img.shields.io/badge/dynamic/json?style=flat&labelColor=450657&logo=mastodon&logoColor=ffffff&label=@coveragepy&query=followers_count&url=https%3A%2F%2Fhachyderm.io%2Fapi%2Fv1%2Faccounts%2Flookup%3Facct=coveragepy + :target: https://hachyderm.io/@coveragepy + :alt: coveragepy on Mastodon +.. |bluesky-nedbat| image:: https://img.shields.io/badge/dynamic/json?style=flat&color=96a3b0&labelColor=3686f7&logo=icloud&logoColor=white&label=@nedbat&url=https%3A%2F%2Fpublic.api.bsky.app%2Fxrpc%2Fapp.bsky.actor.getProfile%3Factor=nedbat.com&query=followersCount + :target: https://bsky.app/profile/nedbat.com + :alt: nedbat on Bluesky +.. |sponsor| image:: https://img.shields.io/badge/%E2%9D%A4-Sponsor%20me-brightgreen?style=flat&logo=GitHub + :target: https://github.com/sponsors/nedbat + :alt: Sponsor me on GitHub diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/RECORD b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/RECORD new file mode 100644 index 0000000..fcf8f16 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/RECORD @@ -0,0 +1,106 @@ +../../../bin/coverage,sha256=-6I-tGmIkX6GYEcU8oA2yHCLN1grgfu99ICQXHX8M7E,300 +../../../bin/coverage-3.11,sha256=-6I-tGmIkX6GYEcU8oA2yHCLN1grgfu99ICQXHX8M7E,300 +../../../bin/coverage3,sha256=-6I-tGmIkX6GYEcU8oA2yHCLN1grgfu99ICQXHX8M7E,300 +coverage-7.11.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +coverage-7.11.3.dist-info/METADATA,sha256=pBsQdCYZGF90vUd6oIbIwbbGIFIQTxCQNvhwLf0EkPc,9074 +coverage-7.11.3.dist-info/RECORD,, +coverage-7.11.3.dist-info/WHEEL,sha256=TY6wS7uh4kKn2hb4-XnLjkub5JFV8id422w1jhyVjcQ,137 +coverage-7.11.3.dist-info/entry_points.txt,sha256=1YZ9VNHzvplT76fAhqRNQLG8wmPI5AtUKig-3sjqQJo,123 +coverage-7.11.3.dist-info/licenses/LICENSE.txt,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174 +coverage-7.11.3.dist-info/top_level.txt,sha256=BjhyiIvusb5OJkqCXjRncTF3soKF-mDOby-hxkWwwv0,9 +coverage/__init__.py,sha256=deRlSPNGXQa-6Mr9q3FpSXvS51dc-xpHxdLS58TDE3k,1065 +coverage/__main__.py,sha256=rAq5mnzJvTfjnZxufsY-YoKZkHM81vdhkUsAmOU4wt8,297 +coverage/__pycache__/__init__.cpython-311.pyc,, +coverage/__pycache__/__main__.cpython-311.pyc,, +coverage/__pycache__/annotate.cpython-311.pyc,, +coverage/__pycache__/bytecode.cpython-311.pyc,, +coverage/__pycache__/cmdline.cpython-311.pyc,, +coverage/__pycache__/collector.cpython-311.pyc,, +coverage/__pycache__/config.cpython-311.pyc,, +coverage/__pycache__/context.cpython-311.pyc,, +coverage/__pycache__/control.cpython-311.pyc,, +coverage/__pycache__/core.cpython-311.pyc,, +coverage/__pycache__/data.cpython-311.pyc,, +coverage/__pycache__/debug.cpython-311.pyc,, +coverage/__pycache__/disposition.cpython-311.pyc,, +coverage/__pycache__/env.cpython-311.pyc,, +coverage/__pycache__/exceptions.cpython-311.pyc,, +coverage/__pycache__/execfile.cpython-311.pyc,, +coverage/__pycache__/files.cpython-311.pyc,, +coverage/__pycache__/html.cpython-311.pyc,, +coverage/__pycache__/inorout.cpython-311.pyc,, +coverage/__pycache__/jsonreport.cpython-311.pyc,, +coverage/__pycache__/lcovreport.cpython-311.pyc,, +coverage/__pycache__/misc.cpython-311.pyc,, +coverage/__pycache__/multiproc.cpython-311.pyc,, +coverage/__pycache__/numbits.cpython-311.pyc,, +coverage/__pycache__/parser.cpython-311.pyc,, +coverage/__pycache__/patch.cpython-311.pyc,, +coverage/__pycache__/phystokens.cpython-311.pyc,, +coverage/__pycache__/plugin.cpython-311.pyc,, +coverage/__pycache__/plugin_support.cpython-311.pyc,, +coverage/__pycache__/python.cpython-311.pyc,, +coverage/__pycache__/pytracer.cpython-311.pyc,, +coverage/__pycache__/regions.cpython-311.pyc,, +coverage/__pycache__/report.cpython-311.pyc,, +coverage/__pycache__/report_core.cpython-311.pyc,, +coverage/__pycache__/results.cpython-311.pyc,, +coverage/__pycache__/sqldata.cpython-311.pyc,, +coverage/__pycache__/sqlitedb.cpython-311.pyc,, +coverage/__pycache__/sysmon.cpython-311.pyc,, +coverage/__pycache__/templite.cpython-311.pyc,, +coverage/__pycache__/tomlconfig.cpython-311.pyc,, +coverage/__pycache__/types.cpython-311.pyc,, +coverage/__pycache__/version.cpython-311.pyc,, +coverage/__pycache__/xmlreport.cpython-311.pyc,, +coverage/annotate.py,sha256=vI_P4Qj9W7OqdJaMJyvSp57hvT6ljCsnEf5ZyfaKvkM,3751 +coverage/bytecode.py,sha256=n_4YzE8Gas37i0mRgwvbTgu4v6yfekCh4qWZ7YVq0tk,6666 +coverage/cmdline.py,sha256=t7l_LoWAUhUuEmMogiVEAOducHdrudqmwezrZxVhaYE,36819 +coverage/collector.py,sha256=doMi0mv8Z-zDY3kf7NJifLjH3Kz7QuXr3o9aSrHzMTQ,18541 +coverage/config.py,sha256=tXVjZ0EwLI9oxEcRCVJZRiDktCXoUpD1d7L0JNvBM5Y,25964 +coverage/context.py,sha256=Ef1NlMuuD5g2Z3vJhK9fr6yg_NxOYTJmGTACRLU1uno,2434 +coverage/control.py,sha256=ZH_GxR7uc9uJBOkj1IKv0xsJP-63g3JuEDe21QQHz88,54818 +coverage/core.py,sha256=wQG--Xm1Hvyt_jYO7VgfP-CT2vE1o4zIbFI2CtfTsXw,5404 +coverage/data.py,sha256=b-4KXkMlpocqS-T_HHa1LlPOxnz-I35OpcH4ztfvYP4,8127 +coverage/debug.py,sha256=5f1JSbSVeQnulJTOu0E_kelo29cih5A9gS2o27OX3h8,21753 +coverage/disposition.py,sha256=T6p5yH1b6dnnsXq7YI9nbP8UAqFk6V9PyFOivkV5Qr8,1897 +coverage/env.py,sha256=_0HqQJiQIeY44ziVwiMohYIaox0btbc2fLoJMnSt1RI,4972 +coverage/exceptions.py,sha256=VD6utQATQ5GRIAK0SPJyOlL2och54qRfVD9vlt9EElA,1640 +coverage/execfile.py,sha256=IL3TzwhAxiMLs6QwUvF-HK-A2Scjgh8GjkpOKhcWFtY,11993 +coverage/files.py,sha256=WrI3dw_zEMG1YNS6674vFP9TPoUXvXg0itxVSWeOoNc,19357 +coverage/html.py,sha256=nr9NHi8Y6-9q9Y282-997kgj3Bsh4krf5Rer3PEvZ2k,31384 +coverage/htmlfiles/coverage_html.js,sha256=x-6GdVfJmH4P1Cdtj19cAJL9Hs7Nh8qKpyLm5YFO7FA,25476 +coverage/htmlfiles/favicon_32.png,sha256=vIEA-odDwRvSQ-syWfSwEnWGUWEv2b-Tv4tzTRfwJWE,1732 +coverage/htmlfiles/index.html,sha256=U-BpCphFKtZqwIv4Sm4xjOlkYjyRcayKOBeMwVSCp6Q,6843 +coverage/htmlfiles/keybd_closed.png,sha256=fZv4rmY3DkNJtPQjrFJ5UBOE5DdNof3mdeCZWC7TOoo,9004 +coverage/htmlfiles/pyfile.html,sha256=VPdmlDWSSnsvYZAE8CD0GVOsCkympyC4gNcEg8c0W54,6496 +coverage/htmlfiles/style.css,sha256=lou59uH1JVXagL3_9Pd0FQqiGP_uRDPuMxe4cr709Pw,15645 +coverage/htmlfiles/style.scss,sha256=rNzMLEmZXq_JUKt9VIAIr-pyAKKYI0PAPT1CmdZSX1Y,20915 +coverage/inorout.py,sha256=jFUz-I_g50G_8vGR935hWuwXdrNPep3nfnwVYmAVYEQ,24345 +coverage/jsonreport.py,sha256=jXqvzPJ6kHaSzOnJtLpn96a3eycCjEzMDK81dlqhBCM,7069 +coverage/lcovreport.py,sha256=Q9-g1QS7dUO-9ckqxecRs1_18yfxQPcHw-Mu8y2XU1U,7874 +coverage/misc.py,sha256=iPEV4g_HsXhQmpuzuN1CPy82uF2YLjKOleMnWgXJyDA,11291 +coverage/multiproc.py,sha256=Y1AeYjch8pD4Zb5HjoW51IVz5yeLDY5-ipIOOk-Adyk,4175 +coverage/numbits.py,sha256=4B171qTbHyZ0WDnYGjGo456_PC2jDZSe22F9KCrmZ4Q,4673 +coverage/parser.py,sha256=fefhsL5sKNpJhNFfQx60yhkkcchIQ7ZtBhPMvDxvQ-s,46276 +coverage/patch.py,sha256=oEcBoH6lcSAB9Y0jxuk3ZbFWqJI05R76fJtMHtsp8PY,5567 +coverage/phystokens.py,sha256=Z7chMvCxuYMu2Wj9Oc1vlO8dV1OOqZXWfzALsWfEC7s,7450 +coverage/plugin.py,sha256=4fF3Bq9JVZIIxPUI-DCUGUqeGNzvYldJnqMvP6JlsKY,21509 +coverage/plugin_support.py,sha256=X0I5D3q7MX2IsZZUBBkJJ6lP3guF-zNo_2oTu1XyePs,10444 +coverage/py.typed,sha256=_B1ZXy5hKJZ2Zo3jWSXjqy1SO3rnLdZsUULnKGTplfc,72 +coverage/python.py,sha256=4wPzjJ1eEhDfqboUdLki8wyvzIIIbHvUZafKRXw2vls,8589 +coverage/pytracer.py,sha256=RBs6GaQQHSEau-eSiuyjIyS64W5Jg61GRVRO2wo1rZA,15322 +coverage/regions.py,sha256=hIf6Hly1Zsfl4hPA2FQgDtDT6Lcv0fLlFCXmc94WOlY,4500 +coverage/report.py,sha256=QrMfQaXfghss5g301vp7BudGLaQXD_eaKrFLFFC4Ixg,10818 +coverage/report_core.py,sha256=xyhYM93YVQUu05fDpPkG1raDtTQyQbzPUjIwrlUf-ns,4044 +coverage/results.py,sha256=YBNUYtLI1luEYCfBi59fE6857OegIRBKxlqNP02vLPQ,16103 +coverage/sqldata.py,sha256=ba76NYiiMWNwfsFWxTrrgjSVKVvA9vw42x4cZv-c_dU,45528 +coverage/sqlitedb.py,sha256=R6A4B4D0LQNqMUPZTE4W66hC7V5OenHN9hRWb_pXJSg,10024 +coverage/sysmon.py,sha256=JgZCJ0RPVExxEHlgpRnQATR9qvo57pxZDCYiadD4fVQ,18414 +coverage/templite.py,sha256=pi8Jp54P7X8ME_KnOMLItBstn06Bv-QC2VYsSHEBw2k,10812 +coverage/tomlconfig.py,sha256=9KXQWUPpnqkmRYFhyJMS8ZOfJ4bG69EsskVXm_NT7j8,7558 +coverage/tracer.cpython-311-darwin.so,sha256=UEy4XVJ4s8XLHKQPoss3Bu0V9bqRDC9zuLhQjTozZQI,23312 +coverage/tracer.pyi,sha256=53ZiaWNz6q6qWiEZWFMHeLLkbg6-4oBAzNJ2i1-hA-g,1207 +coverage/types.py,sha256=nOLSWf-CMhaa9h7SMTcueFsklX0vuLdXdmsqzp2Tuqg,5600 +coverage/version.py,sha256=hjz6erHRvxMtg6TOU4UK1Vgrh4WsiRLYXTi0_9qGsd4,1094 +coverage/xmlreport.py,sha256=ayeJ8DEqIX6f9pdRFJvULIGA4WJ8wDR9BEjoKLdK1PA,9871 diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/WHEEL b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/WHEEL new file mode 100644 index 0000000..2966a43 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.9.0) +Root-Is-Purelib: false +Tag: cp311-cp311-macosx_10_9_x86_64 +Generator: delocate 0.13.0 + diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/entry_points.txt b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/entry_points.txt new file mode 100644 index 0000000..55e5f6b --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/entry_points.txt @@ -0,0 +1,4 @@ +[console_scripts] +coverage = coverage.cmdline:main +coverage-3.11 = coverage.cmdline:main +coverage3 = coverage.cmdline:main diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/licenses/LICENSE.txt b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/licenses/LICENSE.txt new file mode 100644 index 0000000..f433b1a --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/licenses/LICENSE.txt @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/top_level.txt b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/top_level.txt new file mode 100644 index 0000000..4ebc8ae --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage-7.11.3.dist-info/top_level.txt @@ -0,0 +1 @@ +coverage diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/__init__.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/__init__.py new file mode 100644 index 0000000..1f7086a --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/__init__.py @@ -0,0 +1,40 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +""" +Code coverage measurement for Python. + +Ned Batchelder +https://coverage.readthedocs.io + +""" + +from __future__ import annotations + +# isort: skip_file + +# mypy's convention is that "import as" names are public from the module. +# We import names as themselves to indicate that. Pylint sees it as pointless, +# so disable its warning. +# pylint: disable=useless-import-alias + +from coverage.version import ( + __version__ as __version__, + version_info as version_info, +) + +from coverage.control import ( + Coverage as Coverage, + process_startup as process_startup, +) +from coverage.data import CoverageData as CoverageData +from coverage.exceptions import CoverageException as CoverageException +from coverage.plugin import ( + CodeRegion as CodeRegion, + CoveragePlugin as CoveragePlugin, + FileReporter as FileReporter, + FileTracer as FileTracer, +) + +# Backward compatibility. +coverage = Coverage diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/__main__.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/__main__.py new file mode 100644 index 0000000..d9d42b9 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/__main__.py @@ -0,0 +1,12 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Coverage.py's main entry point.""" + +from __future__ import annotations + +import sys + +from coverage.cmdline import main + +sys.exit(main()) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/annotate.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/annotate.py new file mode 100644 index 0000000..2f6bd7d --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/annotate.py @@ -0,0 +1,114 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Source file annotation for coverage.py.""" + +from __future__ import annotations + +import os +import re +from collections.abc import Iterable +from typing import TYPE_CHECKING + +from coverage.files import flat_rootname +from coverage.misc import ensure_dir, isolate_module +from coverage.plugin import FileReporter +from coverage.report_core import get_analysis_to_report +from coverage.results import Analysis +from coverage.types import TMorf + +if TYPE_CHECKING: + from coverage import Coverage + +os = isolate_module(os) + + +class AnnotateReporter: + """Generate annotated source files showing line coverage. + + This reporter creates annotated copies of the measured source files. Each + .py file is copied as a .py,cover file, with a left-hand margin annotating + each line:: + + > def h(x): + - if 0: #pragma: no cover + - pass + > if x == 1: + ! a = 1 + > else: + > a = 2 + + > h(2) + + Executed lines use ">", lines not executed use "!", lines excluded from + consideration use "-". + + """ + + def __init__(self, coverage: Coverage) -> None: + self.coverage = coverage + self.config = self.coverage.config + self.directory: str | None = None + + blank_re = re.compile(r"\s*(#|$)") + else_re = re.compile(r"\s*else\s*:\s*(#|$)") + + def report(self, morfs: Iterable[TMorf] | None, directory: str | None = None) -> None: + """Run the report. + + See `coverage.report()` for arguments. + + """ + self.directory = directory + self.coverage.get_data() + for fr, analysis in get_analysis_to_report(self.coverage, morfs): + self.annotate_file(fr, analysis) + + def annotate_file(self, fr: FileReporter, analysis: Analysis) -> None: + """Annotate a single file. + + `fr` is the FileReporter for the file to annotate. + + """ + statements = sorted(analysis.statements) + missing = sorted(analysis.missing) + excluded = sorted(analysis.excluded) + + if self.directory: + ensure_dir(self.directory) + dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename())) + assert dest_file.endswith("_py") + dest_file = dest_file[:-3] + ".py" + else: + dest_file = fr.filename + dest_file += ",cover" + + with open(dest_file, "w", encoding="utf-8") as dest: + i = j = 0 + covered = True + source = fr.source() + for lineno, line in enumerate(source.splitlines(True), start=1): + while i < len(statements) and statements[i] < lineno: + i += 1 + while j < len(missing) and missing[j] < lineno: + j += 1 + if i < len(statements) and statements[i] == lineno: + covered = j >= len(missing) or missing[j] > lineno + if self.blank_re.match(line): + dest.write(" ") + elif self.else_re.match(line): + # Special logic for lines containing only "else:". + if j >= len(missing): + dest.write("> ") + elif statements[i] == missing[j]: + dest.write("! ") + else: + dest.write("> ") + elif lineno in excluded: + dest.write("- ") + elif covered: + dest.write("> ") + else: + dest.write("! ") + + dest.write(line) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/bytecode.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/bytecode.py new file mode 100644 index 0000000..a38d974 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/bytecode.py @@ -0,0 +1,196 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Bytecode analysis for coverage.py""" + +from __future__ import annotations + +import collections +import dis +from types import CodeType +from typing import Iterable, Mapping, Optional + +from coverage.types import TArc, TLineNo, TOffset + + +def code_objects(code: CodeType) -> Iterable[CodeType]: + """Iterate over all the code objects in `code`.""" + stack = [code] + while stack: + # We're going to return the code object on the stack, but first + # push its children for later returning. + code = stack.pop() + for c in code.co_consts: + if isinstance(c, CodeType): + stack.append(c) + yield code + + +def op_set(*op_names: str) -> set[int]: + """Make a set of opcodes from instruction names. + + The names might not exist in this version of Python, skip those if not. + """ + ops = {op for name in op_names if (op := dis.opmap.get(name))} + assert ops, f"At least one opcode must exist: {op_names}" + return ops + + +# Opcodes that are unconditional jumps elsewhere. +ALWAYS_JUMPS = op_set( + "JUMP_BACKWARD", + "JUMP_BACKWARD_NO_INTERRUPT", + "JUMP_FORWARD", +) + +# Opcodes that exit from a function. +RETURNS = op_set( + "RETURN_VALUE", + "RETURN_GENERATOR", +) + +# Opcodes that do nothing. +NOPS = op_set( + "NOP", + "NOT_TAKEN", +) + + +class InstructionWalker: + """Utility to step through trails of instructions. + + We have two reasons to need sequences of instructions from a code object: + First, in strict sequence to visit all the instructions in the object. + This is `walk(follow_jumps=False)`. Second, we want to follow jumps to + understand how execution will flow: `walk(follow_jumps=True)`. + """ + + def __init__(self, code: CodeType) -> None: + self.code = code + self.insts: dict[TOffset, dis.Instruction] = {} + + inst = None + for inst in dis.get_instructions(code): + self.insts[inst.offset] = inst + + assert inst is not None + self.max_offset = inst.offset + + def walk( + self, *, start_at: TOffset = 0, follow_jumps: bool = True + ) -> Iterable[dis.Instruction]: + """ + Yield instructions starting from `start_at`. Follow unconditional + jumps if `follow_jumps` is true. + """ + seen = set() + offset = start_at + while offset < self.max_offset + 1: + if offset in seen: + break + seen.add(offset) + if inst := self.insts.get(offset): + yield inst + if follow_jumps and inst.opcode in ALWAYS_JUMPS: + offset = inst.jump_target + continue + offset += 2 + + +TBranchTrailsOneSource = dict[Optional[TArc], set[TOffset]] +TBranchTrails = dict[TOffset, TBranchTrailsOneSource] + + +def branch_trails( + code: CodeType, + multiline_map: Mapping[TLineNo, TLineNo], +) -> TBranchTrails: + """ + Calculate branch trails for `code`. + + `multiline_map` maps line numbers to the first line number of a + multi-line statement. + + Instructions can have a jump_target, where they might jump to next. Some + instructions with a jump_target are unconditional jumps (ALWAYS_JUMPS), so + they aren't interesting to us, since they aren't the start of a branch + possibility. + + Instructions that might or might not jump somewhere else are branch + possibilities. For each of those, we track a trail of instructions. These + are lists of instruction offsets, the next instructions that can execute. + We follow the trail until we get to a new source line. That gives us the + arc from the original instruction's line to the new source line. + + """ + the_trails: TBranchTrails = collections.defaultdict(lambda: collections.defaultdict(set)) + iwalker = InstructionWalker(code) + for inst in iwalker.walk(follow_jumps=False): + if not inst.jump_target: + # We only care about instructions with jump targets. + continue + if inst.opcode in ALWAYS_JUMPS: + # We don't care about unconditional jumps. + continue + + from_line = inst.line_number + if from_line is None: + continue + from_line = multiline_map.get(from_line, from_line) + + def add_one_branch_trail( + trails: TBranchTrailsOneSource, + start_at: TOffset, + ) -> None: + # pylint: disable=cell-var-from-loop + inst_offsets: set[TOffset] = set() + to_line = None + for inst2 in iwalker.walk(start_at=start_at, follow_jumps=True): + inst_offsets.add(inst2.offset) + l2 = inst2.line_number + if l2 is not None: + l2 = multiline_map.get(l2, l2) + if l2 and l2 != from_line: + to_line = l2 + break + elif inst2.jump_target and (inst2.opcode not in ALWAYS_JUMPS): + break + elif inst2.opcode in RETURNS: + to_line = -code.co_firstlineno + break + if to_line is not None: + trails[(from_line, to_line)].update(inst_offsets) + else: + trails[None] = set() + + # Calculate two trails: one from the next instruction, and one from the + # jump_target instruction. + trails: TBranchTrailsOneSource = collections.defaultdict(set) + add_one_branch_trail(trails, start_at=inst.offset + 2) + add_one_branch_trail(trails, start_at=inst.jump_target) + the_trails[inst.offset] = trails + + # Sometimes we get BRANCH_RIGHT or BRANCH_LEFT events from instructions + # other than the original jump possibility instruction. Register each + # trail under all of their offsets so we can pick up in the middle of a + # trail if need be. + for arc, offsets in trails.items(): + for offset in offsets: + the_trails[offset][arc].update(offsets) + + return the_trails + + +def always_jumps(code: CodeType) -> dict[TOffset, TOffset]: + """Make a map of unconditional bytecodes jumping to others. + + Only include bytecodes that do no work and go to another bytecode. + """ + jumps = {} + iwalker = InstructionWalker(code) + for inst in iwalker.walk(follow_jumps=False): + if inst.opcode in ALWAYS_JUMPS: + jumps[inst.offset] = inst.jump_target + elif inst.opcode in NOPS: + jumps[inst.offset] = inst.offset + 2 + return jumps diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/cmdline.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/cmdline.py new file mode 100644 index 0000000..5287327 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/cmdline.py @@ -0,0 +1,1184 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Command-line support for coverage.py.""" + +from __future__ import annotations + +import glob +import optparse +import os +import os.path +import shlex +import signal +import sys +import textwrap +import traceback +import types +from typing import Any, NoReturn, cast + +import coverage +from coverage import Coverage, env +from coverage.config import CoverageConfig +from coverage.control import DEFAULT_DATAFILE +from coverage.core import CTRACER_FILE +from coverage.data import CoverageData, combinable_files, debug_data_file +from coverage.debug import info_header, short_stack, write_formatted_info +from coverage.exceptions import NoSource, CoverageException, _ExceptionDuringRun +from coverage.execfile import PyRunner +from coverage.results import display_covered, should_fail_under +from coverage.version import __url__ + +# When adding to this file, alphabetization is important. Look for +# "alphabetize" comments throughout. + + +def oneline(text: str) -> str: + """Turn a multi-line string into one line for help to reformat nicely.""" + return " ".join(text.split()) + + +class Opts: + """A namespace class for individual options we'll build parsers from.""" + + # Keep these entries alphabetized (roughly) by the option name as it + # appears on the command line. + + append = optparse.make_option( + "-a", + "--append", + action="store_true", + help="Append data to the data file. Otherwise it starts clean each time.", + ) + branch = optparse.make_option( + "", + "--branch", + action="store_true", + help="Measure branch coverage in addition to statement coverage.", + ) + concurrency = optparse.make_option( + "", + "--concurrency", + action="store", + metavar="LIBS", + help=oneline( + """ + Properly measure code using a concurrency library. + Valid values are: {}, or a comma-list of them. + """ + ).format(", ".join(sorted(CoverageConfig.CONCURRENCY_CHOICES))), + ) + context = optparse.make_option( + "", + "--context", + action="store", + metavar="LABEL", + help="The context label to record for this coverage run.", + ) + contexts = optparse.make_option( + "", + "--contexts", + action="store", + metavar="REGEX1,REGEX2,...", + help=oneline( + """ + Only display data from lines covered in the given contexts. + Accepts Python regexes, which must be quoted. + """ + ), + ) + datafile = optparse.make_option( + "", + "--data-file", + action="store", + metavar="DATAFILE", + help=oneline( + """ + Base name of the data files to operate on. + Defaults to '.coverage'. [env: COVERAGE_FILE] + """ + ), + ) + datafle_input = optparse.make_option( + "", + "--data-file", + action="store", + metavar="INFILE", + help=oneline( + """ + Read coverage data for report generation from this file. + Defaults to '.coverage'. [env: COVERAGE_FILE] + """ + ), + ) + datafile_output = optparse.make_option( + "", + "--data-file", + action="store", + metavar="OUTFILE", + help=oneline( + """ + Write the recorded coverage data to this file. + Defaults to '.coverage'. [env: COVERAGE_FILE] + """ + ), + ) + debug = optparse.make_option( + "", + "--debug", + action="store", + metavar="OPTS", + help="Debug options, separated by commas. [env: COVERAGE_DEBUG]", + ) + directory = optparse.make_option( + "-d", + "--directory", + action="store", + metavar="DIR", + help="Write the output files to DIR.", + ) + fail_under = optparse.make_option( + "", + "--fail-under", + action="store", + metavar="MIN", + type="float", + help="Exit with a status of 2 if the total coverage is less than MIN.", + ) + format = optparse.make_option( + "", + "--format", + action="store", + metavar="FORMAT", + help="Output format, either text (default), markdown, or total.", + ) + help = optparse.make_option( + "-h", + "--help", + action="store_true", + help="Get help on this command.", + ) + ignore_errors = optparse.make_option( + "-i", + "--ignore-errors", + action="store_true", + help="Ignore errors while reading source files.", + ) + include = optparse.make_option( + "", + "--include", + action="store", + metavar="PAT1,PAT2,...", + help=oneline( + """ + Include only files whose paths match one of these patterns. + Accepts shell-style wildcards, which must be quoted. + """ + ), + ) + keep = optparse.make_option( + "", + "--keep", + action="store_true", + help="Keep original coverage files, otherwise they are deleted.", + ) + pylib = optparse.make_option( + "-L", + "--pylib", + action="store_true", + help=oneline( + """ + Measure coverage even inside the Python installed library, + which isn't done by default. + """ + ), + ) + show_missing = optparse.make_option( + "-m", + "--show-missing", + action="store_true", + help="Show line numbers of statements in each module that weren't executed.", + ) + module = optparse.make_option( + "-m", + "--module", + action="store_true", + help=oneline( + """ + is an importable Python module, not a script path, + to be run as 'python -m' would run it. + """ + ), + ) + omit = optparse.make_option( + "", + "--omit", + action="store", + metavar="PAT1,PAT2,...", + help=oneline( + """ + Omit files whose paths match one of these patterns. + Accepts shell-style wildcards, which must be quoted. + """ + ), + ) + output_xml = optparse.make_option( + "-o", + "", + action="store", + dest="outfile", + metavar="OUTFILE", + help="Write the XML report to this file. Defaults to 'coverage.xml'", + ) + output_json = optparse.make_option( + "-o", + "", + action="store", + dest="outfile", + metavar="OUTFILE", + help="Write the JSON report to this file. Defaults to 'coverage.json'", + ) + output_lcov = optparse.make_option( + "-o", + "", + action="store", + dest="outfile", + metavar="OUTFILE", + help="Write the LCOV report to this file. Defaults to 'coverage.lcov'", + ) + json_pretty_print = optparse.make_option( + "", + "--pretty-print", + action="store_true", + help="Format the JSON for human readers.", + ) + parallel_mode = optparse.make_option( + "-p", + "--parallel-mode", + action="store_true", + help=oneline( + """ + Append a unique suffix to the data file name to collect separate + data from multiple processes. + """ + ), + ) + precision = optparse.make_option( + "", + "--precision", + action="store", + metavar="N", + type=int, + help=oneline( + """ + Number of digits after the decimal point to display for + reported coverage percentages. + """ + ), + ) + quiet = optparse.make_option( + "-q", + "--quiet", + action="store_true", + help="Don't print messages about what is happening.", + ) + rcfile = optparse.make_option( + "", + "--rcfile", + action="store", + help=oneline( + """ + Specify configuration file. + By default '.coveragerc', 'setup.cfg', 'tox.ini', and + 'pyproject.toml' are tried. [env: COVERAGE_RCFILE] + """ + ), + ) + save_signal = optparse.make_option( + "", + "--save-signal", + action="store", + metavar="SIGNAL", + choices=["USR1", "USR2"], + help=oneline( + """ + Specify a signal that will trigger coverage to write its collected data. + Supported values are: USR1, USR2. Not available on Windows. + """ + ), + ) + show_contexts = optparse.make_option( + "--show-contexts", + action="store_true", + help="Show contexts for covered lines.", + ) + skip_covered = optparse.make_option( + "--skip-covered", + action="store_true", + help="Skip files with 100% coverage.", + ) + no_skip_covered = optparse.make_option( + "--no-skip-covered", + action="store_false", + dest="skip_covered", + help="Disable --skip-covered.", + ) + skip_empty = optparse.make_option( + "--skip-empty", + action="store_true", + help="Skip files with no code.", + ) + sort = optparse.make_option( + "--sort", + action="store", + metavar="COLUMN", + help=oneline( + """ + Sort the report by the named column: name, stmts, miss, branch, brpart, or cover. + Default is name. + """ + ), + ) + source = optparse.make_option( + "", + "--source", + action="store", + metavar="SRC1,SRC2,...", + help="A list of directories or importable names of code to measure.", + ) + timid = optparse.make_option( + "", + "--timid", + action="store_true", + help="Use the slower Python trace function core.", + ) + title = optparse.make_option( + "", + "--title", + action="store", + metavar="TITLE", + help="A text string to use as the title on the HTML.", + ) + version = optparse.make_option( + "", + "--version", + action="store_true", + help="Display version information and exit.", + ) + + +class CoverageOptionParser(optparse.OptionParser): + """Base OptionParser for coverage.py. + + Problems don't exit the program. + Defaults are initialized for all options. + + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + kwargs["add_help_option"] = False + super().__init__(*args, **kwargs) + self.set_defaults( + # Keep these arguments alphabetized by their names. + action=None, + append=None, + branch=None, + concurrency=None, + context=None, + contexts=None, + data_file=None, + debug=None, + directory=None, + fail_under=None, + format=None, + help=None, + ignore_errors=None, + include=None, + keep=None, + module=None, + omit=None, + parallel_mode=None, + precision=None, + pylib=None, + quiet=None, + rcfile=True, + save_signal=None, + show_contexts=None, + show_missing=None, + skip_covered=None, + skip_empty=None, + sort=None, + source=None, + timid=None, + title=None, + version=None, + ) + + self.disable_interspersed_args() + + class OptionParserError(Exception): + """Used to stop the optparse error handler ending the process.""" + + pass + + def parse_args_ok(self, args: list[str]) -> tuple[bool, optparse.Values | None, list[str]]: + """Call optparse.parse_args, but return a triple: + + (ok, options, args) + + """ + try: + options, args = super().parse_args(args) + except self.OptionParserError: + return False, None, [] + return True, options, args + + def error(self, msg: str) -> NoReturn: + """Override optparse.error so sys.exit doesn't get called.""" + show_help(msg) + raise self.OptionParserError + + +class GlobalOptionParser(CoverageOptionParser): + """Command-line parser for coverage.py global option arguments.""" + + def __init__(self) -> None: + super().__init__() + + self.add_options( + [ + Opts.help, + Opts.version, + ] + ) + + +class CmdOptionParser(CoverageOptionParser): + """Parse one of the new-style commands for coverage.py.""" + + def __init__( + self, + action: str, + options: list[optparse.Option], + description: str, + usage: str | None = None, + ): + """Create an OptionParser for a coverage.py command. + + `action` is the slug to put into `options.action`. + `options` is a list of Option's for the command. + `description` is the description of the command, for the help text. + `usage` is the usage string to display in help. + + """ + if usage: + usage = "%prog " + usage + super().__init__( + usage=usage, + description=description, + ) + self.set_defaults(action=action) + self.add_options(options) + self.cmd = action + + def __eq__(self, other: str) -> bool: # type: ignore[override] + # A convenience equality, so that I can put strings in unit test + # results, and they will compare equal to objects. + return other == f"" + + __hash__ = None # type: ignore[assignment] + + def get_prog_name(self) -> str: + """Override of an undocumented function in optparse.OptionParser.""" + program_name = super().get_prog_name() + + # Include the sub-command for this parser as part of the command. + return f"{program_name} {self.cmd}" + + +# In lists of Opts, keep them alphabetized by the option names as they appear +# on the command line, since these lists determine the order of the options in +# the help output. +# +# In COMMANDS, keep the keys (command names) alphabetized. + +GLOBAL_ARGS = [ + Opts.debug, + Opts.help, + Opts.rcfile, +] + +COMMANDS = { + "annotate": CmdOptionParser( + "annotate", + [ + Opts.directory, + Opts.datafle_input, + Opts.ignore_errors, + Opts.include, + Opts.omit, + ] + + GLOBAL_ARGS, + usage="[options] [modules]", + description=oneline( + """ + Make annotated copies of the given files, marking statements that are executed + with > and statements that are missed with !. + """ + ), + ), + "combine": CmdOptionParser( + "combine", + [ + Opts.append, + Opts.datafile, + Opts.keep, + Opts.quiet, + ] + + GLOBAL_ARGS, + usage="[options] ... ", + description=oneline( + """ + Combine data from multiple coverage files. + The combined results are written to a single + file representing the union of the data. The positional + arguments are data files or directories containing data files. + If no paths are provided, data files in the default data file's + directory are combined. + """ + ), + ), + "debug": CmdOptionParser( + "debug", + GLOBAL_ARGS, + usage="", + description=oneline( + """ + Display information about the internals of coverage.py, + for diagnosing problems. + Topics are: + 'data' to show a summary of the collected data; + 'sys' to show installation information; + 'config' to show the configuration; + 'premain' to show what is calling coverage; + 'pybehave' to show internal flags describing Python behavior; + 'sqlite' to show SQLite compilation options. + """ + ), + ), + "erase": CmdOptionParser( + "erase", + [ + Opts.datafile, + ] + + GLOBAL_ARGS, + description="Erase previously collected coverage data.", + ), + "help": CmdOptionParser( + "help", + GLOBAL_ARGS, + usage="[command]", + description="Describe how to use coverage.py", + ), + "html": CmdOptionParser( + "html", + [ + Opts.contexts, + Opts.directory, + Opts.datafle_input, + Opts.fail_under, + Opts.ignore_errors, + Opts.include, + Opts.omit, + Opts.precision, + Opts.quiet, + Opts.show_contexts, + Opts.skip_covered, + Opts.no_skip_covered, + Opts.skip_empty, + Opts.title, + ] + + GLOBAL_ARGS, + usage="[options] [modules]", + description=oneline( + """ + Create an HTML report of the coverage of the files. + Each file gets its own page, with the source decorated to show + executed, excluded, and missed lines. + """ + ), + ), + "json": CmdOptionParser( + "json", + [ + Opts.contexts, + Opts.datafle_input, + Opts.fail_under, + Opts.ignore_errors, + Opts.include, + Opts.omit, + Opts.output_json, + Opts.json_pretty_print, + Opts.quiet, + Opts.show_contexts, + ] + + GLOBAL_ARGS, + usage="[options] [modules]", + description="Generate a JSON report of coverage results.", + ), + "lcov": CmdOptionParser( + "lcov", + [ + Opts.datafle_input, + Opts.fail_under, + Opts.ignore_errors, + Opts.include, + Opts.output_lcov, + Opts.omit, + Opts.quiet, + ] + + GLOBAL_ARGS, + usage="[options] [modules]", + description="Generate an LCOV report of coverage results.", + ), + "report": CmdOptionParser( + "report", + [ + Opts.contexts, + Opts.datafle_input, + Opts.fail_under, + Opts.format, + Opts.ignore_errors, + Opts.include, + Opts.omit, + Opts.precision, + Opts.sort, + Opts.show_missing, + Opts.skip_covered, + Opts.no_skip_covered, + Opts.skip_empty, + ] + + GLOBAL_ARGS, + usage="[options] [modules]", + description="Report coverage statistics on modules.", + ), + "run": CmdOptionParser( + "run", + [ + Opts.append, + Opts.branch, + Opts.concurrency, + Opts.context, + Opts.datafile_output, + Opts.include, + Opts.module, + Opts.omit, + Opts.pylib, + Opts.parallel_mode, + Opts.save_signal, + Opts.source, + Opts.timid, + ] + + GLOBAL_ARGS, + usage="[options] [program options]", + description="Run a Python program, measuring code execution.", + ), + "xml": CmdOptionParser( + "xml", + [ + Opts.datafle_input, + Opts.fail_under, + Opts.ignore_errors, + Opts.include, + Opts.omit, + Opts.output_xml, + Opts.quiet, + Opts.skip_empty, + ] + + GLOBAL_ARGS, + usage="[options] [modules]", + description="Generate an XML report of coverage results.", + ), +} + + +def show_help( + error: str | None = None, + topic: str | None = None, + parser: optparse.OptionParser | None = None, +) -> None: + """Display an error message, or the named topic.""" + assert error or topic or parser + + program_path = sys.argv[0] + if program_path.endswith(os.path.sep + "__main__.py"): + # The path is the main module of a package; get that path instead. + program_path = os.path.dirname(program_path) + program_name = os.path.basename(program_path) + if env.WINDOWS: + # entry_points={"console_scripts":...} on Windows makes files + # called coverage.exe, coverage3.exe, and coverage-3.5.exe. These + # invoke coverage-script.py, coverage3-script.py, and + # coverage-3.5-script.py. argv[0] is the .py file, but we want to + # get back to the original form. + auto_suffix = "-script.py" + if program_name.endswith(auto_suffix): + program_name = program_name[: -len(auto_suffix)] + + help_params = dict(coverage.__dict__) + help_params["__url__"] = __url__ + help_params["program_name"] = program_name + if CTRACER_FILE: + help_params["extension_modifier"] = "with C extension" + else: + help_params["extension_modifier"] = "without C extension" + + if error: + print(error, file=sys.stderr) + print(f"Use '{program_name} help' for help.", file=sys.stderr) + elif parser: + print(parser.format_help().strip()) + print() + else: + assert topic is not None + help_msg = textwrap.dedent(HELP_TOPICS.get(topic, "")).strip() + if help_msg: + print(help_msg.format(**help_params)) + else: + print(f"Don't know topic {topic!r}") + print("Full documentation is at {__url__}".format(**help_params)) + + +OK, ERR, FAIL_UNDER = 0, 1, 2 + + +class CoverageScript: + """The command-line interface to coverage.py.""" + + def __init__(self) -> None: + self.global_option = False + self.coverage: Coverage + + def command_line(self, argv: list[str]) -> int: + """The bulk of the command line interface to coverage.py. + + `argv` is the argument list to process. + + Returns 0 if all is well, 1 if something went wrong. + + """ + # Collect the command-line options. + if not argv: + show_help(topic="minimum_help") + return OK + + # The command syntax we parse depends on the first argument. Global + # switch syntax always starts with an option. + parser: optparse.OptionParser | None + self.global_option = argv[0].startswith("-") + if self.global_option: + parser = GlobalOptionParser() + else: + parser = COMMANDS.get(argv[0]) + if not parser: + show_help(f"Unknown command: {argv[0]!r}") + return ERR + argv = argv[1:] + + ok, options, args = parser.parse_args_ok(argv) + if not ok: + return ERR + assert options is not None + + # Handle help and version. + if self.do_help(options, args, parser): + return OK + + # Listify the list options. + source = unshell_list(options.source) + omit = unshell_list(options.omit) + include = unshell_list(options.include) + debug = unshell_list(options.debug) + contexts = unshell_list(options.contexts) + + if options.concurrency is not None: + concurrency = options.concurrency.split(",") + else: + concurrency = None + + # Do something. + self.coverage = Coverage( + data_file=options.data_file or DEFAULT_DATAFILE, + data_suffix=options.parallel_mode, + cover_pylib=options.pylib, + timid=options.timid, + branch=options.branch, + config_file=options.rcfile, + source=source, + omit=omit, + include=include, + debug=debug, + concurrency=concurrency, + check_preimported=True, + context=options.context, + messages=not options.quiet, + ) + + if options.action == "debug": + return self.do_debug(args) + + elif options.action == "erase": + self.coverage.erase() + return OK + + elif options.action == "run": + return self.do_run(options, args) + + elif options.action == "combine": + if options.append: + self.coverage.load() + data_paths = args or None + self.coverage.combine(data_paths, strict=True, keep=bool(options.keep)) + self.coverage.save() + return OK + + # Remaining actions are reporting, with some common options. + report_args = dict( + morfs=unglob_args(args), + ignore_errors=options.ignore_errors, + omit=omit, + include=include, + contexts=contexts, + ) + + # We need to be able to import from the current directory, because + # plugins may try to, for example, to read Django settings. + sys.path.insert(0, "") + + self.coverage.load() + + total = None + if options.action == "report": + total = self.coverage.report( + precision=options.precision, + show_missing=options.show_missing, + skip_covered=options.skip_covered, + skip_empty=options.skip_empty, + sort=options.sort, + output_format=options.format, + **report_args, + ) + elif options.action == "annotate": + self.coverage.annotate(directory=options.directory, **report_args) + elif options.action == "html": + total = self.coverage.html_report( + directory=options.directory, + precision=options.precision, + skip_covered=options.skip_covered, + skip_empty=options.skip_empty, + show_contexts=options.show_contexts, + title=options.title, + **report_args, + ) + elif options.action == "xml": + total = self.coverage.xml_report( + outfile=options.outfile, + skip_empty=options.skip_empty, + **report_args, + ) + elif options.action == "json": + total = self.coverage.json_report( + outfile=options.outfile, + pretty_print=options.pretty_print, + show_contexts=options.show_contexts, + **report_args, + ) + elif options.action == "lcov": + total = self.coverage.lcov_report( + outfile=options.outfile, + **report_args, + ) + else: + # There are no other possible actions. + raise AssertionError + + if total is not None: + # Apply the command line fail-under options, and then use the config + # value, so we can get fail_under from the config file. + if options.fail_under is not None: + self.coverage.set_option("report:fail_under", options.fail_under) + if options.precision is not None: + self.coverage.set_option("report:precision", options.precision) + + fail_under = cast(float, self.coverage.get_option("report:fail_under")) + precision = cast(int, self.coverage.get_option("report:precision")) + if should_fail_under(total, fail_under, precision): + msg = "total of {total} is less than fail-under={fail_under:.{p}f}".format( + total=display_covered(total, precision), + fail_under=fail_under, + p=precision, + ) + print("Coverage failure:", msg) + return FAIL_UNDER + + return OK + + def do_help( + self, + options: optparse.Values, + args: list[str], + parser: optparse.OptionParser, + ) -> bool: + """Deal with help requests. + + Return True if it handled the request, False if not. + + """ + # Handle help. + if options.help: + if self.global_option: + show_help(topic="help") + else: + show_help(parser=parser) + return True + + if options.action == "help": + if args: + for a in args: + parser_maybe = COMMANDS.get(a) + if parser_maybe is not None: + show_help(parser=parser_maybe) + else: + show_help(topic=a) + else: + show_help(topic="help") + return True + + # Handle version. + if options.version: + show_help(topic="version") + return True + + return False + + def do_signal_save(self, _signum: int, _frame: types.FrameType | None) -> None: + """Signal handler to save coverage report""" + print("Saving coverage data...", flush=True) + self.coverage.save() + + def do_run(self, options: optparse.Values, args: list[str]) -> int: + """Implementation of 'coverage run'.""" + + if not args: + if options.module: + # Specified -m with nothing else. + show_help("No module specified for -m") + return ERR + command_line = cast(str, self.coverage.get_option("run:command_line")) + if command_line is not None: + args = shlex.split(command_line) + if args and args[0] in {"-m", "--module"}: + options.module = True + args = args[1:] + if not args: + show_help("Nothing to do.") + return ERR + + if options.append and self.coverage.get_option("run:parallel"): + show_help("Can't append to data files in parallel mode.") + return ERR + + if options.concurrency == "multiprocessing": + # Can't set other run-affecting command line options with + # multiprocessing. + for opt_name in ["branch", "include", "omit", "pylib", "source", "timid"]: + # As it happens, all of these options have no default, meaning + # they will be None if they have not been specified. + if getattr(options, opt_name) is not None: + show_help( + "Options affecting multiprocessing must only be specified " + + "in a configuration file.\n" + + f"Remove --{opt_name} from the command line.", + ) + return ERR + + os.environ["COVERAGE_RUN"] = "true" + + runner = PyRunner(args, as_module=bool(options.module)) + runner.prepare() + + if options.append: + self.coverage.load() + + if options.save_signal: + if env.WINDOWS: + show_help("--save-signal is not supported on Windows.") + return ERR + sig = getattr(signal, f"SIG{options.save_signal}") + signal.signal(sig, self.do_signal_save) + + # Run the script. + self.coverage.start() + code_ran = True + try: + runner.run() + except NoSource: + code_ran = False + raise + finally: + self.coverage.stop() + if code_ran: + self.coverage.save() + + return OK + + def do_debug(self, args: list[str]) -> int: + """Implementation of 'coverage debug'.""" + + if not args: + show_help( + "What information would you like: " + + "config, data, sys, premain, pybehave, sqlite?" + ) + return ERR + if args[1:]: + show_help("Only one topic at a time, please") + return ERR + + if args[0] == "sys": + write_formatted_info(print, "sys", self.coverage.sys_info()) + elif args[0] == "data": + print(info_header("data")) + data_file = self.coverage.config.data_file + debug_data_file(data_file) + for filename in combinable_files(data_file): + print("-----") + debug_data_file(filename) + elif args[0] == "config": + write_formatted_info(print, "config", self.coverage.config.debug_info()) + elif args[0] == "premain": + print(info_header("premain")) + print(short_stack(full=True)) + elif args[0] == "pybehave": + write_formatted_info(print, "pybehave", env.debug_info()) + elif args[0] == "sqlite": + write_formatted_info(print, "sqlite", CoverageData.sys_info()) + else: + show_help(f"Don't know what you mean by {args[0]!r}") + return ERR + + return OK + + +def unshell_list(s: str) -> list[str] | None: + """Turn a command-line argument into a list.""" + if not s: + return None + if env.WINDOWS: + # When running coverage.py as coverage.exe, some of the behavior + # of the shell is emulated: wildcards are expanded into a list of + # file names. So you have to single-quote patterns on the command + # line, but (not) helpfully, the single quotes are included in the + # argument, so we have to strip them off here. + s = s.strip("'") + return s.split(",") + + +def unglob_args(args: list[str]) -> list[str]: + """Interpret shell wildcards for platforms that need it.""" + if env.WINDOWS: + globbed = [] + for arg in args: + if "?" in arg or "*" in arg: + globbed.extend(glob.glob(arg)) + else: + globbed.append(arg) + args = globbed + return args + + +HELP_TOPICS = { + "help": """\ + Coverage.py, version {__version__} {extension_modifier} + Measure, collect, and report on code coverage in Python programs. + + usage: {program_name} [options] [args] + + Commands: + annotate Annotate source files with execution information. + combine Combine a number of data files. + debug Display information about the internals of coverage.py + erase Erase previously collected coverage data. + help Get help on using coverage.py. + html Create an HTML report. + json Create a JSON report of coverage results. + lcov Create an LCOV report of coverage results. + report Report coverage stats on modules. + run Run a Python program and measure code execution. + xml Create an XML report of coverage results. + + Use "{program_name} help " for detailed help on any command. + """, + "minimum_help": oneline( + """ + Code coverage for Python, version {__version__} {extension_modifier}. + Use '{program_name} help' for help. + """ + ), + "version": "Coverage.py, version {__version__} {extension_modifier}", +} + + +def main(argv: list[str] | None = None) -> int | None: + """The main entry point to coverage.py. + + This is installed as the script entry point. + + """ + if argv is None: + argv = sys.argv[1:] + try: + status = CoverageScript().command_line(argv) + except _ExceptionDuringRun as err: + # An exception was caught while running the product code. The + # sys.exc_info() return tuple is packed into an _ExceptionDuringRun + # exception. + traceback.print_exception(*err.args) # pylint: disable=no-value-for-parameter + status = ERR + except CoverageException as err: + # A controlled error inside coverage.py: print the message to the user. + msg = err.args[0] + if err.slug: + msg = f"{msg.rstrip('.')}; see {__url__}/messages.html#error-{err.slug}" + print(msg) + status = ERR + except SystemExit as err: + # The user called `sys.exit()`. Exit with their argument, if any. + if err.args: + status = err.args[0] + else: + status = None + return status + + +# Profiling using ox_profile. Install it from GitHub: +# pip install git+https://github.com/emin63/ox_profile.git +# +# $set_env.py: COVERAGE_PROFILE - Set to use ox_profile. +_profile = os.getenv("COVERAGE_PROFILE") +if _profile: # pragma: debugging + from ox_profile.core.launchers import SimpleLauncher # pylint: disable=import-error + + original_main = main + + def main( # pylint: disable=function-redefined + argv: list[str] | None = None, + ) -> int | None: + """A wrapper around main that profiles.""" + profiler = SimpleLauncher.launch() + try: + return original_main(argv) + finally: + data, _ = profiler.query(re_filter="coverage", max_records=100) + print(profiler.show(query=data, limit=100, sep="", col="")) + profiler.cancel() diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/collector.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/collector.py new file mode 100644 index 0000000..45ba7b2 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/collector.py @@ -0,0 +1,486 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Raw data collector for coverage.py.""" + +from __future__ import annotations + +import contextlib +import functools +import os +import sys +from collections.abc import Mapping +from types import FrameType +from typing import Any, Callable, TypeVar, cast + +from coverage import env +from coverage.core import Core +from coverage.data import CoverageData +from coverage.debug import short_stack +from coverage.exceptions import ConfigError +from coverage.misc import human_sorted_items, isolate_module +from coverage.plugin import CoveragePlugin +from coverage.types import ( + TArc, + TCheckIncludeFn, + TFileDisposition, + Tracer, + TShouldStartContextFn, + TShouldTraceFn, + TTraceData, + TTraceFn, + TWarnFn, +) + +os = isolate_module(os) + + +T = TypeVar("T") + + +class Collector: + """Collects trace data. + + Creates a Tracer object for each thread, since they track stack + information. Each Tracer points to the same shared data, contributing + traced data points. + + When the Collector is started, it creates a Tracer for the current thread, + and installs a function to create Tracers for each new thread started. + When the Collector is stopped, all active Tracers are stopped. + + Threads started while the Collector is stopped will never have Tracers + associated with them. + + """ + + # The stack of active Collectors. Collectors are added here when started, + # and popped when stopped. Collectors on the stack are paused when not + # the top, and resumed when they become the top again. + _collectors: list[Collector] = [] + + def __init__( + self, + core: Core, + should_trace: TShouldTraceFn, + check_include: TCheckIncludeFn, + should_start_context: TShouldStartContextFn | None, + file_mapper: Callable[[str], str], + branch: bool, + warn: TWarnFn, + concurrency: list[str], + ) -> None: + """Create a collector. + + `should_trace` is a function, taking a file name and a frame, and + returning a `coverage.FileDisposition object`. + + `check_include` is a function taking a file name and a frame. It returns + a boolean: True if the file should be traced, False if not. + + `should_start_context` is a function taking a frame, and returning a + string. If the frame should be the start of a new context, the string + is the new context. If the frame should not be the start of a new + context, return None. + + `file_mapper` is a function taking a filename, and returning a Unicode + filename. The result is the name that will be recorded in the data + file. + + If `branch` is true, then branches will be measured. This involves + collecting data on which statements followed each other (arcs). Use + `get_arc_data` to get the arc data. + + `warn` is a warning function, taking a single string message argument + and an optional slug argument which will be a string or None, to be + used if a warning needs to be issued. + + `concurrency` is a list of strings indicating the concurrency libraries + in use. Valid values are "greenlet", "eventlet", "gevent", or "thread" + (the default). "thread" can be combined with one of the other three. + Other values are ignored. + + """ + self.core = core + self.should_trace = should_trace + self.check_include = check_include + self.should_start_context = should_start_context + self.file_mapper = file_mapper + self.branch = branch + self.warn = warn + assert isinstance(concurrency, list), f"Expected a list: {concurrency!r}" + + self.pid = os.getpid() + + self.covdata: CoverageData + self.threading = None + self.static_context: str | None = None + + self.origin = short_stack() + + self.concur_id_func = None + + do_threading = False + + tried = "nothing" # to satisfy pylint + try: + if "greenlet" in concurrency: + tried = "greenlet" + import greenlet + + self.concur_id_func = greenlet.getcurrent + elif "eventlet" in concurrency: + tried = "eventlet" + import eventlet.greenthread + + self.concur_id_func = eventlet.greenthread.getcurrent + elif "gevent" in concurrency: + tried = "gevent" + import gevent + + self.concur_id_func = gevent.getcurrent + + if "thread" in concurrency: + do_threading = True + except ImportError as ex: + msg = f"Couldn't trace with concurrency={tried}, the module isn't installed." + raise ConfigError(msg) from ex + + if self.concur_id_func and not hasattr(core.tracer_class, "concur_id_func"): + raise ConfigError( + "Can't support concurrency={} with {}, only threads are supported.".format( + tried, + self.tracer_name(), + ), + ) + + if do_threading or not concurrency: + # It's important to import threading only if we need it. If + # it's imported early, and the program being measured uses + # gevent, then gevent's monkey-patching won't work properly. + import threading + + self.threading = threading + + self.reset() + + def __repr__(self) -> str: + return f"" + + def use_data(self, covdata: CoverageData, context: str | None) -> None: + """Use `covdata` for recording data.""" + self.covdata = covdata + self.static_context = context + self.covdata.set_context(self.static_context) + + def tracer_name(self) -> str: + """Return the class name of the tracer we're using.""" + return self.core.tracer_class.__name__ + + def _clear_data(self) -> None: + """Clear out existing data, but stay ready for more collection.""" + # We used to use self.data.clear(), but that would remove filename + # keys and data values that were still in use higher up the stack + # when we are called as part of switch_context. + with self.data_lock or contextlib.nullcontext(): + for d in self.data.values(): + d.clear() + + for tracer in self.tracers: + tracer.reset_activity() + + def reset(self) -> None: + """Clear collected data, and prepare to collect more.""" + self.data_lock = self.threading.Lock() if self.threading else None + + # The trace data we are collecting. + self.data: TTraceData = {} + + # A dictionary mapping file names to file tracer plugin names that will + # handle them. + self.file_tracers: dict[str, str] = {} + + self.disabled_plugins: set[str] = set() + + # The .should_trace_cache attribute is a cache from file names to + # coverage.FileDisposition objects, or None. When a file is first + # considered for tracing, a FileDisposition is obtained from + # Coverage.should_trace. Its .trace attribute indicates whether the + # file should be traced or not. If it should be, a plugin with dynamic + # file names can decide not to trace it based on the dynamic file name + # being excluded by the inclusion rules, in which case the + # FileDisposition will be replaced by None in the cache. + if env.PYPY: + import __pypy__ # pylint: disable=import-error + + # Alex Gaynor said: + # should_trace_cache is a strictly growing key: once a key is in + # it, it never changes. Further, the keys used to access it are + # generally constant, given sufficient context. That is to say, at + # any given point _trace() is called, pypy is able to know the key. + # This is because the key is determined by the physical source code + # line, and that's invariant with the call site. + # + # This property of a dict with immutable keys, combined with + # call-site-constant keys is a match for PyPy's module dict, + # which is optimized for such workloads. + # + # This gives a 20% benefit on the workload described at + # https://bitbucket.org/pypy/pypy/issue/1871/10x-slower-than-cpython-under-coverage + self.should_trace_cache = __pypy__.newdict("module") + else: + self.should_trace_cache = {} + + # Our active Tracers. + self.tracers: list[Tracer] = [] + + self._clear_data() + + def lock_data(self) -> None: + """Lock self.data_lock, for use by the C tracer.""" + if self.data_lock is not None: + self.data_lock.acquire() + + def unlock_data(self) -> None: + """Unlock self.data_lock, for use by the C tracer.""" + if self.data_lock is not None: + self.data_lock.release() + + def _start_tracer(self) -> TTraceFn | None: + """Start a new Tracer object, and store it in self.tracers.""" + tracer = self.core.tracer_class(**self.core.tracer_kwargs) + tracer.data = self.data + tracer.lock_data = self.lock_data + tracer.unlock_data = self.unlock_data + tracer.trace_arcs = self.branch + tracer.should_trace = self.should_trace + tracer.should_trace_cache = self.should_trace_cache + tracer.warn = self.warn + + if hasattr(tracer, "concur_id_func"): + tracer.concur_id_func = self.concur_id_func + if hasattr(tracer, "file_tracers"): + tracer.file_tracers = self.file_tracers + if hasattr(tracer, "threading"): + tracer.threading = self.threading + if hasattr(tracer, "check_include"): + tracer.check_include = self.check_include + if hasattr(tracer, "should_start_context"): + tracer.should_start_context = self.should_start_context + if hasattr(tracer, "switch_context"): + tracer.switch_context = self.switch_context + if hasattr(tracer, "disable_plugin"): + tracer.disable_plugin = self.disable_plugin + + fn = tracer.start() + self.tracers.append(tracer) + + return fn + + # The trace function has to be set individually on each thread before + # execution begins. Ironically, the only support the threading module has + # for running code before the thread main is the tracing function. So we + # install this as a trace function, and the first time it's called, it does + # the real trace installation. + # + # New in 3.12: threading.settrace_all_threads: https://github.com/python/cpython/pull/96681 + + def _installation_trace(self, frame: FrameType, event: str, arg: Any) -> TTraceFn | None: + """Called on new threads, installs the real tracer.""" + # Remove ourselves as the trace function. + sys.settrace(None) + # Install the real tracer. + fn: TTraceFn | None = self._start_tracer() + # Invoke the real trace function with the current event, to be sure + # not to lose an event. + if fn: + fn = fn(frame, event, arg) + # Return the new trace function to continue tracing in this scope. + return fn + + def start(self) -> None: + """Start collecting trace information.""" + # We may be a new collector in a forked process. The old process' + # collectors will be in self._collectors, but they won't be usable. + # Find them and discard them. + keep_collectors = [] + for c in self._collectors: + if c.pid == self.pid: + keep_collectors.append(c) + else: + c.post_fork() + self._collectors[:] = keep_collectors + + if self._collectors: + self._collectors[-1].pause() + + self.tracers = [] + + try: + # Install the tracer on this thread. + self._start_tracer() + except: + if self._collectors: + self._collectors[-1].resume() + raise + + # If _start_tracer succeeded, then we add ourselves to the global + # stack of collectors. + self._collectors.append(self) + + # Install our installation tracer in threading, to jump-start other + # threads. + if self.core.systrace and self.threading: + self.threading.settrace(self._installation_trace) + + def stop(self) -> None: + """Stop collecting trace information.""" + assert self._collectors + if self._collectors[-1] is not self: + print("self._collectors:") + for c in self._collectors: + print(f" {c!r}\n{c.origin}") + assert self._collectors[-1] is self, ( + f"Expected current collector to be {self!r}, but it's {self._collectors[-1]!r}" + ) + + self.pause() + + # Remove this Collector from the stack, and resume the one underneath (if any). + self._collectors.pop() + if self._collectors: + self._collectors[-1].resume() + + def pause(self) -> None: + """Pause tracing, but be prepared to `resume`.""" + for tracer in self.tracers: + tracer.stop() + stats = tracer.get_stats() + if stats: + print(f"\nCoverage.py {tracer.__class__.__name__} stats:") + for k, v in human_sorted_items(stats.items()): + print(f"{k:>20}: {v}") + if self.threading: + self.threading.settrace(None) + + def resume(self) -> None: + """Resume tracing after a `pause`.""" + for tracer in self.tracers: + tracer.start() + if self.core.systrace: + if self.threading: + self.threading.settrace(self._installation_trace) + else: + self._start_tracer() + + def post_fork(self) -> None: + """After a fork, tracers might need to adjust.""" + for tracer in self.tracers: + if hasattr(tracer, "post_fork"): + tracer.post_fork() + + def _activity(self) -> bool: + """Has any activity been traced? + + Returns a boolean, True if any trace function was invoked. + + """ + return any(tracer.activity() for tracer in self.tracers) + + def switch_context(self, new_context: str | None) -> None: + """Switch to a new dynamic context.""" + context: str | None + self.flush_data() + if self.static_context: + context = self.static_context + if new_context: + context += "|" + new_context + else: + context = new_context + self.covdata.set_context(context) + + def disable_plugin(self, disposition: TFileDisposition) -> None: + """Disable the plugin mentioned in `disposition`.""" + file_tracer = disposition.file_tracer + assert file_tracer is not None + plugin = file_tracer._coverage_plugin + plugin_name = plugin._coverage_plugin_name + self.warn(f"Disabling plug-in {plugin_name!r} due to previous exception") + plugin._coverage_enabled = False + disposition.trace = False + + @functools.cache # pylint: disable=method-cache-max-size-none + def cached_mapped_file(self, filename: str) -> str: + """A locally cached version of file names mapped through file_mapper.""" + return self.file_mapper(filename) + + def mapped_file_dict(self, d: Mapping[str, T]) -> dict[str, T]: + """Return a dict like d, but with keys modified by file_mapper.""" + # The call to list(items()) ensures that the GIL protects the dictionary + # iterator against concurrent modifications by tracers running + # in other threads. We try three times in case of concurrent + # access, hoping to get a clean copy. + runtime_err = None + for _ in range(3): # pragma: part covered + try: + items = list(d.items()) + except RuntimeError as ex: # pragma: cant happen + runtime_err = ex + else: + break + else: # pragma: cant happen + assert isinstance(runtime_err, Exception) + raise runtime_err + + return {self.cached_mapped_file(k): v for k, v in items if v} + + def plugin_was_disabled(self, plugin: CoveragePlugin) -> None: + """Record that `plugin` was disabled during the run.""" + self.disabled_plugins.add(plugin._coverage_plugin_name) + + def flush_data(self) -> bool: + """Save the collected data to our associated `CoverageData`. + + Data may have also been saved along the way. This forces the + last of the data to be saved. + + Returns True if there was data to save, False if not. + """ + if not self._activity(): + return False + + if self.branch: + if self.core.packed_arcs: + # Unpack the line number pairs packed into integers. See + # tracer.c:CTracer_record_pair for the C code that creates + # these packed ints. + arc_data: dict[str, list[TArc]] = {} + packed_data = cast(dict[str, set[int]], self.data) + + # The list() here and in the inner loop are to get a clean copy + # even as tracers are continuing to add data. + for fname, packeds in list(packed_data.items()): + tuples = [] + for packed in list(packeds): + l1 = packed & 0xFFFFF + l2 = (packed & (0xFFFFF << 20)) >> 20 + if packed & (1 << 40): + l1 *= -1 + if packed & (1 << 41): + l2 *= -1 + tuples.append((l1, l2)) + arc_data[fname] = tuples + else: + arc_data = cast(dict[str, list[TArc]], self.data) + self.covdata.add_arcs(self.mapped_file_dict(arc_data)) + else: + line_data = cast(dict[str, set[int]], self.data) + self.covdata.add_lines(self.mapped_file_dict(line_data)) + + file_tracers = { + k: v for k, v in self.file_tracers.items() if v not in self.disabled_plugins + } + self.covdata.add_file_tracers(self.mapped_file_dict(file_tracers)) + + self._clear_data() + return True diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/config.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/config.py new file mode 100644 index 0000000..274d71d --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/config.py @@ -0,0 +1,731 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Config file for coverage.py""" + +from __future__ import annotations + +import base64 +import collections +import configparser +import copy +import json +import os +import os.path +import re +from collections.abc import Iterable +from typing import Any, Callable, Final, Mapping + +from coverage.exceptions import ConfigError +from coverage.misc import human_sorted_items, isolate_module, substitute_variables +from coverage.tomlconfig import TomlConfigParser, TomlDecodeError +from coverage.types import ( + TConfigSectionIn, + TConfigSectionOut, + TConfigurable, + TConfigValueIn, + TConfigValueOut, + TPluginConfig, +) + +os = isolate_module(os) + + +class HandyConfigParser(configparser.ConfigParser): + """Our specialization of ConfigParser.""" + + def __init__(self, our_file: bool) -> None: + """Create the HandyConfigParser. + + `our_file` is True if this config file is specifically for coverage, + False if we are examining another config file (tox.ini, setup.cfg) + for possible settings. + """ + + super().__init__(interpolation=None) + self.section_prefixes = ["coverage:"] + if our_file: + self.section_prefixes.append("") + + def read( # type: ignore[override] + self, + filenames: Iterable[str], + encoding_unused: str | None = None, + ) -> list[str]: + """Read a file name as UTF-8 configuration data.""" + return super().read(filenames, encoding="utf-8") + + def real_section(self, section: str) -> str | None: + """Get the actual name of a section.""" + for section_prefix in self.section_prefixes: + real_section = section_prefix + section + has = super().has_section(real_section) + if has: + return real_section + return None + + def has_option(self, section: str, option: str) -> bool: # type: ignore[override] + real_section = self.real_section(section) + if real_section is not None: + return super().has_option(real_section, option) + return False + + def has_section(self, section: str) -> bool: # type: ignore[override] + return bool(self.real_section(section)) + + def options(self, section: str) -> list[str]: # type: ignore[override] + real_section = self.real_section(section) + if real_section is not None: + return super().options(real_section) + raise ConfigError(f"No section: {section!r}") + + def get_section(self, section: str) -> TConfigSectionOut: + """Get the contents of a section, as a dictionary.""" + d: dict[str, TConfigValueOut] = {} + for opt in self.options(section): + d[opt] = self.get(section, opt) + return d + + def get(self, section: str, option: str, *args: Any, **kwargs: Any) -> str: # type: ignore + """Get a value, replacing environment variables also. + + The arguments are the same as `ConfigParser.get`, but in the found + value, ``$WORD`` or ``${WORD}`` are replaced by the value of the + environment variable ``WORD``. + + Returns the finished value. + + """ + for section_prefix in self.section_prefixes: + real_section = section_prefix + section + if super().has_option(real_section, option): + break + else: + raise ConfigError(f"No option {option!r} in section: {section!r}") + + v: str = super().get(real_section, option, *args, **kwargs) + v = substitute_variables(v, os.environ) + return v + + def getfile(self, section: str, option: str) -> str: + """Fix up a file path setting.""" + path = self.get(section, option) + return process_file_value(path) + + def getlist(self, section: str, option: str) -> list[str]: + """Read a list of strings. + + The value of `section` and `option` is treated as a comma- and newline- + separated list of strings. Each value is stripped of white space. + + Returns the list of strings. + + """ + value_list = self.get(section, option) + values = [] + for value_line in value_list.split("\n"): + for value in value_line.split(","): + value = value.strip() + if value: + values.append(value) + return values + + def getregexlist(self, section: str, option: str) -> list[str]: + """Read a list of full-line regexes. + + The value of `section` and `option` is treated as a newline-separated + list of regexes. Each value is stripped of white space. + + Returns the list of strings. + + """ + line_list = self.get(section, option) + return process_regexlist(section, option, line_list.splitlines()) + + +TConfigParser = HandyConfigParser | TomlConfigParser + + +# The default line exclusion regexes. +DEFAULT_EXCLUDE = [ + r"#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(cover|COVER)", + r"^\s*(((async )?def .*?)?\)(\s*->.*?)?:\s*)?\.\.\.\s*(#|$)", + r"if (typing\.)?TYPE_CHECKING:", +] + +# The default partial branch regexes, to be modified by the user. +DEFAULT_PARTIAL = [ + r"#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(branch|BRANCH)", +] + +# The default partial branch regexes, based on Python semantics. +# These are any Python branching constructs that can't actually execute all +# their branches. +DEFAULT_PARTIAL_ALWAYS = [ + "while (True|1|False|0):", + "if (True|1|False|0):", +] + + +class CoverageConfig(TConfigurable, TPluginConfig): + """Coverage.py configuration. + + The attributes of this class are the various settings that control the + operation of coverage.py. + + """ + + # pylint: disable=too-many-instance-attributes + + def __init__(self) -> None: + """Initialize the configuration attributes to their defaults.""" + # Metadata about the config. + # We tried to read these config files. + self.config_files_attempted: list[str] = [] + # We did read these config files, but maybe didn't find any content for us. + self.config_files_read: list[str] = [] + # The file that gave us our configuration. + self.config_file: str | None = None + self._config_contents: bytes | None = None + + # Defaults for [run] and [report] + self._include = None + self._omit = None + + # Defaults for [run] + self.branch = False + self.command_line: str | None = None + self.concurrency: list[str] = [] + self.context: str | None = None + self.core: str | None = None + self.cover_pylib = False + self.data_file = ".coverage" + self.debug: list[str] = [] + self.debug_file: str | None = None + self.disable_warnings: list[str] = [] + self.dynamic_context: str | None = None + self.parallel = False + self.patch: list[str] = [] + self.plugins: list[str] = [] + self.relative_files = False + self.run_include: list[str] = [] + self.run_omit: list[str] = [] + self.sigterm = False + self.source: list[str] | None = None + self.source_pkgs: list[str] = [] + self.source_dirs: list[str] = [] + self.timid = False + self._crash: str | None = None + + # Defaults for [report] + self.exclude_list = DEFAULT_EXCLUDE[:] + self.exclude_also: list[str] = [] + self.fail_under = 0.0 + self.format: str | None = None + self.ignore_errors = False + self.include_namespace_packages = False + self.report_include: list[str] | None = None + self.report_omit: list[str] | None = None + self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:] + self.partial_list = DEFAULT_PARTIAL[:] + self.partial_also: list[str] = [] + self.precision = 0 + self.report_contexts: list[str] | None = None + self.show_missing = False + self.skip_covered = False + self.skip_empty = False + self.sort: str | None = None + + # Defaults for [html] + self.extra_css: str | None = None + self.html_dir = "htmlcov" + self.html_skip_covered: bool | None = None + self.html_skip_empty: bool | None = None + self.html_title = "Coverage report" + self.show_contexts = False + + # Defaults for [xml] + self.xml_output = "coverage.xml" + self.xml_package_depth = 99 + + # Defaults for [json] + self.json_output = "coverage.json" + self.json_pretty_print = False + self.json_show_contexts = False + + # Defaults for [lcov] + self.lcov_output = "coverage.lcov" + self.lcov_line_checksums = False + + # Defaults for [paths] + self.paths: dict[str, list[str]] = {} + + # Options for plugins + self.plugin_options: dict[str, TConfigSectionOut] = {} + + MUST_BE_LIST = { + "debug", + "concurrency", + "plugins", + "report_omit", + "report_include", + "run_omit", + "run_include", + "patch", + } + + # File paths to make absolute during serialization. + # The pairs are (config_key, must_exist). + SERIALIZE_ABSPATH = { + ("data_file", False), + ("debug_file", False), + # `source` can be directories or modules, so don't abspath it if it + # doesn't exist. + ("source", True), + ("source_dirs", False), + } + + def from_args(self, **kwargs: TConfigValueIn) -> None: + """Read config values from `kwargs`.""" + for k, v in kwargs.items(): + if v is not None: + if k in self.MUST_BE_LIST and isinstance(v, str): + v = [v] + setattr(self, k, v) + + def from_file(self, filename: str, warn: Callable[[str], None], our_file: bool) -> bool: + """Read configuration from a .rc file. + + `filename` is a file name to read. + + `our_file` is True if this config file is specifically for coverage, + False if we are examining another config file (tox.ini, setup.cfg) + for possible settings. + + Returns True or False, whether the file could be read, and it had some + coverage.py settings in it. + + """ + _, ext = os.path.splitext(filename) + cp: TConfigParser + if ext == ".toml": + cp = TomlConfigParser(our_file) + else: + cp = HandyConfigParser(our_file) + + self.config_files_attempted.append(os.path.abspath(filename)) + + try: + files_read = cp.read(filename) + except (configparser.Error, TomlDecodeError) as err: + raise ConfigError(f"Couldn't read config file {filename}: {err}") from err + if not files_read: + return False + + self.config_files_read.extend(map(os.path.abspath, files_read)) + + any_set = False + try: + for option_spec in self.CONFIG_FILE_OPTIONS: + was_set = self._set_attr_from_config_option(cp, *option_spec) + if was_set: + any_set = True + except ValueError as err: + raise ConfigError(f"Couldn't read config file {filename}: {err}") from err + + # Check that there are no unrecognized options. + all_options = collections.defaultdict(set) + for option_spec in self.CONFIG_FILE_OPTIONS: + section, option = option_spec[1].split(":") + all_options[section].add(option) + + for section, options in all_options.items(): + real_section = cp.real_section(section) + if real_section: + for unknown in set(cp.options(section)) - options: + warn( + "Unrecognized option '[{}] {}=' in config file {}".format( + real_section, + unknown, + filename, + ), + ) + + # [paths] is special + if cp.has_section("paths"): + for option in cp.options("paths"): + self.paths[option] = cp.getlist("paths", option) + any_set = True + + # plugins can have options + for plugin in self.plugins: + if cp.has_section(plugin): + self.plugin_options[plugin] = cp.get_section(plugin) + any_set = True + + # Was this file used as a config file? If it's specifically our file, + # then it was used. If we're piggybacking on someone else's file, + # then it was only used if we found some settings in it. + if our_file: + used = True + else: + used = any_set + + if used: + self.config_file = os.path.abspath(filename) + with open(filename, "rb") as f: + self._config_contents = f.read() + + return used + + def copy(self) -> CoverageConfig: + """Return a copy of the configuration.""" + return copy.deepcopy(self) + + CONCURRENCY_CHOICES: Final[set[str]] = { + "thread", + "gevent", + "greenlet", + "eventlet", + "multiprocessing", + } + + # Mutually exclusive concurrency settings. + LIGHT_THREADS = {"greenlet", "eventlet", "gevent"} + + CONFIG_FILE_OPTIONS = [ + # These are *args for _set_attr_from_config_option: + # (attr, where, type_="") + # + # attr is the attribute to set on the CoverageConfig object. + # where is the section:name to read from the configuration file. + # type_ is the optional type to apply, by using .getTYPE to read the + # configuration value from the file. + # + # [run] + ("branch", "run:branch", "boolean"), + ("command_line", "run:command_line"), + ("concurrency", "run:concurrency", "list"), + ("context", "run:context"), + ("core", "run:core"), + ("cover_pylib", "run:cover_pylib", "boolean"), + ("data_file", "run:data_file", "file"), + ("debug", "run:debug", "list"), + ("debug_file", "run:debug_file", "file"), + ("disable_warnings", "run:disable_warnings", "list"), + ("dynamic_context", "run:dynamic_context"), + ("parallel", "run:parallel", "boolean"), + ("patch", "run:patch", "list"), + ("plugins", "run:plugins", "list"), + ("relative_files", "run:relative_files", "boolean"), + ("run_include", "run:include", "list"), + ("run_omit", "run:omit", "list"), + ("sigterm", "run:sigterm", "boolean"), + ("source", "run:source", "list"), + ("source_pkgs", "run:source_pkgs", "list"), + ("source_dirs", "run:source_dirs", "list"), + ("timid", "run:timid", "boolean"), + ("_crash", "run:_crash"), + # + # [report] + ("exclude_list", "report:exclude_lines", "regexlist"), + ("exclude_also", "report:exclude_also", "regexlist"), + ("fail_under", "report:fail_under", "float"), + ("format", "report:format"), + ("ignore_errors", "report:ignore_errors", "boolean"), + ("include_namespace_packages", "report:include_namespace_packages", "boolean"), + ("partial_always_list", "report:partial_branches_always", "regexlist"), + ("partial_list", "report:partial_branches", "regexlist"), + ("partial_also", "report:partial_also", "regexlist"), + ("precision", "report:precision", "int"), + ("report_contexts", "report:contexts", "list"), + ("report_include", "report:include", "list"), + ("report_omit", "report:omit", "list"), + ("show_missing", "report:show_missing", "boolean"), + ("skip_covered", "report:skip_covered", "boolean"), + ("skip_empty", "report:skip_empty", "boolean"), + ("sort", "report:sort"), + # + # [html] + ("extra_css", "html:extra_css"), + ("html_dir", "html:directory", "file"), + ("html_skip_covered", "html:skip_covered", "boolean"), + ("html_skip_empty", "html:skip_empty", "boolean"), + ("html_title", "html:title"), + ("show_contexts", "html:show_contexts", "boolean"), + # + # [xml] + ("xml_output", "xml:output", "file"), + ("xml_package_depth", "xml:package_depth", "int"), + # + # [json] + ("json_output", "json:output", "file"), + ("json_pretty_print", "json:pretty_print", "boolean"), + ("json_show_contexts", "json:show_contexts", "boolean"), + # + # [lcov] + ("lcov_output", "lcov:output", "file"), + ("lcov_line_checksums", "lcov:line_checksums", "boolean"), + ] + + def _set_attr_from_config_option( + self, + cp: TConfigParser, + attr: str, + where: str, + type_: str = "", + ) -> bool: + """Set an attribute on self if it exists in the ConfigParser. + + Returns True if the attribute was set. + + """ + section, option = where.split(":") + if cp.has_option(section, option): + method = getattr(cp, f"get{type_}") + setattr(self, attr, method(section, option)) + return True + return False + + def get_plugin_options(self, plugin: str) -> TConfigSectionOut: + """Get a dictionary of options for the plugin named `plugin`.""" + return self.plugin_options.get(plugin, {}) + + def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) -> None: + """Set an option in the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + `value` is the new value for the option. + + """ + # Special-cased options. + if option_name == "paths": + # This is ugly, but type-checks and ensures the values are close + # to right. + self.paths = {} + assert isinstance(value, Mapping) + for k, v in value.items(): + assert isinstance(v, Iterable) + self.paths[k] = list(v) + return + + # Check all the hard-coded options. + for option_spec in self.CONFIG_FILE_OPTIONS: + attr, where = option_spec[:2] + if where == option_name: + setattr(self, attr, value) + return + + # See if it's a plugin option. + plugin_name, _, key = option_name.partition(":") + if key and plugin_name in self.plugins: + self.plugin_options.setdefault(plugin_name, {})[key] = value # type: ignore[index] + return + + # If we get here, we didn't find the option. + raise ConfigError(f"No such option: {option_name!r}") + + def get_option(self, option_name: str) -> TConfigValueOut | None: + """Get an option from the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + Returns the value of the option. + + """ + # Special-cased options. + if option_name == "paths": + return self.paths + + # Check all the hard-coded options. + for option_spec in self.CONFIG_FILE_OPTIONS: + attr, where = option_spec[:2] + if where == option_name: + return getattr(self, attr) # type: ignore[no-any-return] + + # See if it's a plugin option. + plugin_name, _, key = option_name.partition(":") + if key and plugin_name in self.plugins: + return self.plugin_options.get(plugin_name, {}).get(key) + + # If we get here, we didn't find the option. + raise ConfigError(f"No such option: {option_name!r}") + + def post_process(self) -> None: + """Make final adjustments to settings to make them usable.""" + self.paths = {k: [process_file_value(f) for f in v] for k, v in self.paths.items()} + + self.exclude_list += self.exclude_also + self.partial_list += self.partial_also + + if "subprocess" in self.patch: + self.parallel = True + + # We can handle a few concurrency options here, but only one at a time. + concurrencies = set(self.concurrency) + unknown = concurrencies - self.CONCURRENCY_CHOICES + if unknown: + show = ", ".join(sorted(unknown)) + raise ConfigError(f"Unknown concurrency choices: {show}") + light_threads = concurrencies & self.LIGHT_THREADS + if len(light_threads) > 1: + show = ", ".join(sorted(light_threads)) + raise ConfigError(f"Conflicting concurrency settings: {show}") + + def debug_info(self) -> list[tuple[str, Any]]: + """Make a list of (name, value) pairs for writing debug info.""" + return human_sorted_items((k, v) for k, v in self.__dict__.items() if not k.startswith("_")) + + def serialize(self) -> str: + """Convert to a string that can be ingested with `deserialize`. + + File paths used by `coverage run` are made absolute to ensure the + deserialized config will refer to the same files. + """ + data = {k: v for k, v in self.__dict__.items() if not k.startswith("_")} + for k, must_exist in self.SERIALIZE_ABSPATH: + abs_fn = abs_path_if_exists if must_exist else os.path.abspath + v = data[k] + if isinstance(v, list): + v = list(map(abs_fn, v)) + elif isinstance(v, str): + v = abs_fn(v) + data[k] = v + return base64.b64encode(json.dumps(data).encode()).decode() + + @classmethod + def deserialize(cls, config_str: str) -> CoverageConfig: + """Take a string from `serialize`, and make a CoverageConfig.""" + data = json.loads(base64.b64decode(config_str.encode()).decode()) + config = cls() + config.__dict__.update(data) + return config + + +def process_file_value(path: str) -> str: + """Make adjustments to a file path to make it usable.""" + return os.path.expanduser(path) + + +def abs_path_if_exists(path: str) -> str: + """os.path.abspath, but only if the path exists.""" + if os.path.exists(path): + return os.path.abspath(path) + else: + return path + + +def process_regexlist(name: str, option: str, values: list[str]) -> list[str]: + """Check the values in a regex list and keep the non-blank ones.""" + value_list = [] + for value in values: + value = value.strip() + try: + re.compile(value) + except re.error as e: + raise ConfigError(f"Invalid [{name}].{option} value {value!r}: {e}") from e + if value: + value_list.append(value) + return value_list + + +def config_files_to_try(config_file: bool | str) -> list[tuple[str, bool, bool]]: + """What config files should we try to read? + + Returns a list of tuples: + (filename, is_our_file, was_file_specified) + """ + + # Some API users were specifying ".coveragerc" to mean the same as + # True, so make it so. + if config_file == ".coveragerc": + config_file = True + specified_file = config_file is not True + if not specified_file: + # No file was specified. Check COVERAGE_RCFILE. + rcfile = os.getenv("COVERAGE_RCFILE") + if rcfile: + config_file = rcfile + specified_file = True + if not specified_file: + # Still no file specified. Default to .coveragerc + config_file = ".coveragerc" + assert isinstance(config_file, str) + files_to_try = [ + (config_file, True, specified_file), + ("setup.cfg", False, False), + ("tox.ini", False, False), + ("pyproject.toml", False, False), + ] + return files_to_try + + +def read_coverage_config( + config_file: bool | str, + warn: Callable[[str], None], + **kwargs: TConfigValueIn, +) -> CoverageConfig: + """Read the coverage.py configuration. + + Arguments: + config_file: a boolean or string, see the `Coverage` class for the + tricky details. + warn: a function to issue warnings. + all others: keyword arguments from the `Coverage` class, used for + setting values in the configuration. + + Returns: + config: + config is a CoverageConfig object read from the appropriate + configuration file. + + """ + # Build the configuration from a number of sources: + # 1) defaults: + config = CoverageConfig() + + # 2) from a file: + if config_file: + files_to_try = config_files_to_try(config_file) + + for fname, our_file, specified_file in files_to_try: + config_read = config.from_file(fname, warn, our_file=our_file) + if config_read: + break + if specified_file: + raise ConfigError(f"Couldn't read {fname!r} as a config file") + + # 3) from environment variables: + env_data_file = os.getenv("COVERAGE_FILE") + if env_data_file: + config.data_file = env_data_file + + # $set_env.py: COVERAGE_DEBUG - Debug options: https://coverage.rtfd.io/cmd.html#debug + debugs = os.getenv("COVERAGE_DEBUG") + if debugs: + config.debug.extend(d.strip() for d in debugs.split(",")) + + # Read the COVERAGE_CORE environment variable for backward compatibility, + # and because we use it in the test suite to pick a specific core. + env_core = os.getenv("COVERAGE_CORE") + if env_core: + config.core = env_core + + # 4) from constructor arguments: + config.from_args(**kwargs) + + # 5) for our benchmark, force settings using a secret environment variable: + force_file = os.getenv("COVERAGE_FORCE_CONFIG") + if force_file: + config.from_file(force_file, warn, our_file=True) + + # Once all the config has been collected, there's a little post-processing + # to do. + config.post_process() + + return config diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/context.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/context.py new file mode 100644 index 0000000..bb4b3a1 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/context.py @@ -0,0 +1,74 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Determine contexts for coverage.py""" + +from __future__ import annotations + +from collections.abc import Sequence +from types import FrameType + +from coverage.types import TShouldStartContextFn + + +def combine_context_switchers( + context_switchers: Sequence[TShouldStartContextFn], +) -> TShouldStartContextFn | None: + """Create a single context switcher from multiple switchers. + + `context_switchers` is a list of functions that take a frame as an + argument and return a string to use as the new context label. + + Returns a function that composites `context_switchers` functions, or None + if `context_switchers` is an empty list. + + When invoked, the combined switcher calls `context_switchers` one-by-one + until a string is returned. The combined switcher returns None if all + `context_switchers` return None. + """ + if not context_switchers: + return None + + if len(context_switchers) == 1: + return context_switchers[0] + + def should_start_context(frame: FrameType) -> str | None: + """The combiner for multiple context switchers.""" + for switcher in context_switchers: + new_context = switcher(frame) + if new_context is not None: + return new_context + return None + + return should_start_context + + +def should_start_context_test_function(frame: FrameType) -> str | None: + """Is this frame calling a test_* function?""" + co_name = frame.f_code.co_name + if co_name.startswith("test") or co_name == "runTest": + return qualname_from_frame(frame) + return None + + +def qualname_from_frame(frame: FrameType) -> str | None: + """Get a qualified name for the code running in `frame`.""" + co = frame.f_code + fname = co.co_name + method = None + if co.co_argcount and co.co_varnames[0] == "self": + self = frame.f_locals.get("self", None) + method = getattr(self, fname, None) + + if method is None: + func = frame.f_globals.get(fname) + if func is None: + return None + return f"{func.__module__}.{fname}" + + func = getattr(method, "__func__", None) + if func is None: + cls = self.__class__ + return f"{cls.__module__}.{cls.__name__}.{fname}" + + return f"{func.__module__}.{func.__qualname__}" diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/control.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/control.py new file mode 100644 index 0000000..81308d3 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/control.py @@ -0,0 +1,1481 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Central control stuff for coverage.py.""" + +from __future__ import annotations + +import atexit +import collections +import contextlib +import datetime +import functools +import os +import os.path +import platform +import signal +import sys +import threading +import time +import warnings +from collections.abc import Iterable, Iterator +from types import FrameType +from typing import IO, Any, Callable, cast + +from coverage import env +from coverage.annotate import AnnotateReporter +from coverage.collector import Collector +from coverage.config import CoverageConfig, read_coverage_config +from coverage.context import combine_context_switchers, should_start_context_test_function +from coverage.core import CTRACER_FILE, Core +from coverage.data import CoverageData, combine_parallel_data +from coverage.debug import ( + DebugControl, + NoDebugging, + relevant_environment_display, + short_stack, + write_formatted_info, +) +from coverage.disposition import disposition_debug_msg +from coverage.exceptions import ConfigError, CoverageException, CoverageWarning, PluginError +from coverage.files import PathAliases, abs_file, relative_filename, set_relative_directory +from coverage.html import HtmlReporter +from coverage.inorout import InOrOut +from coverage.jsonreport import JsonReporter +from coverage.lcovreport import LcovReporter +from coverage.misc import ( + DefaultValue, + bool_or_none, + ensure_dir_for_file, + isolate_module, + join_regex, +) +from coverage.multiproc import patch_multiprocessing +from coverage.patch import apply_patches +from coverage.plugin import FileReporter +from coverage.plugin_support import Plugins, TCoverageInit +from coverage.python import PythonFileReporter +from coverage.report import SummaryReporter +from coverage.report_core import render_report +from coverage.results import Analysis, analysis_from_file_reporter +from coverage.types import ( + FilePath, + TConfigSectionIn, + TConfigurable, + TConfigValueIn, + TConfigValueOut, + TFileDisposition, + TLineNo, + TMorf, +) +from coverage.version import __url__ +from coverage.xmlreport import XmlReporter + +os = isolate_module(os) + + +@contextlib.contextmanager +def override_config(cov: Coverage, **kwargs: TConfigValueIn) -> Iterator[None]: + """Temporarily tweak the configuration of `cov`. + + The arguments are applied to `cov.config` with the `from_args` method. + At the end of the with-statement, the old configuration is restored. + """ + original_config = cov.config + cov.config = cov.config.copy() + try: + cov.config.from_args(**kwargs) + yield + finally: + cov.config = original_config + + +DEFAULT_DATAFILE = DefaultValue("MISSING") +_DEFAULT_DATAFILE = DEFAULT_DATAFILE # Just in case, for backwards compatibility +CONFIG_DATA_PREFIX = ":data:" + + +class Coverage(TConfigurable): + """Programmatic access to coverage.py. + + To use:: + + from coverage import Coverage + + cov = Coverage() + cov.start() + #.. call your code .. + cov.stop() + cov.html_report(directory="covhtml") + + A context manager is available to do the same thing:: + + cov = Coverage() + with cov.collect(): + #.. call your code .. + cov.html_report(directory="covhtml") + + Note: in keeping with Python custom, names starting with underscore are + not part of the public API. They might stop working at any point. Please + limit yourself to documented methods to avoid problems. + + Methods can raise any of the exceptions described in :ref:`api_exceptions`. + + """ + + # The stack of started Coverage instances. + _instances: list[Coverage] = [] + + @classmethod + def current(cls) -> Coverage | None: + """Get the latest started `Coverage` instance, if any. + + Returns: a `Coverage` instance, or None. + + .. versionadded:: 5.0 + + """ + if cls._instances: + return cls._instances[-1] + else: + return None + + def __init__( # pylint: disable=too-many-arguments + self, + data_file: FilePath | DefaultValue | None = DEFAULT_DATAFILE, + data_suffix: str | bool | None = None, + cover_pylib: bool | None = None, + auto_data: bool = False, + timid: bool | None = None, + branch: bool | None = None, + config_file: FilePath | bool = True, + source: Iterable[str] | None = None, + source_pkgs: Iterable[str] | None = None, + source_dirs: Iterable[str] | None = None, + omit: str | Iterable[str] | None = None, + include: str | Iterable[str] | None = None, + debug: Iterable[str] | None = None, + concurrency: str | Iterable[str] | None = None, + check_preimported: bool = False, + context: str | None = None, + messages: bool = False, + plugins: Iterable[Callable[..., None]] | None = None, + ) -> None: + """ + Many of these arguments duplicate and override values that can be + provided in a configuration file. Parameters that are missing here + will use values from the config file. + + `data_file` is the base name of the data file to use. The config value + defaults to ".coverage". None can be provided to prevent writing a data + file. `data_suffix` is appended (with a dot) to `data_file` to create + the final file name. If `data_suffix` is simply True, then a suffix is + created with the machine and process identity included. + + `cover_pylib` is a boolean determining whether Python code installed + with the Python interpreter is measured. This includes the Python + standard library and any packages installed with the interpreter. + + If `auto_data` is true, then any existing data file will be read when + coverage measurement starts, and data will be saved automatically when + measurement stops. + + If `timid` is true, then a slower and simpler trace function will be + used. This is important for some environments where manipulation of + tracing functions breaks the faster trace function. + + If `branch` is true, then branch coverage will be measured in addition + to the usual statement coverage. + + `config_file` determines what configuration file to read: + + * If it is ".coveragerc", it is interpreted as if it were True, + for backward compatibility. + + * If it is a string, it is the name of the file to read. If the + file can't be read, it is an error. + + * If it is True, then a few standard files names are tried + (".coveragerc", "setup.cfg", "tox.ini"). It is not an error for + these files to not be found. + + * If it is False, then no configuration file is read. + + `source` is a list of file paths or package names. Only code located + in the trees indicated by the file paths or package names will be + measured. + + `source_pkgs` is a list of package names. It works the same as + `source`, but can be used to name packages where the name can also be + interpreted as a file path. + + `source_dirs` is a list of file paths. It works the same as + `source`, but raises an error if the path doesn't exist, rather + than being treated as a package name. + + `include` and `omit` are lists of file name patterns. Files that match + `include` will be measured, files that match `omit` will not. Each + will also accept a single string argument. + + `debug` is a list of strings indicating what debugging information is + desired. + + `concurrency` is a string indicating the concurrency library being used + in the measured code. Without this, coverage.py will get incorrect + results if these libraries are in use. Valid strings are "greenlet", + "eventlet", "gevent", "multiprocessing", or "thread" (the default). + This can also be a list of these strings. + + If `check_preimported` is true, then when coverage is started, the + already-imported files will be checked to see if they should be + measured by coverage. Importing measured files before coverage is + started can mean that code is missed. + + `context` is a string to use as the :ref:`static context + ` label for collected data. + + If `messages` is true, some messages will be printed to stdout + indicating what is happening. + + If `plugins` are passed, they are an iterable of function objects + accepting a `reg` object to register plugins, as described in + :ref:`api_plugin`. When they are provided, they will override the + plugins found in the coverage configuration file. + + .. versionadded:: 4.0 + The `concurrency` parameter. + + .. versionadded:: 4.2 + The `concurrency` parameter can now be a list of strings. + + .. versionadded:: 5.0 + The `check_preimported` and `context` parameters. + + .. versionadded:: 5.3 + The `source_pkgs` parameter. + + .. versionadded:: 6.0 + The `messages` parameter. + + .. versionadded:: 7.7 + The `plugins` parameter. + + .. versionadded:: 7.8 + The `source_dirs` parameter. + """ + # Start self.config as a usable default configuration. It will soon be + # replaced with the real configuration. + self.config = CoverageConfig() + + # data_file=None means no disk file at all. data_file missing means + # use the value from the config file. + self._no_disk = data_file is None + if isinstance(data_file, DefaultValue): + data_file = None + if data_file is not None: + data_file = os.fspath(data_file) + + # This is injectable by tests. + self._debug_file: IO[str] | None = None + + self._auto_load = self._auto_save = auto_data + self._data_suffix_specified = data_suffix + + # Is it ok for no data to be collected? + self._warn_no_data = True + self._warn_unimported_source = True + self._warn_preimported_source = check_preimported + self._no_warn_slugs: set[str] = set() + self._messages = messages + + # If we're invoked from a .pth file, we shouldn't try to make another one. + self._make_pth_file = True + + # A record of all the warnings that have been issued. + self._warnings: list[str] = [] + + # Other instance attributes, set with placebos or placeholders. + # More useful objects will be created later. + self._debug: DebugControl = NoDebugging() + self._inorout: InOrOut | None = None + self._plugins: Plugins = Plugins() + self._plugin_override = cast(Iterable[TCoverageInit] | None, plugins) + self._data: CoverageData | None = None + self._data_to_close: list[CoverageData] = [] + self._core: Core | None = None + self._collector: Collector | None = None + self._metacov = False + + self._file_mapper: Callable[[str], str] = abs_file + self._data_suffix = self._run_suffix = None + self._exclude_re: dict[str, str] = {} + self._old_sigterm: Callable[[int, FrameType | None], Any] | None = None + + # State machine variables: + # Have we initialized everything? + self._inited = False + self._inited_for_start = False + # Have we started collecting and not stopped it? + self._started = False + # Should we write the debug output? + self._should_write_debug = True + + # Build our configuration from a number of sources. + if isinstance(config_file, str) and config_file.startswith(CONFIG_DATA_PREFIX): + self.config = CoverageConfig.deserialize(config_file[len(CONFIG_DATA_PREFIX) :]) + else: + if not isinstance(config_file, bool): + config_file = os.fspath(config_file) + self.config = read_coverage_config( + config_file=config_file, + warn=self._warn, + data_file=data_file, + cover_pylib=cover_pylib, + timid=timid, + branch=branch, + parallel=bool_or_none(data_suffix), + source=source, + source_pkgs=source_pkgs, + source_dirs=source_dirs, + run_omit=omit, + run_include=include, + debug=debug, + report_omit=omit, + report_include=include, + concurrency=concurrency, + context=context, + ) + + # If we have subprocess measurement happening automatically, then we + # want any explicit creation of a Coverage object to mean, this process + # is already coverage-aware, so don't auto-measure it. By now, the + # auto-creation of a Coverage object has already happened. But we can + # find it and tell it not to save its data. + if not env.METACOV: + _prevent_sub_process_measurement() + + def _init(self) -> None: + """Set all the initial state. + + This is called by the public methods to initialize state. This lets us + construct a :class:`Coverage` object, then tweak its state before this + function is called. + + """ + if self._inited: + return + + self._inited = True + + # Create and configure the debugging controller. + self._debug = DebugControl(self.config.debug, self._debug_file, self.config.debug_file) + if self._debug.should("process"): + self._debug.write("Coverage._init") + + if "multiprocessing" in (self.config.concurrency or ()): + # Multi-processing uses parallel for the subprocesses, so also use + # it for the main process. + self.config.parallel = True + + # _exclude_re is a dict that maps exclusion list names to compiled regexes. + self._exclude_re = {} + + set_relative_directory() + if self.config.relative_files: + self._file_mapper = relative_filename + + # Load plugins + self._plugins = Plugins(self._debug) + if self._plugin_override: + self._plugins.load_from_callables(self._plugin_override) + else: + self._plugins.load_from_config(self.config.plugins, self.config) + + # Run configuring plugins. + for plugin in self._plugins.configurers: + # We need an object with set_option and get_option. Either self or + # self.config will do. Choosing randomly stops people from doing + # other things with those objects, against the public API. Yes, + # this is a bit childish. :) + plugin.configure([self, self.config][int(time.time()) % 2]) + + def _post_init(self) -> None: + """Stuff to do after everything is initialized.""" + if self._should_write_debug: + self._should_write_debug = False + self._write_startup_debug() + + # "[run] _crash" will raise an exception if the value is close by in + # the call stack, for testing error handling. + if self.config._crash and self.config._crash in short_stack(): + raise RuntimeError(f"Crashing because called by {self.config._crash}") + + def _write_startup_debug(self) -> None: + """Write out debug info at startup if needed.""" + wrote_any = False + with self._debug.without_callers(): + if self._debug.should("config"): + write_formatted_info(self._debug.write, "config", self.config.debug_info()) + wrote_any = True + + if self._debug.should("sys"): + write_formatted_info(self._debug.write, "sys", self.sys_info()) + for plugin in self._plugins: + header = "sys: " + plugin._coverage_plugin_name + write_formatted_info(self._debug.write, header, plugin.sys_info()) + wrote_any = True + + if self._debug.should("pybehave"): + write_formatted_info(self._debug.write, "pybehave", env.debug_info()) + wrote_any = True + + if self._debug.should("sqlite"): + write_formatted_info(self._debug.write, "sqlite", CoverageData.sys_info()) + wrote_any = True + + if wrote_any: + write_formatted_info(self._debug.write, "end", ()) + + def _should_trace(self, filename: str, frame: FrameType) -> TFileDisposition: + """Decide whether to trace execution in `filename`. + + Calls `_should_trace_internal`, and returns the FileDisposition. + + """ + assert self._inorout is not None + disp = self._inorout.should_trace(filename, frame) + if self._debug.should("trace"): + self._debug.write(disposition_debug_msg(disp)) + return disp + + def _check_include_omit_etc(self, filename: str, frame: FrameType) -> bool: + """Check a file name against the include/omit/etc, rules, verbosely. + + Returns a boolean: True if the file should be traced, False if not. + + """ + assert self._inorout is not None + reason = self._inorout.check_include_omit_etc(filename, frame) + if self._debug.should("trace"): + if not reason: + msg = f"Including {filename!r}" + else: + msg = f"Not including {filename!r}: {reason}" + self._debug.write(msg) + + return not reason + + def _warn(self, msg: str, slug: str | None = None, once: bool = False) -> None: + """Use `msg` as a warning. + + For warning suppression, use `slug` as the shorthand. + + If `once` is true, only show this warning once (determined by the + slug.) + + """ + if not self._no_warn_slugs: + self._no_warn_slugs = set(self.config.disable_warnings) + + if slug in self._no_warn_slugs: + # Don't issue the warning + return + + self._warnings.append(msg) + if slug: + msg = f"{msg} ({slug}); see {__url__}/messages.html#warning-{slug}" + if self._debug.should("pid"): + msg = f"[{os.getpid()}] {msg}" + warnings.warn(msg, category=CoverageWarning, stacklevel=2) + + if once: + assert slug is not None + self._no_warn_slugs.add(slug) + + def _message(self, msg: str) -> None: + """Write a message to the user, if configured to do so.""" + if self._messages: + print(msg) + + def get_option(self, option_name: str) -> TConfigValueOut | None: + """Get an option from the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + Returns the value of the option. The type depends on the option + selected. + + As a special case, an `option_name` of ``"paths"`` will return an + dictionary with the entire ``[paths]`` section value. + + .. versionadded:: 4.0 + + """ + return self.config.get_option(option_name) + + def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) -> None: + """Set an option in the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with ``"run:branch"``. + + `value` is the new value for the option. This should be an + appropriate Python value. For example, use True for booleans, not the + string ``"True"``. + + As an example, calling: + + .. code-block:: python + + cov.set_option("run:branch", True) + + has the same effect as this configuration file: + + .. code-block:: ini + + [run] + branch = True + + As a special case, an `option_name` of ``"paths"`` will replace the + entire ``[paths]`` section. The value should be a dictionary. + + .. versionadded:: 4.0 + + """ + self.config.set_option(option_name, value) + + def load(self) -> None: + """Load previously-collected coverage data from the data file.""" + self._init() + if self._collector is not None: + self._collector.reset() + should_skip = self.config.parallel and not os.path.exists(self.config.data_file) + if not should_skip: + self._init_data(suffix=None) + self._post_init() + if not should_skip: + assert self._data is not None + self._data.read() + + def _init_for_start(self) -> None: + """Initialization for start()""" + # Construct the collector. + concurrency: list[str] = self.config.concurrency + if "multiprocessing" in concurrency: + if self.config.config_file is None: + raise ConfigError("multiprocessing requires a configuration file") + patch_multiprocessing(rcfile=self.config.config_file) + + dycon = self.config.dynamic_context + if not dycon or dycon == "none": + context_switchers = [] + elif dycon == "test_function": + context_switchers = [should_start_context_test_function] + else: + raise ConfigError(f"Don't understand dynamic_context setting: {dycon!r}") + + context_switchers.extend( + plugin.dynamic_context for plugin in self._plugins.context_switchers + ) + + should_start_context = combine_context_switchers(context_switchers) + + self._core = Core( + warn=self._warn, + debug=(self._debug if self._debug.should("core") else None), + config=self.config, + dynamic_contexts=(should_start_context is not None), + metacov=self._metacov, + ) + self._collector = Collector( + core=self._core, + should_trace=self._should_trace, + check_include=self._check_include_omit_etc, + should_start_context=should_start_context, + file_mapper=self._file_mapper, + branch=self.config.branch, + warn=self._warn, + concurrency=concurrency, + ) + + suffix = self._data_suffix_specified + if suffix: + if not isinstance(suffix, str): + # if data_suffix=True, use .machinename.pid.random + suffix = True + elif self.config.parallel: + if suffix is None: + suffix = True + elif not isinstance(suffix, str): + suffix = bool(suffix) + else: + suffix = None + + self._init_data(suffix) + + assert self._data is not None + self._collector.use_data(self._data, self.config.context) + + # Early warning if we aren't going to be able to support plugins. + if self._plugins.file_tracers and not self._core.supports_plugins: + self._warn( + "Plugin file tracers ({}) aren't supported with {}".format( + ", ".join( + plugin._coverage_plugin_name for plugin in self._plugins.file_tracers + ), + self._collector.tracer_name(), + ), + ) + for plugin in self._plugins.file_tracers: + plugin._coverage_enabled = False + + # Create the file classifying substructure. + self._inorout = InOrOut( + config=self.config, + warn=self._warn, + debug=(self._debug if self._debug.should("trace") else None), + include_namespace_packages=self.config.include_namespace_packages, + ) + self._inorout.plugins = self._plugins + self._inorout.disp_class = self._core.file_disposition_class + + # It's useful to write debug info after initing for start. + self._should_write_debug = True + + # Register our clean-up handlers. + atexit.register(self._atexit) + if self.config.sigterm: + is_main = (threading.current_thread() == threading.main_thread()) # fmt: skip + if is_main and not env.WINDOWS: + # The Python docs seem to imply that SIGTERM works uniformly even + # on Windows, but that's not my experience, and this agrees: + # https://stackoverflow.com/questions/35772001/x/35792192#35792192 + self._old_sigterm = signal.signal( # type: ignore[assignment] + signal.SIGTERM, + self._on_sigterm, + ) + + def _init_data(self, suffix: str | bool | None) -> None: + """Create a data file if we don't have one yet.""" + if self._data is None: + # Create the data file. We do this at construction time so that the + # data file will be written into the directory where the process + # started rather than wherever the process eventually chdir'd to. + ensure_dir_for_file(self.config.data_file) + self._data = CoverageData( + basename=self.config.data_file, + suffix=suffix, + warn=self._warn, + debug=self._debug, + no_disk=self._no_disk, + ) + self._data_to_close.append(self._data) + + def start(self) -> None: + """Start measuring code coverage. + + Coverage measurement is only collected in functions called after + :meth:`start` is invoked. Statements in the same scope as + :meth:`start` won't be measured. + + Once you invoke :meth:`start`, you must also call :meth:`stop` + eventually, or your process might not shut down cleanly. + + The :meth:`collect` method is a context manager to handle both + starting and stopping collection. + + """ + self._init() + if not self._inited_for_start: + self._inited_for_start = True + self._init_for_start() + self._post_init() + + assert self._collector is not None + assert self._inorout is not None + + # Issue warnings for possible problems. + self._inorout.warn_conflicting_settings() + + # See if we think some code that would eventually be measured has + # already been imported. + if self._warn_preimported_source: + self._inorout.warn_already_imported_files() + + if self._auto_load: + self.load() + + apply_patches(self, self.config, self._debug, make_pth_file=self._make_pth_file) + + self._collector.start() + self._started = True + self._instances.append(self) + + def stop(self) -> None: + """Stop measuring code coverage.""" + if self._instances: + if self._instances[-1] is self: + self._instances.pop() + if self._started: + assert self._collector is not None + self._collector.stop() + self._started = False + + @contextlib.contextmanager + def collect(self) -> Iterator[None]: + """A context manager to start/stop coverage measurement collection. + + .. versionadded:: 7.3 + + """ + self.start() + try: + yield + finally: + self.stop() # pragma: nested + + def _atexit(self, event: str = "atexit") -> None: + """Clean up on process shutdown.""" + if self._debug.should("process"): + self._debug.write(f"{event}: pid: {os.getpid()}, instance: {self!r}") + if self._started: + self.stop() + if self._auto_save or event == "sigterm": + self.save() + for d in self._data_to_close: + d.close(force=True) + + def _on_sigterm(self, signum_unused: int, frame_unused: FrameType | None) -> None: + """A handler for signal.SIGTERM.""" + self._atexit("sigterm") + # Statements after here won't be seen by metacov because we just wrote + # the data, and are about to kill the process. + signal.signal(signal.SIGTERM, self._old_sigterm) # pragma: not covered + os.kill(os.getpid(), signal.SIGTERM) # pragma: not covered + + def erase(self) -> None: + """Erase previously collected coverage data. + + This removes the in-memory data collected in this session as well as + discarding the data file. + + """ + self._init() + self._post_init() + if self._collector is not None: + self._collector.reset() + self._init_data(suffix=None) + assert self._data is not None + self._data.erase(parallel=self.config.parallel) + self._data = None + self._inited_for_start = False + + def switch_context(self, new_context: str) -> None: + """Switch to a new dynamic context. + + `new_context` is a string to use as the :ref:`dynamic context + ` label for collected data. If a :ref:`static + context ` is in use, the static and dynamic context + labels will be joined together with a pipe character. + + Coverage collection must be started already. + + .. versionadded:: 5.0 + + """ + if not self._started: # pragma: part started + raise CoverageException("Cannot switch context, coverage is not started") + + assert self._collector is not None + if self._collector.should_start_context: + self._warn("Conflicting dynamic contexts", slug="dynamic-conflict", once=True) + + self._collector.switch_context(new_context) + + def clear_exclude(self, which: str = "exclude") -> None: + """Clear the exclude list.""" + self._init() + setattr(self.config, f"{which}_list", []) + self._exclude_regex_stale() + + def exclude(self, regex: str, which: str = "exclude") -> None: + """Exclude source lines from execution consideration. + + A number of lists of regular expressions are maintained. Each list + selects lines that are treated differently during reporting. + + `which` determines which list is modified. The "exclude" list selects + lines that are not considered executable at all. The "partial" list + indicates lines with branches that are not taken. + + `regex` is a regular expression. The regex is added to the specified + list. If any of the regexes in the list is found in a line, the line + is marked for special treatment during reporting. + + """ + self._init() + excl_list = getattr(self.config, f"{which}_list") + excl_list.append(regex) + self._exclude_regex_stale() + + def _exclude_regex_stale(self) -> None: + """Drop all the compiled exclusion regexes, a list was modified.""" + self._exclude_re.clear() + + def _exclude_regex(self, which: str) -> str: + """Return a regex string for the given exclusion list.""" + if which not in self._exclude_re: + excl_list = getattr(self.config, f"{which}_list") + self._exclude_re[which] = join_regex(excl_list) + return self._exclude_re[which] + + def get_exclude_list(self, which: str = "exclude") -> list[str]: + """Return a list of excluded regex strings. + + `which` indicates which list is desired. See :meth:`exclude` for the + lists that are available, and their meaning. + + """ + self._init() + return cast(list[str], getattr(self.config, f"{which}_list")) + + def save(self) -> None: + """Save the collected coverage data to the data file.""" + data = self.get_data() + data.write() + + def _make_aliases(self) -> PathAliases: + """Create a PathAliases from our configuration.""" + aliases = PathAliases( + debugfn=(self._debug.write if self._debug.should("pathmap") else None), + relative=self.config.relative_files, + ) + for paths in self.config.paths.values(): + result = paths[0] + for pattern in paths[1:]: + aliases.add(pattern, result) + return aliases + + def combine( + self, + data_paths: Iterable[str] | None = None, + strict: bool = False, + keep: bool = False, + ) -> None: + """Combine together a number of similarly-named coverage data files. + + All coverage data files whose name starts with `data_file` (from the + coverage() constructor) will be read, and combined together into the + current measurements. + + `data_paths` is a list of files or directories from which data should + be combined. If no list is passed, then the data files from the + directory indicated by the current data file (probably the current + directory) will be combined. + + If `strict` is true, then it is an error to attempt to combine when + there are no data files to combine. + + If `keep` is true, then original input data files won't be deleted. + + .. versionadded:: 4.0 + The `data_paths` parameter. + + .. versionadded:: 4.3 + The `strict` parameter. + + .. versionadded: 5.5 + The `keep` parameter. + """ + self._init() + self._init_data(suffix=None) + self._post_init() + self.get_data() + + assert self._data is not None + combine_parallel_data( + self._data, + aliases=self._make_aliases(), + data_paths=data_paths, + strict=strict, + keep=keep, + message=self._message, + ) + + def get_data(self) -> CoverageData: + """Get the collected data. + + Also warn about various problems collecting data. + + Returns a :class:`coverage.CoverageData`, the collected coverage data. + + .. versionadded:: 4.0 + + """ + self._init() + self._init_data(suffix=None) + self._post_init() + + if self._collector is not None: + for plugin in self._plugins: + if not plugin._coverage_enabled: + self._collector.plugin_was_disabled(plugin) + + if self._collector.flush_data(): + self._post_save_work() + + assert self._data is not None + return self._data + + def _post_save_work(self) -> None: + """After saving data, look for warnings, post-work, etc. + + Warn about things that should have happened but didn't. + Look for un-executed files. + + """ + assert self._data is not None + assert self._inorout is not None + + # If there are still entries in the source_pkgs_unmatched list, + # then we never encountered those packages. + if self._warn_unimported_source: + self._inorout.warn_unimported_source() + + # Find out if we got any data. + if not self._data and self._warn_no_data: + self._warn("No data was collected.", slug="no-data-collected") + + # Touch all the files that could have executed, so that we can + # mark completely un-executed files as 0% covered. + file_paths = collections.defaultdict(list) + for file_path, plugin_name in self._inorout.find_possibly_unexecuted_files(): + file_path = self._file_mapper(file_path) + file_paths[plugin_name].append(file_path) + for plugin_name, paths in file_paths.items(): + self._data.touch_files(paths, plugin_name) + + # Backward compatibility with version 1. + def analysis(self, morf: TMorf) -> tuple[str, list[TLineNo], list[TLineNo], str]: + """Like `analysis2` but doesn't return excluded line numbers.""" + f, s, _, m, mf = self.analysis2(morf) + return f, s, m, mf + + def analysis2( + self, + morf: TMorf, + ) -> tuple[str, list[TLineNo], list[TLineNo], list[TLineNo], str]: + """Analyze a module. + + `morf` is a module or a file name. It will be analyzed to determine + its coverage statistics. The return value is a 5-tuple: + + * The file name for the module. + * A list of line numbers of executable statements. + * A list of line numbers of excluded statements. + * A list of line numbers of statements not run (missing from + execution). + * A readable formatted string of the missing line numbers. + + The analysis uses the source file itself and the current measured + coverage data. + + """ + analysis = self._analyze(morf) + return ( + analysis.filename, + sorted(analysis.statements), + sorted(analysis.excluded), + sorted(analysis.missing), + analysis.missing_formatted(), + ) + + @functools.lru_cache(maxsize=1) + def _analyze(self, morf: TMorf) -> Analysis: + """Analyze a module or file. Private for now.""" + self._init() + self._post_init() + + data = self.get_data() + file_reporter = self._get_file_reporter(morf) + filename = self._file_mapper(file_reporter.filename) + return analysis_from_file_reporter(data, self.config.precision, file_reporter, filename) + + def branch_stats(self, morf: TMorf) -> dict[TLineNo, tuple[int, int]]: + """Get branch statistics about a module. + + `morf` is a module or a file name. + + Returns a dict mapping line numbers to a tuple: + (total_exits, taken_exits). + + .. versionadded:: 7.7 + + """ + analysis = self._analyze(morf) + return analysis.branch_stats() + + @functools.lru_cache(maxsize=1) + def _get_file_reporter(self, morf: TMorf) -> FileReporter: + """Get a FileReporter for a module or file name.""" + assert self._data is not None + plugin = None + file_reporter: str | FileReporter = "python" + + if isinstance(morf, str): + mapped_morf = self._file_mapper(morf) + plugin_name = self._data.file_tracer(mapped_morf) + if plugin_name: + plugin = self._plugins.get(plugin_name) + + if plugin: + file_reporter = plugin.file_reporter(mapped_morf) + if file_reporter is None: + raise PluginError( + "Plugin {!r} did not provide a file reporter for {!r}.".format( + plugin._coverage_plugin_name, + morf, + ), + ) + + if file_reporter == "python": + file_reporter = PythonFileReporter(morf, self) + + assert isinstance(file_reporter, FileReporter) + return file_reporter + + def _get_file_reporters( + self, + morfs: Iterable[TMorf] | None = None, + ) -> list[tuple[FileReporter, TMorf]]: + """Get FileReporters for a list of modules or file names. + + For each module or file name in `morfs`, find a FileReporter. Return + a list pairing FileReporters with the morfs. + + If `morfs` is a single module or file name, this returns a list of one + FileReporter. If `morfs` is empty or None, then the list of all files + measured is used to find the FileReporters. + + """ + assert self._data is not None + if not morfs: + morfs = self._data.measured_files() + + # Be sure we have a collection. + if not isinstance(morfs, (list, tuple, set)): + morfs = [morfs] # type: ignore[list-item] + + morfs = sorted(morfs, key=lambda m: m if isinstance(m, str) else m.__name__) + return [(self._get_file_reporter(morf), morf) for morf in morfs] + + def _prepare_data_for_reporting(self) -> None: + """Re-map data before reporting, to get implicit "combine" behavior.""" + if self.config.paths: + mapped_data = CoverageData(warn=self._warn, debug=self._debug, no_disk=True) + if self._data is not None: + mapped_data.update(self._data, map_path=self._make_aliases().map) + self._data = mapped_data + self._data_to_close.append(mapped_data) + + def report( + self, + morfs: Iterable[TMorf] | None = None, + show_missing: bool | None = None, + ignore_errors: bool | None = None, + file: IO[str] | None = None, + omit: str | list[str] | None = None, + include: str | list[str] | None = None, + skip_covered: bool | None = None, + contexts: list[str] | None = None, + skip_empty: bool | None = None, + precision: int | None = None, + sort: str | None = None, + output_format: str | None = None, + ) -> float: + """Write a textual summary report to `file`. + + Each module in `morfs` is listed, with counts of statements, executed + statements, missing statements, and a list of lines missed. + + If `show_missing` is true, then details of which lines or branches are + missing will be included in the report. If `ignore_errors` is true, + then a failure while reporting a single file will not stop the entire + report. + + `file` is a file-like object, suitable for writing. + + `output_format` determines the format, either "text" (the default), + "markdown", or "total". + + `include` is a list of file name patterns. Files that match will be + included in the report. Files matching `omit` will not be included in + the report. + + If `skip_covered` is true, don't report on files with 100% coverage. + + If `skip_empty` is true, don't report on empty files (those that have + no statements). + + `contexts` is a list of regular expression strings. Only data from + :ref:`dynamic contexts ` that match one of those + expressions (using :func:`re.search `) will be + included in the report. + + `precision` is the number of digits to display after the decimal + point for percentages. + + All of the arguments default to the settings read from the + :ref:`configuration file `. + + Returns a float, the total percentage covered. + + .. versionadded:: 4.0 + The `skip_covered` parameter. + + .. versionadded:: 5.0 + The `contexts` and `skip_empty` parameters. + + .. versionadded:: 5.2 + The `precision` parameter. + + .. versionadded:: 7.0 + The `format` parameter. + + """ + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + show_missing=show_missing, + skip_covered=skip_covered, + report_contexts=contexts, + skip_empty=skip_empty, + precision=precision, + sort=sort, + format=output_format, + ): + reporter = SummaryReporter(self) + return reporter.report(morfs, outfile=file) + + def annotate( + self, + morfs: Iterable[TMorf] | None = None, + directory: str | None = None, + ignore_errors: bool | None = None, + omit: str | list[str] | None = None, + include: str | list[str] | None = None, + contexts: list[str] | None = None, + ) -> None: + """Annotate a list of modules. + + Each module in `morfs` is annotated. The source is written to a new + file, named with a ",cover" suffix, with each line prefixed with a + marker to indicate the coverage of the line. Covered lines have ">", + excluded lines have "-", and missing lines have "!". + + See :meth:`report` for other arguments. + + """ + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + report_contexts=contexts, + ): + reporter = AnnotateReporter(self) + reporter.report(morfs, directory=directory) + + def html_report( + self, + morfs: Iterable[TMorf] | None = None, + directory: str | None = None, + ignore_errors: bool | None = None, + omit: str | list[str] | None = None, + include: str | list[str] | None = None, + extra_css: str | None = None, + title: str | None = None, + skip_covered: bool | None = None, + show_contexts: bool | None = None, + contexts: list[str] | None = None, + skip_empty: bool | None = None, + precision: int | None = None, + ) -> float: + """Generate an HTML report. + + The HTML is written to `directory`. The file "index.html" is the + overview starting point, with links to more detailed pages for + individual modules. + + `extra_css` is a path to a file of other CSS to apply on the page. + It will be copied into the HTML directory. + + `title` is a text string (not HTML) to use as the title of the HTML + report. + + See :meth:`report` for other arguments. + + Returns a float, the total percentage covered. + + .. note:: + + The HTML report files are generated incrementally based on the + source files and coverage results. If you modify the report files, + the changes will not be considered. You should be careful about + changing the files in the report folder. + + """ + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + html_dir=directory, + extra_css=extra_css, + html_title=title, + html_skip_covered=skip_covered, + show_contexts=show_contexts, + report_contexts=contexts, + html_skip_empty=skip_empty, + precision=precision, + ): + reporter = HtmlReporter(self) + return reporter.report(morfs) + + def xml_report( + self, + morfs: Iterable[TMorf] | None = None, + outfile: str | None = None, + ignore_errors: bool | None = None, + omit: str | list[str] | None = None, + include: str | list[str] | None = None, + contexts: list[str] | None = None, + skip_empty: bool | None = None, + ) -> float: + """Generate an XML report of coverage results. + + The report is compatible with Cobertura reports. + + Each module in `morfs` is included in the report. `outfile` is the + path to write the file to, "-" will write to stdout. + + See :meth:`report` for other arguments. + + Returns a float, the total percentage covered. + + """ + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + xml_output=outfile, + report_contexts=contexts, + skip_empty=skip_empty, + ): + return render_report(self.config.xml_output, XmlReporter(self), morfs, self._message) + + def json_report( + self, + morfs: Iterable[TMorf] | None = None, + outfile: str | None = None, + ignore_errors: bool | None = None, + omit: str | list[str] | None = None, + include: str | list[str] | None = None, + contexts: list[str] | None = None, + pretty_print: bool | None = None, + show_contexts: bool | None = None, + ) -> float: + """Generate a JSON report of coverage results. + + Each module in `morfs` is included in the report. `outfile` is the + path to write the file to, "-" will write to stdout. + + `pretty_print` is a boolean, whether to pretty-print the JSON output or not. + + See :meth:`report` for other arguments. + + Returns a float, the total percentage covered. + + .. versionadded:: 5.0 + + """ + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + json_output=outfile, + report_contexts=contexts, + json_pretty_print=pretty_print, + json_show_contexts=show_contexts, + ): + return render_report(self.config.json_output, JsonReporter(self), morfs, self._message) + + def lcov_report( + self, + morfs: Iterable[TMorf] | None = None, + outfile: str | None = None, + ignore_errors: bool | None = None, + omit: str | list[str] | None = None, + include: str | list[str] | None = None, + contexts: list[str] | None = None, + ) -> float: + """Generate an LCOV report of coverage results. + + Each module in `morfs` is included in the report. `outfile` is the + path to write the file to, "-" will write to stdout. + + See :meth:`report` for other arguments. + + .. versionadded:: 6.3 + """ + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + lcov_output=outfile, + report_contexts=contexts, + ): + return render_report(self.config.lcov_output, LcovReporter(self), morfs, self._message) + + def sys_info(self) -> Iterable[tuple[str, Any]]: + """Return a list of (key, value) pairs showing internal information.""" + + import coverage as covmod + + self._init() + self._post_init() + + def plugin_info(plugins: list[Any]) -> list[str]: + """Make an entry for the sys_info from a list of plug-ins.""" + entries = [] + for plugin in plugins: + entry = plugin._coverage_plugin_name + if not plugin._coverage_enabled: + entry += " (disabled)" + entries.append(entry) + return entries + + info = [ + ("coverage_version", covmod.__version__), + ("coverage_module", covmod.__file__), + ("core", self._collector.tracer_name() if self._collector is not None else "-none-"), + ("CTracer", f"available from {CTRACER_FILE}" if CTRACER_FILE else "unavailable"), + ("plugins.file_tracers", plugin_info(self._plugins.file_tracers)), + ("plugins.configurers", plugin_info(self._plugins.configurers)), + ("plugins.context_switchers", plugin_info(self._plugins.context_switchers)), + ("configs_attempted", self.config.config_files_attempted), + ("configs_read", self.config.config_files_read), + ("config_file", self.config.config_file), + ( + "config_contents", + repr(self.config._config_contents) if self.config._config_contents else "-none-", + ), + ("data_file", self._data.data_filename() if self._data is not None else "-none-"), + ("python", sys.version.replace("\n", "")), + ("platform", platform.platform()), + ("implementation", platform.python_implementation()), + ("build", platform.python_build()), + ("gil_enabled", getattr(sys, "_is_gil_enabled", lambda: True)()), + ("executable", sys.executable), + ("def_encoding", sys.getdefaultencoding()), + ("fs_encoding", sys.getfilesystemencoding()), + ("pid", os.getpid()), + ("cwd", os.getcwd()), + ("path", sys.path), + ("environment", [f"{k} = {v}" for k, v in relevant_environment_display(os.environ)]), + ("command_line", " ".join(getattr(sys, "argv", ["-none-"]))), + ("time", f"{datetime.datetime.now():%Y-%m-%d %H:%M:%S}"), + ] + + if self._inorout is not None: + info.extend(self._inorout.sys_info()) + + return info + + +# Mega debugging... +# $set_env.py: COVERAGE_DEBUG_CALLS - Lots and lots of output about calls to Coverage. +if int(os.getenv("COVERAGE_DEBUG_CALLS", 0)): # pragma: debugging + from coverage.debug import decorate_methods, show_calls + + Coverage = decorate_methods( # type: ignore[misc] + show_calls(show_args=True), + butnot=["get_data"], + )(Coverage) + + +def process_startup(*, force: bool = False) -> Coverage | None: + """Call this at Python start-up to perhaps measure coverage. + + If the environment variable COVERAGE_PROCESS_START is defined, coverage + measurement is started. The value of the variable is the config file + to use. + + For details, see https://coverage.readthedocs.io/en/latest/subprocess.html. + + Returns the :class:`Coverage` instance that was started, or None if it was + not started by this call. + + """ + config_data = os.getenv("COVERAGE_PROCESS_CONFIG") + cps = os.getenv("COVERAGE_PROCESS_START") + if config_data is not None: + config_file = CONFIG_DATA_PREFIX + config_data + elif cps is not None: + config_file = cps + else: + # No request for coverage, nothing to do. + return None + + # This function can be called more than once in a process. This happens + # because some virtualenv configurations make the same directory visible + # twice in sys.path. This means that the .pth file will be found twice, + # and executed twice, executing this function twice. We set a global + # flag (an attribute on this function) to indicate that coverage.py has + # already been started, so we can avoid doing it twice. + # + # https://github.com/coveragepy/coveragepy/issues/340 has more details. + + if not force and hasattr(process_startup, "coverage"): + # We've annotated this function before, so we must have already + # auto-started coverage.py in this process. Nothing to do. + return None + + cov = Coverage(config_file=config_file) + process_startup.coverage = cov # type: ignore[attr-defined] + cov._warn_no_data = False + cov._warn_unimported_source = False + cov._warn_preimported_source = False + cov._auto_save = True + cov._make_pth_file = False + cov.start() + + return cov + + +def _after_fork_in_child() -> None: + """Used by patch=fork in the child process to restart coverage.""" + if cov := Coverage.current(): + cov.stop() + process_startup(force=True) + + +def _prevent_sub_process_measurement() -> None: + """Stop any subprocess auto-measurement from writing data.""" + auto_created_coverage = getattr(process_startup, "coverage", None) + if auto_created_coverage is not None: + auto_created_coverage._auto_save = False diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/core.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/core.py new file mode 100644 index 0000000..64616a6 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/core.py @@ -0,0 +1,139 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Management of core choices.""" + +from __future__ import annotations + +import os +import sys +from typing import Any + +from coverage import env +from coverage.config import CoverageConfig +from coverage.disposition import FileDisposition +from coverage.exceptions import ConfigError +from coverage.misc import isolate_module +from coverage.pytracer import PyTracer +from coverage.sysmon import SysMonitor +from coverage.types import TDebugCtl, TFileDisposition, Tracer, TWarnFn + +os = isolate_module(os) + +IMPORT_ERROR: str = "" + +try: + # Use the C extension code when we can, for speed. + import coverage.tracer + + CTRACER_FILE: str | None = getattr(coverage.tracer, "__file__", "unknown") +except ImportError as imp_err: + # Couldn't import the C extension, maybe it isn't built. + # We still need to check the environment variable directly here, + # as this code runs before configuration is loaded. + if os.getenv("COVERAGE_CORE") == "ctrace": # pragma: part covered + # During testing, we use the COVERAGE_CORE environment variable + # to indicate that we've fiddled with the environment to test this + # fallback code. If we thought we had a C tracer, but couldn't import + # it, then exit quickly and clearly instead of dribbling confusing + # errors. I'm using sys.exit here instead of an exception because an + # exception here causes all sorts of other noise in unittest. + sys.stderr.write("*** COVERAGE_CORE is 'ctrace' but can't import CTracer!\n") + sys.exit(1) + IMPORT_ERROR = str(imp_err) + CTRACER_FILE = None + + +class Core: + """Information about the central technology enabling execution measurement.""" + + tracer_class: type[Tracer] + tracer_kwargs: dict[str, Any] + file_disposition_class: type[TFileDisposition] + supports_plugins: bool + packed_arcs: bool + systrace: bool + + def __init__( + self, + *, + warn: TWarnFn, + debug: TDebugCtl | None, + config: CoverageConfig, + dynamic_contexts: bool, + metacov: bool, + ) -> None: + def _debug(msg: str) -> None: + if debug: + debug.write(msg) + + _debug("in core.py") + + # Check the conditions that preclude us from using sys.monitoring. + reason_no_sysmon = "" + if not env.PYBEHAVIOR.pep669: + reason_no_sysmon = "sys.monitoring isn't available in this version" + elif config.branch and not env.PYBEHAVIOR.branch_right_left: + reason_no_sysmon = "sys.monitoring can't measure branches in this version" + elif dynamic_contexts: + reason_no_sysmon = "it doesn't yet support dynamic contexts" + elif any((bad := c) in config.concurrency for c in ["greenlet", "eventlet", "gevent"]): + reason_no_sysmon = f"it doesn't support concurrency={bad}" + + core_name: str | None = None + if config.timid: + core_name = "pytrace" + _debug("core.py: Using pytrace because timid=True") + elif core_name is None: + # This could still leave core_name as None. + core_name = config.core + _debug(f"core.py: core from config is {core_name!r}") + + if core_name == "sysmon" and reason_no_sysmon: + _debug(f"core.py: defaulting because sysmon not usable: {reason_no_sysmon}") + warn(f"Can't use core=sysmon: {reason_no_sysmon}, using default core", slug="no-sysmon") + core_name = None + + if core_name is None: + if env.SYSMON_DEFAULT and not reason_no_sysmon: + core_name = "sysmon" + _debug("core.py: Using sysmon because SYSMON_DEFAULT is set") + else: + core_name = "ctrace" + _debug("core.py: Defaulting to ctrace core") + + if core_name == "ctrace": + if not CTRACER_FILE: + if IMPORT_ERROR and env.SHIPPING_WHEELS: + warn(f"Couldn't import C tracer: {IMPORT_ERROR}", slug="no-ctracer", once=True) + core_name = "pytrace" + _debug("core.py: Falling back to pytrace because C tracer not available") + + _debug(f"core.py: Using core={core_name}") + + self.tracer_kwargs = {} + + if core_name == "sysmon": + self.tracer_class = SysMonitor + self.tracer_kwargs["tool_id"] = 3 if metacov else 1 + self.file_disposition_class = FileDisposition + self.supports_plugins = False + self.packed_arcs = False + self.systrace = False + elif core_name == "ctrace": + self.tracer_class = coverage.tracer.CTracer + self.file_disposition_class = coverage.tracer.CFileDisposition + self.supports_plugins = True + self.packed_arcs = True + self.systrace = True + elif core_name == "pytrace": + self.tracer_class = PyTracer + self.file_disposition_class = FileDisposition + self.supports_plugins = False + self.packed_arcs = False + self.systrace = True + else: + raise ConfigError(f"Unknown core value: {core_name!r}") + + def __repr__(self) -> str: + return f"" diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/data.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/data.py new file mode 100644 index 0000000..4310470 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/data.py @@ -0,0 +1,227 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Coverage data for coverage.py. + +This file had the 4.x JSON data support, which is now gone. This file still +has storage-agnostic helpers, and is kept to avoid changing too many imports. +CoverageData is now defined in sqldata.py, and imported here to keep the +imports working. + +""" + +from __future__ import annotations + +import functools +import glob +import hashlib +import os.path +from collections.abc import Iterable +from typing import Callable + +from coverage.exceptions import CoverageException, NoDataError +from coverage.files import PathAliases +from coverage.misc import Hasher, file_be_gone, human_sorted, plural +from coverage.sqldata import CoverageData as CoverageData # pylint: disable=useless-import-alias + + +def line_counts(data: CoverageData, fullpath: bool = False) -> dict[str, int]: + """Return a dict summarizing the line coverage data. + + Keys are based on the file names, and values are the number of executed + lines. If `fullpath` is true, then the keys are the full pathnames of + the files, otherwise they are the basenames of the files. + + Returns a dict mapping file names to counts of lines. + + """ + summ = {} + filename_fn: Callable[[str], str] + if fullpath: + # pylint: disable=unnecessary-lambda-assignment + filename_fn = lambda f: f + else: + filename_fn = os.path.basename + for filename in data.measured_files(): + lines = data.lines(filename) + assert lines is not None + summ[filename_fn(filename)] = len(lines) + return summ + + +def add_data_to_hash(data: CoverageData, filename: str, hasher: Hasher) -> None: + """Contribute `filename`'s data to the `hasher`. + + `hasher` is a `coverage.misc.Hasher` instance to be updated with + the file's data. It should only get the results data, not the run + data. + + """ + if data.has_arcs(): + hasher.update(sorted(data.arcs(filename) or [])) + else: + hasher.update(sorted_lines(data, filename)) + hasher.update(data.file_tracer(filename)) + + +def combinable_files(data_file: str, data_paths: Iterable[str] | None = None) -> list[str]: + """Make a list of data files to be combined. + + `data_file` is a path to a data file. `data_paths` is a list of files or + directories of files. + + Returns a list of absolute file paths. + """ + data_dir, local = os.path.split(os.path.abspath(data_file)) + + data_paths = data_paths or [data_dir] + files_to_combine = [] + for p in data_paths: + if os.path.isfile(p): + files_to_combine.append(os.path.abspath(p)) + elif os.path.isdir(p): + pattern = glob.escape(os.path.join(os.path.abspath(p), local)) + ".*" + files_to_combine.extend(glob.glob(pattern)) + else: + raise NoDataError(f"Couldn't combine from non-existent path '{p}'") + + # SQLite might have made journal files alongside our database files. + # We never want to combine those. + files_to_combine = [fnm for fnm in files_to_combine if not fnm.endswith("-journal")] + + # Sorting isn't usually needed, since it shouldn't matter what order files + # are combined, but sorting makes tests more predictable, and makes + # debugging more understandable when things go wrong. + return sorted(files_to_combine) + + +def combine_parallel_data( + data: CoverageData, + aliases: PathAliases | None = None, + data_paths: Iterable[str] | None = None, + strict: bool = False, + keep: bool = False, + message: Callable[[str], None] | None = None, +) -> None: + """Combine a number of data files together. + + `data` is a CoverageData. + + Treat `data.filename` as a file prefix, and combine the data from all + of the data files starting with that prefix plus a dot. + + If `aliases` is provided, it's a `PathAliases` object that is used to + re-map paths to match the local machine's. + + If `data_paths` is provided, it is a list of directories or files to + combine. Directories are searched for files that start with + `data.filename` plus dot as a prefix, and those files are combined. + + If `data_paths` is not provided, then the directory portion of + `data.filename` is used as the directory to search for data files. + + Unless `keep` is True every data file found and combined is then deleted + from disk. If a file cannot be read, a warning will be issued, and the + file will not be deleted. + + If `strict` is true, and no files are found to combine, an error is + raised. + + `message` is a function to use for printing messages to the user. + + """ + files_to_combine = combinable_files(data.base_filename(), data_paths) + + if strict and not files_to_combine: + raise NoDataError("No data to combine") + + if aliases is None: + map_path = None + else: + map_path = functools.cache(aliases.map) + + file_hashes = set() + combined_any = False + + for f in files_to_combine: + if f == data.data_filename(): + # Sometimes we are combining into a file which is one of the + # parallel files. Skip that file. + if data._debug.should("dataio"): + data._debug.write(f"Skipping combining ourself: {f!r}") + continue + + try: + rel_file_name = os.path.relpath(f) + except ValueError: + # ValueError can be raised under Windows when os.getcwd() returns a + # folder from a different drive than the drive of f, in which case + # we print the original value of f instead of its relative path + rel_file_name = f + + with open(f, "rb") as fobj: + hasher = hashlib.new("sha3_256", usedforsecurity=False) + hasher.update(fobj.read()) + sha = hasher.digest() + combine_this_one = sha not in file_hashes + + delete_this_one = not keep + if combine_this_one: + if data._debug.should("dataio"): + data._debug.write(f"Combining data file {f!r}") + file_hashes.add(sha) + try: + new_data = CoverageData(f, debug=data._debug) + new_data.read() + except CoverageException as exc: + if data._warn: + # The CoverageException has the file name in it, so just + # use the message as the warning. + data._warn(str(exc)) + if message: + message(f"Couldn't combine data file {rel_file_name}: {exc}") + delete_this_one = False + else: + data.update(new_data, map_path=map_path) + combined_any = True + if message: + message(f"Combined data file {rel_file_name}") + else: + if message: + message(f"Skipping duplicate data {rel_file_name}") + + if delete_this_one: + if data._debug.should("dataio"): + data._debug.write(f"Deleting data file {f!r}") + file_be_gone(f) + + if strict and not combined_any: + raise NoDataError("No usable data files") + + +def debug_data_file(filename: str) -> None: + """Implementation of 'coverage debug data'.""" + data = CoverageData(filename) + filename = data.data_filename() + print(f"path: {filename}") + if not os.path.exists(filename): + print("No data collected: file doesn't exist") + return + data.read() + print(f"has_arcs: {data.has_arcs()!r}") + summary = line_counts(data, fullpath=True) + filenames = human_sorted(summary.keys()) + nfiles = len(filenames) + print(f"{nfiles} file{plural(nfiles)}:") + for f in filenames: + line = f"{f}: {summary[f]} line{plural(summary[f])}" + plugin = data.file_tracer(f) + if plugin: + line += f" [{plugin}]" + print(line) + + +def sorted_lines(data: CoverageData, filename: str) -> list[int]: + """Get the sorted lines for a file, for tests.""" + lines = data.lines(filename) + return sorted(lines or []) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/debug.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/debug.py new file mode 100644 index 0000000..a134990 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/debug.py @@ -0,0 +1,669 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Control of and utilities for debugging.""" + +from __future__ import annotations + +import _thread +import atexit +import contextlib +import datetime +import functools +import inspect +import itertools +import os +import pprint +import re +import reprlib +import sys +import traceback +import types +from collections.abc import Iterable, Iterator, Mapping +from typing import IO, Any, Callable, Final, overload + +from coverage.misc import human_sorted_items, isolate_module +from coverage.types import AnyCallable, TWritable + +os = isolate_module(os) + + +# When debugging, it can be helpful to force some options, especially when +# debugging the configuration mechanisms you usually use to control debugging! +# This is a list of forced debugging options. +FORCED_DEBUG: list[str] = [] +FORCED_DEBUG_FILE = None + + +class DebugControl: + """Control and output for debugging.""" + + show_repr_attr = False # For auto_repr + + def __init__( + self, + options: Iterable[str], + output: IO[str] | None, + file_name: str | None = None, + ) -> None: + """Configure the options and output file for debugging.""" + self.options = list(options) + FORCED_DEBUG + self.suppress_callers = False + + filters = [] + if self.should("process"): + filters.append(CwdTracker().filter) + filters.append(ProcessTracker().filter) + if self.should("pytest"): + filters.append(PytestTracker().filter) + if self.should("pid"): + filters.append(add_pid_and_tid) + + self.output = DebugOutputFile.get_one( + output, + file_name=file_name, + filters=filters, + ) + self.raw_output = self.output.outfile + + def __repr__(self) -> str: + return f"" + + def should(self, option: str) -> bool: + """Decide whether to output debug information in category `option`.""" + if option == "callers" and self.suppress_callers: + return False + return option in self.options + + @contextlib.contextmanager + def without_callers(self) -> Iterator[None]: + """A context manager to prevent call stacks from being logged.""" + old = self.suppress_callers + self.suppress_callers = True + try: + yield + finally: + self.suppress_callers = old + + def write(self, msg: str, *, exc: BaseException | None = None) -> None: + """Write a line of debug output. + + `msg` is the line to write. A newline will be appended. + + If `exc` is provided, a stack trace of the exception will be written + after the message. + + """ + self.output.write(msg + "\n") + if exc is not None: + self.output.write("".join(traceback.format_exception(None, exc, exc.__traceback__))) + if self.should("self"): + caller_self = inspect.stack()[1][0].f_locals.get("self") + if caller_self is not None: + self.output.write(f"self: {caller_self!r}\n") + if self.should("callers"): + dump_stack_frames(out=self.output, skip=1) + self.output.flush() + + +class NoDebugging(DebugControl): + """A replacement for DebugControl that will never try to do anything.""" + + def __init__(self) -> None: + # pylint: disable=super-init-not-called + pass + + def should(self, option: str) -> bool: + """Should we write debug messages? Never.""" + return False + + @contextlib.contextmanager + def without_callers(self) -> Iterator[None]: + """A dummy context manager to satisfy the api.""" + yield # pragma: never called + + def write(self, msg: str, *, exc: BaseException | None = None) -> None: + """This will never be called.""" + raise AssertionError("NoDebugging.write should never be called.") + + +class DevNullDebug(NoDebugging): + """A DebugControl that won't write anywhere.""" + + def write(self, msg: str, *, exc: BaseException | None = None) -> None: + pass + + +def info_header(label: str) -> str: + """Make a nice header string.""" + return "--{:-<60s}".format(" " + label + " ") + + +def info_formatter(info: Iterable[tuple[str, Any]]) -> Iterable[str]: + """Produce a sequence of formatted lines from info. + + `info` is a sequence of pairs (label, data). The produced lines are + nicely formatted, ready to print. + + """ + info = list(info) + if not info: + return + LABEL_LEN = 30 + assert all(len(l) < LABEL_LEN for l, _ in info) + for label, data in info: + if data == []: + data = "-none-" + prefix = f"{label:>{LABEL_LEN}}: " + match data: + case tuple() if len(str(data)) < 30: + yield f"{prefix}{data}" + case tuple() | list() | set(): + for e in data: + yield f"{prefix}{e}" + prefix = " " * (LABEL_LEN + 2) + case _: + yield f"{prefix}{data}" + + +def write_formatted_info( + write: Callable[[str], None], + header: str, + info: Iterable[tuple[str, Any]], +) -> None: + """Write a sequence of (label,data) pairs nicely. + + `write` is a function write(str) that accepts each line of output. + `header` is a string to start the section. `info` is a sequence of + (label, data) pairs, where label is a str, and data can be a single + value, or a list/set/tuple. + + """ + write(info_header(header)) + for line in info_formatter(info): + write(f" {line}") + + +def exc_one_line(exc: Exception) -> str: + """Get a one-line summary of an exception, including class name and message.""" + lines = traceback.format_exception_only(type(exc), exc) + return "|".join(l.rstrip() for l in lines) + + +_FILENAME_REGEXES: list[tuple[str, str]] = [ + (r".*[/\\]pytest-of-.*[/\\]pytest-\d+([/\\]popen-gw\d+)?", "tmp:"), +] +_FILENAME_SUBS: list[tuple[str, str]] = [] + + +@overload +def short_filename(filename: str) -> str: + pass + + +@overload +def short_filename(filename: None) -> None: + pass + + +def short_filename(filename: str | None) -> str | None: + """Shorten a file name. Directories are replaced by prefixes like 'syspath:'""" + if not _FILENAME_SUBS: + for pathdir in sys.path: + _FILENAME_SUBS.append((pathdir, "syspath:")) + import coverage + + _FILENAME_SUBS.append((os.path.dirname(coverage.__file__), "cov:")) + _FILENAME_SUBS.sort(key=(lambda pair: len(pair[0])), reverse=True) + if filename is not None: + for pat, sub in _FILENAME_REGEXES: + filename = re.sub(pat, sub, filename) + for before, after in _FILENAME_SUBS: + filename = filename.replace(before, after) + return filename + + +def file_summary(filename: str) -> str: + """A one-line summary of a file, for log messages.""" + try: + s = os.stat(filename) + except FileNotFoundError: + summary = "does not exist" + except Exception as e: + summary = f"error: {e}" + else: + mod = datetime.datetime.fromtimestamp(s.st_mtime) + summary = f"{s.st_size} bytes, modified {mod}" + return summary + + +def short_stack( + skip: int = 0, + full: bool = False, + frame_ids: bool = False, + short_filenames: bool = False, +) -> str: + """Return a string summarizing the call stack. + + The string is multi-line, with one line per stack frame. Each line shows + the function name, the file name, and the line number: + + ... + start_import_stop : /Users/ned/coverage/trunk/tests/coveragetest.py:95 + import_local_file : /Users/ned/coverage/trunk/tests/coveragetest.py:81 + import_local_file : /Users/ned/coverage/trunk/coverage/backward.py:159 + ... + + `skip` is the number of closest immediate frames to skip, so that debugging + functions can call this and not be included in the result. + + If `full` is true, then include all frames. Otherwise, initial "boring" + frames (ones in site-packages and earlier) are omitted. + + `short_filenames` will shorten filenames using `short_filename`, to reduce + the amount of repetitive noise in stack traces. + + """ + # Regexes in initial frames that we don't care about. + # fmt: off + BORING_PRELUDE = [ + "", # pytest-xdist has string execution. + r"\bigor.py$", # Our test runner. + r"\bsite-packages\b", # pytest etc getting to our tests. + ] + # fmt: on + + stack: Iterable[inspect.FrameInfo] = inspect.stack()[:skip:-1] + if not full: + for pat in BORING_PRELUDE: + stack = itertools.dropwhile( + (lambda fi, pat=pat: re.search(pat, fi.filename)), # type: ignore[misc] + stack, + ) + lines = [] + for frame_info in stack: + line = f"{frame_info.function:>30s} : " + if frame_ids: + line += f"{id(frame_info.frame):#x} " + filename = frame_info.filename + if short_filenames: + filename = short_filename(filename) + line += f"{filename}:{frame_info.lineno}" + lines.append(line) + return "\n".join(lines) + + +def dump_stack_frames(out: TWritable, skip: int = 0) -> None: + """Print a summary of the stack to `out`.""" + out.write(short_stack(skip=skip + 1) + "\n") + + +def clipped_repr(text: str, numchars: int = 50) -> str: + """`repr(text)`, but limited to `numchars`.""" + r = reprlib.Repr() + r.maxstring = numchars + return r.repr(text) + + +def short_id(id64: int) -> int: + """Given a 64-bit id, make a shorter 16-bit one.""" + id16 = 0 + for offset in range(0, 64, 16): + id16 ^= id64 >> offset + return id16 & 0xFFFF + + +def add_pid_and_tid(text: str) -> str: + """A filter to add pid and tid to debug messages.""" + # Thread ids are useful, but too long. Make a shorter one. + tid = f"{short_id(_thread.get_ident()):04x}" + text = f"{os.getpid():5d}.{tid}: {text}" + return text + + +AUTO_REPR_IGNORE = {"$coverage.object_id"} + + +def auto_repr(self: Any) -> str: + """A function implementing an automatic __repr__ for debugging.""" + show_attrs = ( + (k, v) + for k, v in self.__dict__.items() + if getattr(v, "show_repr_attr", True) + and not inspect.ismethod(v) + and k not in AUTO_REPR_IGNORE + ) + return "<{klass} @{id:#x}{attrs}>".format( + klass=self.__class__.__name__, + id=id(self), + attrs="".join(f" {k}={v!r}" for k, v in show_attrs), + ) + + +def simplify(v: Any) -> Any: # pragma: debugging + """Turn things which are nearly dict/list/etc into dict/list/etc.""" + if isinstance(v, dict): + return {k: simplify(vv) for k, vv in v.items()} + elif isinstance(v, (list, tuple)): + return type(v)(simplify(vv) for vv in v) + elif hasattr(v, "__dict__"): + return simplify({"." + k: v for k, v in v.__dict__.items()}) + else: + return v + + +def ppformat(v: Any) -> str: # pragma: debugging + """Debug helper to pretty-print data, including SimpleNamespace objects.""" + return pprint.pformat(simplify(v), indent=4, compact=True, sort_dicts=True, width=140) + + +def pp(v: Any) -> None: # pragma: debugging + """Debug helper to pretty-print data, including SimpleNamespace objects.""" + print(ppformat(v)) + + +def filter_text(text: str, filters: Iterable[Callable[[str], str]]) -> str: + """Run `text` through a series of filters. + + `filters` is a list of functions. Each takes a string and returns a + string. Each is run in turn. After each filter, the text is split into + lines, and each line is passed through the next filter. + + Returns: the final string that results after all of the filters have + run. + + """ + clean_text = text.rstrip() + ending = text[len(clean_text) :] + text = clean_text + for filter_fn in filters: + lines = [] + for line in text.splitlines(): + lines.extend(filter_fn(line).splitlines()) + text = "\n".join(lines) + return text + ending + + +class CwdTracker: + """A class to add cwd info to debug messages.""" + + def __init__(self) -> None: + self.cwd: str | None = None + + def filter(self, text: str) -> str: + """Add a cwd message for each new cwd.""" + cwd = os.getcwd() + if cwd != self.cwd: + text = f"cwd is now {cwd!r}\n{text}" + self.cwd = cwd + return text + + +class ProcessTracker: + """Track process creation for debug logging.""" + + def __init__(self) -> None: + self.pid: int = os.getpid() + self.did_welcome = False + + def filter(self, text: str) -> str: + """Add a message about how new processes came to be.""" + welcome = "" + pid = os.getpid() + if self.pid != pid: + welcome = f"New process: forked {self.pid} -> {pid}\n" + self.pid = pid + elif not self.did_welcome: + argv = getattr(sys, "argv", None) + welcome = ( + f"New process: {pid=}, executable: {sys.executable!r}\n" + + f"New process: cmd: {argv!r}\n" + + f"New process parent pid: {os.getppid()!r}\n" + ) + + if welcome: + self.did_welcome = True + return welcome + text + else: + return text + + +class PytestTracker: + """Track the current pytest test name to add to debug messages.""" + + def __init__(self) -> None: + self.test_name: str | None = None + + def filter(self, text: str) -> str: + """Add a message when the pytest test changes.""" + test_name = os.getenv("PYTEST_CURRENT_TEST") + if test_name != self.test_name: + text = f"Pytest context: {test_name}\n{text}" + self.test_name = test_name + return text + + +class DebugOutputFile: + """A file-like object that includes pid and cwd information.""" + + def __init__( + self, + outfile: IO[str] | None, + filters: Iterable[Callable[[str], str]], + ): + self.outfile = outfile + self.filters = list(filters) + self.pid = os.getpid() + + @classmethod + def get_one( + cls, + fileobj: IO[str] | None = None, + file_name: str | None = None, + filters: Iterable[Callable[[str], str]] = (), + interim: bool = False, + ) -> DebugOutputFile: + """Get a DebugOutputFile. + + If `fileobj` is provided, then a new DebugOutputFile is made with it. + + If `fileobj` isn't provided, then a file is chosen (`file_name` if + provided, or COVERAGE_DEBUG_FILE, or stderr), and a process-wide + singleton DebugOutputFile is made. + + `filters` are the text filters to apply to the stream to annotate with + pids, etc. + + If `interim` is true, then a future `get_one` can replace this one. + + """ + if fileobj is not None: + # Make DebugOutputFile around the fileobj passed. + return cls(fileobj, filters) + + the_one, is_interim = cls._get_singleton_data() + if the_one is None or is_interim: + if file_name is not None: + fileobj = open(file_name, "a", encoding="utf-8") + else: + # $set_env.py: COVERAGE_DEBUG_FILE - Where to write debug output + file_name = os.getenv("COVERAGE_DEBUG_FILE", FORCED_DEBUG_FILE) + if file_name in ["stdout", "stderr"]: + fileobj = getattr(sys, file_name) + elif file_name: + fileobj = open(file_name, "a", encoding="utf-8") + atexit.register(fileobj.close) + else: + fileobj = sys.stderr + the_one = cls(fileobj, filters) + cls._set_singleton_data(the_one, interim) + + if not (the_one.filters): + the_one.filters = list(filters) + return the_one + + # Because of the way igor.py deletes and re-imports modules, + # this class can be defined more than once. But we really want + # a process-wide singleton. So stash it in sys.modules instead of + # on a class attribute. Yes, this is aggressively gross. + + SYS_MOD_NAME: Final[str] = "$coverage.debug.DebugOutputFile.the_one" + SINGLETON_ATTR: Final[str] = "the_one_and_is_interim" + + @classmethod + def _set_singleton_data(cls, the_one: DebugOutputFile, interim: bool) -> None: + """Set the one DebugOutputFile to rule them all.""" + singleton_module = types.ModuleType(cls.SYS_MOD_NAME) + setattr(singleton_module, cls.SINGLETON_ATTR, (the_one, interim)) + sys.modules[cls.SYS_MOD_NAME] = singleton_module + + @classmethod + def _get_singleton_data(cls) -> tuple[DebugOutputFile | None, bool]: + """Get the one DebugOutputFile.""" + singleton_module = sys.modules.get(cls.SYS_MOD_NAME) + return getattr(singleton_module, cls.SINGLETON_ATTR, (None, True)) + + @classmethod + def _del_singleton_data(cls) -> None: + """Delete the one DebugOutputFile, just for tests to use.""" + if cls.SYS_MOD_NAME in sys.modules: + del sys.modules[cls.SYS_MOD_NAME] + + def write(self, text: str) -> None: + """Just like file.write, but filter through all our filters.""" + assert self.outfile is not None + if not self.outfile.closed: + self.outfile.write(filter_text(text, self.filters)) + self.outfile.flush() + + def flush(self) -> None: + """Flush our file.""" + assert self.outfile is not None + if not self.outfile.closed: + self.outfile.flush() + + +def log(msg: str, stack: bool = False) -> None: # pragma: debugging + """Write a log message as forcefully as possible.""" + out = DebugOutputFile.get_one(interim=True) + out.write(msg + "\n") + if stack: + dump_stack_frames(out=out, skip=1) + + +def decorate_methods( + decorator: Callable[..., Any], + butnot: Iterable[str] = (), + private: bool = False, +) -> Callable[..., Any]: # pragma: debugging + """A class decorator to apply a decorator to methods.""" + + def _decorator(cls): # type: ignore[no-untyped-def] + for name, meth in inspect.getmembers(cls, inspect.isroutine): + if name not in cls.__dict__: + continue + if name != "__init__": + if not private and name.startswith("_"): + continue + if name in butnot: + continue + setattr(cls, name, decorator(meth)) + return cls + + return _decorator + + +def break_in_pudb(func: AnyCallable) -> AnyCallable: # pragma: debugging + """A function decorator to stop in the debugger for each call.""" + + @functools.wraps(func) + def _wrapper(*args: Any, **kwargs: Any) -> Any: + import pudb + + sys.stdout = sys.__stdout__ + pudb.set_trace() + return func(*args, **kwargs) + + return _wrapper + + +OBJ_IDS = itertools.count() +CALLS = itertools.count() +OBJ_ID_ATTR = "$coverage.object_id" + + +def show_calls( + show_args: bool = True, + show_stack: bool = False, + show_return: bool = False, +) -> Callable[..., Any]: # pragma: debugging + """A method decorator to debug-log each call to the function.""" + + def _decorator(func: AnyCallable) -> AnyCallable: + @functools.wraps(func) + def _wrapper(self: Any, *args: Any, **kwargs: Any) -> Any: + oid = getattr(self, OBJ_ID_ATTR, None) + if oid is None: + oid = f"{os.getpid():08d} {next(OBJ_IDS):04d}" + setattr(self, OBJ_ID_ATTR, oid) + extra = "" + if show_args: + eargs = ", ".join(map(repr, args)) + ekwargs = ", ".join("{}={!r}".format(*item) for item in kwargs.items()) + extra += "(" + extra += eargs + if eargs and ekwargs: + extra += ", " + extra += ekwargs + extra += ")" + if show_stack: + extra += " @ " + extra += "; ".join(short_stack(short_filenames=True).splitlines()) + callid = next(CALLS) + msg = f"{oid} {callid:04d} {func.__name__}{extra}\n" + DebugOutputFile.get_one(interim=True).write(msg) + ret = func(self, *args, **kwargs) + if show_return: + msg = f"{oid} {callid:04d} {func.__name__} return {ret!r}\n" + DebugOutputFile.get_one(interim=True).write(msg) + return ret + + return _wrapper + + return _decorator + + +def relevant_environment_display(env: Mapping[str, str]) -> list[tuple[str, str]]: + """Filter environment variables for a debug display. + + Select variables to display (with COV or PY in the name, or HOME, TEMP, or + TMP), and also cloak sensitive values with asterisks. + + Arguments: + env: a dict of environment variable names and values. + + Returns: + A list of pairs (name, value) to show. + + """ + SLUGS = {"COV", "PY"} + INCLUDE = {"HOME", "TEMP", "TMP"} + CLOAK = {"API", "TOKEN", "KEY", "SECRET", "PASS", "SIGNATURE"} + TRUNCATE = {"COVERAGE_PROCESS_CONFIG"} + TRUNCATE_LEN = 60 + + to_show = [] + for name, val in env.items(): + show = False + if name in INCLUDE: + show = True + elif any(slug in name for slug in SLUGS): + show = True + if show: + if any(slug in name for slug in CLOAK): + val = re.sub(r"\w", "*", val) + if name in TRUNCATE: + if len(val) > TRUNCATE_LEN: + val = val[: TRUNCATE_LEN - 3] + "..." + to_show.append((name, val)) + return human_sorted_items(to_show) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/disposition.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/disposition.py new file mode 100644 index 0000000..0ecdfad --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/disposition.py @@ -0,0 +1,59 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Simple value objects for tracking what to do with files.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from coverage.types import TFileDisposition + +if TYPE_CHECKING: + from coverage.plugin import FileTracer + + +class FileDisposition: + """A simple value type for recording what to do with a file.""" + + original_filename: str + canonical_filename: str + source_filename: str | None + trace: bool + reason: str + file_tracer: FileTracer | None + has_dynamic_filename: bool + + def __repr__(self) -> str: + return f"" + + +# FileDisposition "methods": FileDisposition is a pure value object, so it can +# be implemented in either C or Python. Acting on them is done with these +# functions. + + +def disposition_init(cls: type[TFileDisposition], original_filename: str) -> TFileDisposition: + """Construct and initialize a new FileDisposition object.""" + disp = cls() + disp.original_filename = original_filename + disp.canonical_filename = original_filename + disp.source_filename = None + disp.trace = False + disp.reason = "" + disp.file_tracer = None + disp.has_dynamic_filename = False + return disp + + +def disposition_debug_msg(disp: TFileDisposition) -> str: + """Make a nice debug message of what the FileDisposition is doing.""" + if disp.trace: + msg = f"Tracing {disp.original_filename!r}" + if disp.original_filename != disp.source_filename: + msg += f" as {disp.source_filename!r}" + if disp.file_tracer: + msg += f": will be traced by {disp.file_tracer!r}" + else: + msg = f"Not tracing {disp.original_filename!r}: {disp.reason}" + return msg diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/env.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/env.py new file mode 100644 index 0000000..8cf8230 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/env.py @@ -0,0 +1,135 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Determine facts about the environment.""" + +from __future__ import annotations + +import os +import platform +import sys +from collections.abc import Iterable +from typing import Any, Final + +# debug_info() at the bottom wants to show all the globals, but not imports. +# Grab the global names here to know which names to not show. Nothing defined +# above this line will be in the output. +_UNINTERESTING_GLOBALS = list(globals()) +# These names also shouldn't be shown. +_UNINTERESTING_GLOBALS += ["PYBEHAVIOR", "debug_info"] + +# Operating systems. +WINDOWS = sys.platform == "win32" +LINUX = sys.platform.startswith("linux") +MACOS = sys.platform == "darwin" + +# Python implementations. +CPYTHON = (platform.python_implementation() == "CPython") # fmt: skip +PYPY = (platform.python_implementation() == "PyPy") # fmt: skip + +# Python versions. We amend version_info with one more value, a zero if an +# official version, or 1 if built from source beyond an official version. +# Only use sys.version_info directly where tools like mypy need it to understand +# version-specfic code, otherwise use PYVERSION. +PYVERSION = sys.version_info + (int(platform.python_version()[-1] == "+"),) + +if PYPY: + # Minimum now is 7.3.16 + PYPYVERSION = tuple(sys.pypy_version_info) # type: ignore[attr-defined] +else: + PYPYVERSION = (0,) + +# Do we have a GIL? +GIL = getattr(sys, "_is_gil_enabled", lambda: True)() + +# Do we ship compiled coveragepy wheels for this version? +SHIPPING_WHEELS = CPYTHON and PYVERSION[:2] <= (3, 14) + +# Should we default to sys.monitoring? +SYSMON_DEFAULT = CPYTHON and PYVERSION >= (3, 14) + + +# Python behavior. +class PYBEHAVIOR: + """Flags indicating this Python's behavior.""" + + # When leaving a with-block, do we visit the with-line exactly, + # or the context managers in inner-out order? + # + # mwith.py: + # with ( + # open("/tmp/one", "w") as f2, + # open("/tmp/two", "w") as f3, + # open("/tmp/three", "w") as f4, + # ): + # print("hello 6") + # + # % python3.11 -m trace -t mwith.py | grep mwith + # --- modulename: mwith, funcname: + # mwith.py(2): open("/tmp/one", "w") as f2, + # mwith.py(1): with ( + # mwith.py(2): open("/tmp/one", "w") as f2, + # mwith.py(3): open("/tmp/two", "w") as f3, + # mwith.py(1): with ( + # mwith.py(3): open("/tmp/two", "w") as f3, + # mwith.py(4): open("/tmp/three", "w") as f4, + # mwith.py(1): with ( + # mwith.py(4): open("/tmp/three", "w") as f4, + # mwith.py(6): print("hello 6") + # mwith.py(1): with ( + # + # % python3.12 -m trace -t mwith.py | grep mwith + # --- modulename: mwith, funcname: + # mwith.py(2): open("/tmp/one", "w") as f2, + # mwith.py(3): open("/tmp/two", "w") as f3, + # mwith.py(4): open("/tmp/three", "w") as f4, + # mwith.py(6): print("hello 6") + # mwith.py(4): open("/tmp/three", "w") as f4, + # mwith.py(3): open("/tmp/two", "w") as f3, + # mwith.py(2): open("/tmp/one", "w") as f2, + + exit_with_through_ctxmgr = (PYVERSION >= (3, 12, 6)) # fmt: skip + + # f-strings are parsed as code, pep 701 + fstring_syntax = (PYVERSION >= (3, 12)) # fmt: skip + + # PEP669 Low Impact Monitoring: https://peps.python.org/pep-0669/ + pep669: Final[bool] = bool(getattr(sys, "monitoring", None)) + + # Where does frame.f_lasti point when yielding from a generator? + # It used to point at the YIELD, in 3.13 it points at the RESUME, + # then it went back to the YIELD. + # https://github.com/python/cpython/issues/113728 + lasti_is_yield = (PYVERSION[:2] != (3, 13)) # fmt: skip + + # PEP649 and PEP749: Deferred annotations + deferred_annotations = (PYVERSION >= (3, 14)) # fmt: skip + + # Does sys.monitoring support BRANCH_RIGHT and BRANCH_LEFT? The names + # were added in early 3.14 alphas, but didn't work entirely correctly until + # after 3.14.0a5. + branch_right_left = pep669 and (PYVERSION > (3, 14, 0, "alpha", 5, 0)) + + +# Coverage.py specifics, about testing scenarios. See tests/testenv.py also. + +# Are we coverage-measuring ourselves? +METACOV = os.getenv("COVERAGE_COVERAGE") is not None + +# Are we running our test suite? +# Even when running tests, you can use COVERAGE_TESTING=0 to disable the +# test-specific behavior like AST checking. +TESTING = os.getenv("COVERAGE_TESTING") == "True" + + +def debug_info() -> Iterable[tuple[str, Any]]: + """Return a list of (name, value) pairs for printing debug information.""" + info = [ + (name, value) + for name, value in globals().items() + if not name.startswith("_") and name not in _UNINTERESTING_GLOBALS + ] + info += [ + (name, value) for name, value in PYBEHAVIOR.__dict__.items() if not name.startswith("_") + ] + return sorted(info) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/exceptions.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/exceptions.py new file mode 100644 index 0000000..233788b --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/exceptions.py @@ -0,0 +1,85 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Exceptions coverage.py can raise.""" + +from __future__ import annotations + +from typing import Any + + +class CoverageException(Exception): + """The base class of all exceptions raised by Coverage.py.""" + + def __init__( + self, + *args: Any, + slug: str | None = None, + ) -> None: + """Create an exception. + + Args: + slug: A short string identifying the exception, will be used for + linking to documentation. + """ + + super().__init__(*args) + self.slug = slug + + +class ConfigError(CoverageException): + """A problem with a config file, or a value in one.""" + + pass + + +class DataError(CoverageException): + """An error in using a data file.""" + + pass + + +class NoDataError(CoverageException): + """We didn't have data to work with.""" + + pass + + +class NoSource(CoverageException): + """We couldn't find the source for a module.""" + + pass + + +class NoCode(NoSource): + """We couldn't find any code at all.""" + + pass + + +class NotPython(CoverageException): + """A source file turned out not to be parsable Python.""" + + pass + + +class PluginError(CoverageException): + """A plugin misbehaved.""" + + pass + + +class _ExceptionDuringRun(CoverageException): + """An exception happened while running customer code. + + Construct it with three arguments, the values from `sys.exc_info`. + + """ + + pass + + +class CoverageWarning(Warning): + """A warning from Coverage.py.""" + + pass diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/execfile.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/execfile.py new file mode 100644 index 0000000..1f8996c --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/execfile.py @@ -0,0 +1,329 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Execute files of Python code.""" + +from __future__ import annotations + +import importlib.machinery +import importlib.util +import inspect +import marshal +import os +import struct +import sys +from importlib.machinery import ModuleSpec +from types import CodeType, ModuleType +from typing import Any + +from coverage.exceptions import CoverageException, NoCode, NoSource, _ExceptionDuringRun +from coverage.files import canonical_filename, python_reported_file +from coverage.misc import isolate_module +from coverage.python import get_python_source + +os = isolate_module(os) + + +PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER + + +class DummyLoader: + """A shim for the pep302 __loader__, emulating pkgutil.ImpLoader. + + Currently only implements the .fullname attribute + """ + + def __init__(self, fullname: str, *_args: Any) -> None: + self.fullname = fullname + + +def find_module( + modulename: str, +) -> tuple[str | None, str, ModuleSpec]: + """Find the module named `modulename`. + + Returns the file path of the module, the name of the enclosing + package, and the spec. + """ + try: + spec = importlib.util.find_spec(modulename) + except ImportError as err: + raise NoSource(str(err)) from err + if not spec: + raise NoSource(f"No module named {modulename!r}") + pathname = spec.origin + packagename = spec.name + if spec.submodule_search_locations: + mod_main = modulename + ".__main__" + spec = importlib.util.find_spec(mod_main) + if not spec: + raise NoSource( + f"No module named {mod_main}; " + + f"{modulename!r} is a package and cannot be directly executed", + ) + pathname = spec.origin + packagename = spec.name + packagename = packagename.rpartition(".")[0] + return pathname, packagename, spec + + +class PyRunner: + """Multi-stage execution of Python code. + + This is meant to emulate real Python execution as closely as possible. + + """ + + def __init__(self, args: list[str], as_module: bool = False) -> None: + self.args = args + self.as_module = as_module + + self.arg0 = args[0] + self.package: str | None = None + self.modulename: str | None = None + self.pathname: str | None = None + self.loader: DummyLoader | None = None + self.spec: ModuleSpec | None = None + + def prepare(self) -> None: + """Set sys.path properly. + + This needs to happen before any importing, and without importing anything. + """ + path0: str | None + if getattr(sys.flags, "safe_path", False): + # See https://docs.python.org/3/using/cmdline.html#cmdoption-P + path0 = None + elif self.as_module: + path0 = os.getcwd() + elif os.path.isdir(self.arg0): + # Running a directory means running the __main__.py file in that + # directory. + path0 = self.arg0 + else: + path0 = os.path.abspath(os.path.dirname(self.arg0)) + + if os.path.isdir(sys.path[0]): + # sys.path fakery. If we are being run as a command, then sys.path[0] + # is the directory of the "coverage" script. If this is so, replace + # sys.path[0] with the directory of the file we're running, or the + # current directory when running modules. If it isn't so, then we + # don't know what's going on, and just leave it alone. + top_file = inspect.stack()[-1][0].f_code.co_filename + sys_path_0_abs = os.path.abspath(sys.path[0]) + top_file_dir_abs = os.path.abspath(os.path.dirname(top_file)) + sys_path_0_abs = canonical_filename(sys_path_0_abs) + top_file_dir_abs = canonical_filename(top_file_dir_abs) + if sys_path_0_abs != top_file_dir_abs: + path0 = None + + else: + # sys.path[0] is a file. Is the next entry the directory containing + # that file? + if sys.path[1] == os.path.dirname(sys.path[0]): + # Can it be right to always remove that? + del sys.path[1] + + if path0 is not None: + sys.path[0] = python_reported_file(path0) + + def _prepare2(self) -> None: + """Do more preparation to run Python code. + + Includes finding the module to run and adjusting sys.argv[0]. + This method is allowed to import code. + + """ + if self.as_module: + self.modulename = self.arg0 + pathname, self.package, self.spec = find_module(self.modulename) + if self.spec is not None: + self.modulename = self.spec.name + self.loader = DummyLoader(self.modulename) + assert pathname is not None + self.pathname = os.path.abspath(pathname) + self.args[0] = self.arg0 = self.pathname + elif os.path.isdir(self.arg0): + # Running a directory means running the __main__.py file in that + # directory. + for ext in [".py", ".pyc", ".pyo"]: + try_filename = os.path.join(self.arg0, f"__main__{ext}") + # 3.8.10 changed how files are reported when running a + # directory. + try_filename = os.path.abspath(try_filename) + if os.path.exists(try_filename): + self.arg0 = try_filename + break + else: + raise NoSource(f"Can't find '__main__' module in '{self.arg0}'") + + # Make a spec. I don't know if this is the right way to do it. + try_filename = python_reported_file(try_filename) + self.spec = importlib.machinery.ModuleSpec("__main__", None, origin=try_filename) + self.spec.has_location = True + self.package = "" + self.loader = DummyLoader("__main__") + else: + self.loader = DummyLoader("__main__") + + self.arg0 = python_reported_file(self.arg0) + + def run(self) -> None: + """Run the Python code!""" + + self._prepare2() + + # Create a module to serve as __main__ + main_mod = ModuleType("__main__") + + from_pyc = self.arg0.endswith((".pyc", ".pyo")) + main_mod.__file__ = self.arg0 + if from_pyc: + main_mod.__file__ = main_mod.__file__[:-1] + if self.package is not None: + main_mod.__package__ = self.package + main_mod.__loader__ = self.loader # type: ignore[assignment] + if self.spec is not None: + main_mod.__spec__ = self.spec + + main_mod.__builtins__ = sys.modules["builtins"] # type: ignore[attr-defined] + + sys.modules["__main__"] = main_mod + + # Set sys.argv properly. + sys.argv = self.args + + try: + # Make a code object somehow. + if from_pyc: + code = make_code_from_pyc(self.arg0) + else: + code = make_code_from_py(self.arg0) + except CoverageException: + raise + except Exception as exc: + msg = f"Couldn't run '{self.arg0}' as Python code: {exc.__class__.__name__}: {exc}" + raise CoverageException(msg) from exc + + # Execute the code object. + # Return to the original directory in case the test code exits in + # a non-existent directory. + cwd = os.getcwd() + try: + exec(code, main_mod.__dict__) + except SystemExit: # pylint: disable=try-except-raise + # The user called sys.exit(). Just pass it along to the upper + # layers, where it will be handled. + raise + except Exception: + # Something went wrong while executing the user code. + # Get the exc_info, and pack them into an exception that we can + # throw up to the outer loop. We peel one layer off the traceback + # so that the coverage.py code doesn't appear in the final printed + # traceback. + typ, err, tb = sys.exc_info() + assert typ is not None + assert err is not None + assert tb is not None + + # PyPy3 weirdness. If I don't access __context__, then somehow it + # is non-None when the exception is reported at the upper layer, + # and a nested exception is shown to the user. This getattr fixes + # it somehow? https://bitbucket.org/pypy/pypy/issue/1903 + getattr(err, "__context__", None) + + # Call the excepthook. + try: + assert err.__traceback__ is not None + err.__traceback__ = err.__traceback__.tb_next + sys.excepthook(typ, err, tb.tb_next) + except SystemExit: # pylint: disable=try-except-raise + raise + except Exception as exc: + # Getting the output right in the case of excepthook + # shenanigans is kind of involved. + sys.stderr.write("Error in sys.excepthook:\n") + typ2, err2, tb2 = sys.exc_info() + assert typ2 is not None + assert err2 is not None + assert tb2 is not None + err2.__suppress_context__ = True + assert err2.__traceback__ is not None + err2.__traceback__ = err2.__traceback__.tb_next + sys.__excepthook__(typ2, err2, tb2.tb_next) + sys.stderr.write("\nOriginal exception was:\n") + raise _ExceptionDuringRun(typ, err, tb.tb_next) from exc + else: + sys.exit(1) + finally: + os.chdir(cwd) + + +def run_python_module(args: list[str]) -> None: + """Run a Python module, as though with ``python -m name args...``. + + `args` is the argument array to present as sys.argv, including the first + element naming the module being executed. + + This is a helper for tests, to encapsulate how to use PyRunner. + + """ + runner = PyRunner(args, as_module=True) + runner.prepare() + runner.run() + + +def run_python_file(args: list[str]) -> None: + """Run a Python file as if it were the main program on the command line. + + `args` is the argument array to present as sys.argv, including the first + element naming the file being executed. `package` is the name of the + enclosing package, if any. + + This is a helper for tests, to encapsulate how to use PyRunner. + + """ + runner = PyRunner(args, as_module=False) + runner.prepare() + runner.run() + + +def make_code_from_py(filename: str) -> CodeType: + """Get source from `filename` and make a code object of it.""" + try: + source = get_python_source(filename) + except (OSError, NoSource) as exc: + raise NoSource(f"No file to run: '{filename}'") from exc + + code = compile(source, filename, mode="exec", dont_inherit=True) + return code + + +def make_code_from_pyc(filename: str) -> CodeType: + """Get a code object from a .pyc file.""" + try: + fpyc = open(filename, "rb") + except OSError as exc: + raise NoCode(f"No file to run: '{filename}'") from exc + + with fpyc: + # First four bytes are a version-specific magic number. It has to + # match or we won't run the file. + magic = fpyc.read(4) + if magic != PYC_MAGIC_NUMBER: + raise NoCode(f"Bad magic number in .pyc file: {magic!r} != {PYC_MAGIC_NUMBER!r}") + + flags = struct.unpack(" None: + """Set the directory that `relative_filename` will be relative to.""" + global RELATIVE_DIR, CANONICAL_FILENAME_CACHE + + # The current directory + abs_curdir = abs_file(os.curdir) + if not abs_curdir.endswith(os.sep): + # Suffix with separator only if not at the system root + abs_curdir = abs_curdir + os.sep + + # The absolute path to our current directory. + RELATIVE_DIR = os.path.normcase(abs_curdir) + + # Cache of results of calling the canonical_filename() method, to + # avoid duplicating work. + CANONICAL_FILENAME_CACHE = {} + + +def relative_directory() -> str: + """Return the directory that `relative_filename` is relative to.""" + return RELATIVE_DIR + + +def relative_filename(filename: str) -> str: + """Return the relative form of `filename`. + + The file name will be relative to the current directory when the + `set_relative_directory` was called. + + """ + fnorm = os.path.normcase(filename) + if fnorm.startswith(RELATIVE_DIR): + filename = filename[len(RELATIVE_DIR) :] + return filename + + +def canonical_filename(filename: str) -> str: + """Return a canonical file name for `filename`. + + An absolute path with no redundant components and normalized case. + + """ + if filename not in CANONICAL_FILENAME_CACHE: + cf = filename + if not os.path.isabs(filename): + for path in [os.curdir] + sys.path: + if path is None: + continue # type: ignore[unreachable] + f = os.path.join(path, filename) + try: + exists = os.path.exists(f) + except UnicodeError: + exists = False + if exists: + cf = f + break + cf = abs_file(cf) + CANONICAL_FILENAME_CACHE[filename] = cf + return CANONICAL_FILENAME_CACHE[filename] + + +def flat_rootname(filename: str) -> str: + """A base for a flat file name to correspond to this file. + + Useful for writing files about the code where you want all the files in + the same directory, but need to differentiate same-named files from + different directories. + + For example, the file a/b/c.py will return 'z_86bbcbe134d28fd2_c_py' + + """ + dirname, basename = ntpath.split(filename) + if dirname: + fp = hashlib.new( + "sha3_256", + dirname.encode("UTF-8"), + usedforsecurity=False, + ).hexdigest()[:16] + prefix = f"z_{fp}_" + else: + prefix = "" + return prefix + basename.replace(".", "_") + + +if env.WINDOWS: + _ACTUAL_PATH_CACHE: dict[str, str] = {} + _ACTUAL_PATH_LIST_CACHE: dict[str, list[str]] = {} + + def actual_path(path: str) -> str: + """Get the actual path of `path`, including the correct case.""" + if path in _ACTUAL_PATH_CACHE: + return _ACTUAL_PATH_CACHE[path] + + head, tail = os.path.split(path) + if not tail: + # This means head is the drive spec: normalize it. + actpath = head.upper() + elif not head: + actpath = tail + else: + head = actual_path(head) + if head in _ACTUAL_PATH_LIST_CACHE: + files = _ACTUAL_PATH_LIST_CACHE[head] + else: + try: + files = os.listdir(head) + except Exception: + # This will raise OSError, or this bizarre TypeError: + # https://bugs.python.org/issue1776160 + files = [] + _ACTUAL_PATH_LIST_CACHE[head] = files + normtail = os.path.normcase(tail) + for f in files: + if os.path.normcase(f) == normtail: + tail = f + break + actpath = os.path.join(head, tail) + _ACTUAL_PATH_CACHE[path] = actpath + return actpath + +else: + + def actual_path(path: str) -> str: + """The actual path for non-Windows platforms.""" + return path + + +def abs_file(path: str) -> str: + """Return the absolute normalized form of `path`.""" + return actual_path(os.path.abspath(os.path.realpath(path))) + + +def zip_location(filename: str) -> tuple[str, str] | None: + """Split a filename into a zipfile / inner name pair. + + Only return a pair if the zipfile exists. No check is made if the inner + name is in the zipfile. + + """ + for ext in [".zip", ".whl", ".egg", ".pex", ".par"]: + zipbase, extension, inner = filename.partition(ext + sep(filename)) + if extension: + zipfile = zipbase + ext + if os.path.exists(zipfile): + return zipfile, inner + return None + + +def source_exists(path: str) -> bool: + """Determine if a source file path exists.""" + if os.path.exists(path): + return True + + if zip_location(path): + # If zip_location returns anything, then it's a zipfile that + # exists. That's good enough for us. + return True + + return False + + +def python_reported_file(filename: str) -> str: + """Return the string as Python would describe this file name.""" + return os.path.abspath(filename) + + +def isabs_anywhere(filename: str) -> bool: + """Is `filename` an absolute path on any OS?""" + return ntpath.isabs(filename) or posixpath.isabs(filename) + + +def prep_patterns(patterns: Iterable[str]) -> list[str]: + """Prepare the file patterns for use in a `GlobMatcher`. + + If a pattern starts with a wildcard, it is used as a pattern + as-is. If it does not start with a wildcard, then it is made + absolute with the current directory. + + If `patterns` is None, an empty list is returned. + + """ + prepped = [] + for p in patterns or []: + prepped.append(p) + if not p.startswith(("*", "?")): + prepped.append(abs_file(p)) + return prepped + + +class TreeMatcher: + """A matcher for files in a tree. + + Construct with a list of paths, either files or directories. Paths match + with the `match` method if they are one of the files, or if they are + somewhere in a subtree rooted at one of the directories. + + """ + + def __init__(self, paths: Iterable[str], name: str = "unknown") -> None: + self.original_paths: list[str] = human_sorted(paths) + self.paths = [os.path.normcase(p) for p in paths] + self.name = name + + def __repr__(self) -> str: + return f"" + + def info(self) -> list[str]: + """A list of strings for displaying when dumping state.""" + return self.original_paths + + def match(self, fpath: str) -> bool: + """Does `fpath` indicate a file in one of our trees?""" + fpath = os.path.normcase(fpath) + for p in self.paths: + if fpath.startswith(p): + if fpath == p: + # This is the same file! + return True + if fpath[len(p)] == os.sep: + # This is a file in the directory + return True + return False + + +class ModuleMatcher: + """A matcher for modules in a tree.""" + + def __init__(self, module_names: Iterable[str], name: str = "unknown") -> None: + self.modules = list(module_names) + self.name = name + + def __repr__(self) -> str: + return f"" + + def info(self) -> list[str]: + """A list of strings for displaying when dumping state.""" + return self.modules + + def match(self, module_name: str) -> bool: + """Does `module_name` indicate a module in one of our packages?""" + if not module_name: + return False + + for m in self.modules: + if module_name.startswith(m): + if module_name == m: + return True + if module_name[len(m)] == ".": + # This is a module in the package + return True + + return False + + +class GlobMatcher: + """A matcher for files by file name pattern.""" + + def __init__(self, pats: Iterable[str], name: str = "unknown") -> None: + self.pats = list(pats) + self.re = globs_to_regex(self.pats, case_insensitive=env.WINDOWS) + self.name = name + + def __repr__(self) -> str: + return f"" + + def info(self) -> list[str]: + """A list of strings for displaying when dumping state.""" + return self.pats + + def match(self, fpath: str) -> bool: + """Does `fpath` match one of our file name patterns?""" + return self.re.match(fpath) is not None + + +def sep(s: str) -> str: + """Find the path separator used in this string, or os.sep if none.""" + if sep_match := re.search(r"[\\/]", s): + the_sep = sep_match[0] + else: + the_sep = os.sep + return the_sep + + +# Tokenizer for _glob_to_regex. +# None as a sub means disallowed. +# fmt: off +G2RX_TOKENS = [(re.compile(rx), sub) for rx, sub in [ + (r"\*\*\*+", None), # Can't have *** + (r"[^/]+\*\*+", None), # Can't have x** + (r"\*\*+[^/]+", None), # Can't have **x + (r"\*\*/\*\*", None), # Can't have **/** + (r"^\*+/", r"(.*[/\\\\])?"), # ^*/ matches any prefix-slash, or nothing. + (r"/\*+$", r"[/\\\\].*"), # /*$ matches any slash-suffix. + (r"\*\*/", r"(.*[/\\\\])?"), # **/ matches any subdirs, including none + (r"/", r"[/\\\\]"), # / matches either slash or backslash + (r"\*", r"[^/\\\\]*"), # * matches any number of non slash-likes + (r"\?", r"[^/\\\\]"), # ? matches one non slash-like + (r"\[.*?\]", r"\g<0>"), # [a-f] matches [a-f] + (r"[a-zA-Z0-9_-]+", r"\g<0>"), # word chars match themselves + (r"[\[\]]", None), # Can't have single square brackets + (r".", r"\\\g<0>"), # Anything else is escaped to be safe +]] +# fmt: on + + +def _glob_to_regex(pattern: str) -> str: + """Convert a file-path glob pattern into a regex.""" + # Turn all backslashes into slashes to simplify the tokenizer. + pattern = pattern.replace("\\", "/") + if "/" not in pattern: + pattern = f"**/{pattern}" + path_rx = [] + pos = 0 + while pos < len(pattern): + for rx, sub in G2RX_TOKENS: # pragma: always breaks + if m := rx.match(pattern, pos=pos): + if sub is None: + raise ConfigError(f"File pattern can't include {m[0]!r}") + path_rx.append(m.expand(sub)) + pos = m.end() + break + return "".join(path_rx) + + +def globs_to_regex( + patterns: Iterable[str], + case_insensitive: bool = False, + partial: bool = False, +) -> re.Pattern[str]: + """Convert glob patterns to a compiled regex that matches any of them. + + Slashes are always converted to match either slash or backslash, for + Windows support, even when running elsewhere. + + If the pattern has no slash or backslash, then it is interpreted as + matching a file name anywhere it appears in the tree. Otherwise, the glob + pattern must match the whole file path. + + If `partial` is true, then the pattern will match if the target string + starts with the pattern. Otherwise, it must match the entire string. + + Returns: a compiled regex object. Use the .match method to compare target + strings. + + """ + flags = 0 + if case_insensitive: + flags |= re.IGNORECASE + rx = join_regex(map(_glob_to_regex, patterns)) + if not partial: + rx = rf"(?:{rx})\Z" + compiled = re.compile(rx, flags=flags) + return compiled + + +class PathAliases: + """A collection of aliases for paths. + + When combining data files from remote machines, often the paths to source + code are different, for example, due to OS differences, or because of + serialized checkouts on continuous integration machines. + + A `PathAliases` object tracks a list of pattern/result pairs, and can + map a path through those aliases to produce a unified path. + + """ + + def __init__( + self, + debugfn: Callable[[str], None] | None = None, + relative: bool = False, + ) -> None: + # A list of (original_pattern, regex, result) + self.aliases: list[tuple[str, re.Pattern[str], str]] = [] + self.debugfn = debugfn or (lambda msg: 0) + self.relative = relative + self.pprinted = False + + def pprint(self) -> None: + """Dump the important parts of the PathAliases, for debugging.""" + self.debugfn(f"Aliases (relative={self.relative}):") + for original_pattern, regex, result in self.aliases: + self.debugfn(f" Rule: {original_pattern!r} -> {result!r} using regex {regex.pattern!r}") + + def add(self, pattern: str, result: str) -> None: + """Add the `pattern`/`result` pair to the list of aliases. + + `pattern` is an `glob`-style pattern. `result` is a simple + string. When mapping paths, if a path starts with a match against + `pattern`, then that match is replaced with `result`. This models + isomorphic source trees being rooted at different places on two + different machines. + + `pattern` can't end with a wildcard component, since that would + match an entire tree, and not just its root. + + """ + original_pattern = pattern + pattern_sep = sep(pattern) + + if len(pattern) > 1: + pattern = pattern.rstrip(r"\/") + + # The pattern can't end with a wildcard component. + if pattern.endswith("*"): + raise ConfigError("Pattern must not end with wildcards.") + + # The pattern is meant to match a file path. Let's make it absolute + # unless it already is, or is meant to match any prefix. + if not self.relative: + if not pattern.startswith("*") and not isabs_anywhere(pattern + pattern_sep): + pattern = abs_file(pattern) + if not pattern.endswith(pattern_sep): + pattern += pattern_sep + + # Make a regex from the pattern. + regex = globs_to_regex([pattern], case_insensitive=True, partial=True) + + # Normalize the result: it must end with a path separator. + result_sep = sep(result) + result = result.rstrip(r"\/") + result_sep + self.aliases.append((original_pattern, regex, result)) + + def map(self, path: str, exists: Callable[[str], bool] = source_exists) -> str: + """Map `path` through the aliases. + + `path` is checked against all of the patterns. The first pattern to + match is used to replace the root of the path with the result root. + Only one pattern is ever used. If no patterns match, `path` is + returned unchanged. + + The separator style in the result is made to match that of the result + in the alias. + + `exists` is a function to determine if the resulting path actually + exists. + + Returns the mapped path. If a mapping has happened, this is a + canonical path. If no mapping has happened, it is the original value + of `path` unchanged. + + """ + if not self.pprinted: + self.pprint() + self.pprinted = True + + for original_pattern, regex, result in self.aliases: + if m := regex.match(path): + new = path.replace(m[0], result) + new = new.replace(sep(path), sep(result)) + if not self.relative: + new = canonical_filename(new) + dot_start = result.startswith(("./", ".\\")) and len(result) > 2 + if new.startswith(("./", ".\\")) and not dot_start: + new = new[2:] + if not exists(new): + self.debugfn( + f"Rule {original_pattern!r} changed {path!r} to {new!r} " + + "which doesn't exist, continuing", + ) + continue + self.debugfn( + f"Matched path {path!r} to rule {original_pattern!r} -> {result!r}, " + + f"producing {new!r}", + ) + return new + + # If we get here, no pattern matched. + + if self.relative: + path = relative_filename(path) + + if self.relative and not isabs_anywhere(path): + # Auto-generate a pattern to implicitly match relative files + parts = re.split(r"[/\\]", path) + if len(parts) > 1: + dir1 = parts[0] + pattern = f"*/{dir1}" + regex_pat = rf"^(.*[\\/])?{re.escape(dir1)}[\\/]" + result = f"{dir1}{os.sep}" + # Only add a new pattern if we don't already have this pattern. + if not any(p == pattern for p, _, _ in self.aliases): + self.debugfn( + f"Generating rule: {pattern!r} -> {result!r} using regex {regex_pat!r}", + ) + self.aliases.append((pattern, re.compile(regex_pat), result)) + return self.map(path, exists=exists) + + self.debugfn(f"No rules match, path {path!r} is unchanged") + return path + + +def find_python_files(dirname: str, include_namespace_packages: bool) -> Iterable[str]: + """Yield all of the importable Python files in `dirname`, recursively. + + To be importable, the files have to be in a directory with a __init__.py, + except for `dirname` itself, which isn't required to have one. The + assumption is that `dirname` was specified directly, so the user knows + best, but sub-directories are checked for a __init__.py to be sure we only + find the importable files. + + If `include_namespace_packages` is True, then the check for __init__.py + files is skipped. + + Files with strange characters are skipped, since they couldn't have been + imported, and are probably editor side-files. + + """ + for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)): + if not include_namespace_packages: + if i > 0 and "__init__.py" not in filenames: + # If a directory doesn't have __init__.py, then it isn't + # importable and neither are its files + del dirnames[:] + continue + for filename in filenames: + # We're only interested in files that look like reasonable Python + # files: Must end with .py or .pyw, and must not have certain funny + # characters that probably mean they are editor junk. + if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename): + yield os.path.join(dirpath, filename) + + +# Globally set the relative directory. +set_relative_directory() diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/html.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/html.py new file mode 100644 index 0000000..1fff595 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/html.py @@ -0,0 +1,856 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""HTML reporting for coverage.py.""" + +from __future__ import annotations + +import collections +import dataclasses +import datetime +import functools +import json +import os +import re +import string +from collections.abc import Iterable +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any + +import coverage +from coverage.data import CoverageData, add_data_to_hash +from coverage.exceptions import NoDataError +from coverage.files import flat_rootname +from coverage.misc import ( + Hasher, + ensure_dir, + file_be_gone, + format_local_datetime, + human_sorted, + isolate_module, + plural, + stdout_link, +) +from coverage.report_core import get_analysis_to_report +from coverage.results import Analysis, AnalysisNarrower, Numbers +from coverage.templite import Templite +from coverage.types import TLineNo, TMorf +from coverage.version import __url__ + +if TYPE_CHECKING: + from coverage import Coverage + from coverage.plugins import FileReporter + + +os = isolate_module(os) + + +def data_filename(fname: str) -> str: + """Return the path to an "htmlfiles" data file of ours.""" + static_dir = os.path.join(os.path.dirname(__file__), "htmlfiles") + static_filename = os.path.join(static_dir, fname) + return static_filename + + +def read_data(fname: str) -> str: + """Return the contents of a data file of ours.""" + with open(data_filename(fname), encoding="utf-8") as data_file: + return data_file.read() + + +def write_html(fname: str, html: str) -> None: + """Write `html` to `fname`, properly encoded.""" + html = re.sub(r"(\A\s+)|(\s+$)", "", html, flags=re.MULTILINE) + "\n" + with open(fname, "wb") as fout: + fout.write(html.encode("ascii", "xmlcharrefreplace")) + + +@dataclass +class LineData: + """The data for each source line of HTML output.""" + + tokens: list[tuple[str, str]] + number: TLineNo + category: str + contexts: list[str] + contexts_label: str + context_list: list[str] + short_annotations: list[str] + long_annotations: list[str] + html: str = "" + context_str: str | None = None + annotate: str | None = None + annotate_long: str | None = None + css_class: str = "" + + +@dataclass +class FileData: + """The data for each source file of HTML output.""" + + relative_filename: str + nums: Numbers + lines: list[LineData] + + +@dataclass +class IndexItem: + """Information for each index entry, to render an index page.""" + + url: str = "" + file: str = "" + description: str = "" + nums: Numbers = field(default_factory=Numbers) + + +@dataclass +class IndexPage: + """Data for each index page.""" + + noun: str + plural: str + filename: str + summaries: list[IndexItem] + totals: Numbers + skipped_covered_count: int + skipped_empty_count: int + + +class HtmlDataGeneration: + """Generate structured data to be turned into HTML reports.""" + + EMPTY = "(empty)" + + def __init__(self, cov: Coverage) -> None: + self.coverage = cov + self.config = self.coverage.config + self.data = self.coverage.get_data() + self.has_arcs = self.data.has_arcs() + if self.config.show_contexts: + if self.data.measured_contexts() == {""}: + self.coverage._warn("No contexts were measured") + self.data.set_query_contexts(self.config.report_contexts) + + def data_for_file(self, fr: FileReporter, analysis: Analysis) -> FileData: + """Produce the data needed for one file's report.""" + if self.has_arcs: + missing_branch_arcs = analysis.missing_branch_arcs() + arcs_executed = analysis.arcs_executed + else: + missing_branch_arcs = {} + arcs_executed = [] + + if self.config.show_contexts: + contexts_by_lineno = self.data.contexts_by_lineno(analysis.filename) + + lines = [] + branch_stats = analysis.branch_stats() + multiline_map = {} + if hasattr(fr, "multiline_map"): + multiline_map = fr.multiline_map() + + for lineno, tokens in enumerate(fr.source_token_lines(), start=1): + # Figure out how to mark this line. + category = category2 = "" + short_annotations = [] + long_annotations = [] + + if lineno in analysis.excluded: + category = "exc" + elif lineno in analysis.missing: + category = "mis" + elif self.has_arcs and lineno in missing_branch_arcs: + category = "par" + mba = missing_branch_arcs[lineno] + if len(mba) == branch_stats[lineno][0]: + # None of the branches were taken from this line. + short_annotations.append("anywhere") + long_annotations.append( + f"line {lineno} didn't jump anywhere: it always raised an exception." + ) + else: + for b in missing_branch_arcs[lineno]: + if b < 0: + short_annotations.append("exit") + else: + short_annotations.append(str(b)) + long_annotations.append( + fr.missing_arc_description(lineno, b, arcs_executed) + ) + elif lineno in analysis.statements: + category = "run" + elif first_line := multiline_map.get(lineno): + if first_line in analysis.excluded: + category2 = "exc2" + elif first_line in analysis.missing: + category2 = "mis2" + elif self.has_arcs and first_line in missing_branch_arcs: + category2 = "par2" + # I don't understand why this last condition is marked as + # partial. If I add an else with an exception, the exception + # is raised. + elif first_line in analysis.statements: # pragma: part covered + category2 = "run2" + + contexts = [] + contexts_label = "" + context_list = [] + if category and self.config.show_contexts: + contexts = human_sorted(c or self.EMPTY for c in contexts_by_lineno.get(lineno, ())) + if contexts == [self.EMPTY]: + contexts_label = self.EMPTY + else: + contexts_label = f"{len(contexts)} ctx" + context_list = contexts + + lines.append( + LineData( + tokens=tokens, + number=lineno, + category=category or category2, + contexts=contexts, + contexts_label=contexts_label, + context_list=context_list, + short_annotations=short_annotations, + long_annotations=long_annotations, + ) + ) + + file_data = FileData( + relative_filename=fr.relative_filename(), + nums=analysis.numbers, + lines=lines, + ) + + return file_data + + +class FileToReport: + """A file we're considering reporting.""" + + def __init__(self, fr: FileReporter, analysis: Analysis) -> None: + self.fr = fr + self.analysis = analysis + self.rootname = flat_rootname(fr.relative_filename()) + self.html_filename = self.rootname + ".html" + self.prev_html = self.next_html = "" + + +HTML_SAFE = string.ascii_letters + string.digits + "!#$%'()*+,-./:;=?@[]^_`{|}~" + + +@functools.cache +def encode_int(n: int) -> str: + """Create a short HTML-safe string from an integer, using HTML_SAFE.""" + if n == 0: + return HTML_SAFE[0] + + r = [] + while n: + n, t = divmod(n, len(HTML_SAFE)) + r.append(HTML_SAFE[t]) + return "".join(r) + + +def copy_with_cache_bust(src: str, dest_dir: str) -> str: + """Copy `src` to `dest_dir`, adding a hash to the name. + + Returns the updated destination file name with hash. + """ + with open(src, "rb") as f: + text = f.read() + h = Hasher() + h.update(text) + cache_bust = h.hexdigest()[:8] + src_base = os.path.basename(src) + dest = src_base.replace(".", f"_cb_{cache_bust}.") + with open(os.path.join(dest_dir, dest), "wb") as f: + f.write(text) + return dest + + +class HtmlReporter: + """HTML reporting.""" + + # These files will be copied from the htmlfiles directory to the output + # directory. + STATIC_FILES = [ + "style.css", + "coverage_html.js", + "keybd_closed.png", + "favicon_32.png", + ] + + def __init__(self, cov: Coverage) -> None: + self.coverage = cov + self.config = self.coverage.config + self.directory = self.config.html_dir + + self.skip_covered = self.config.html_skip_covered + if self.skip_covered is None: + self.skip_covered = self.config.skip_covered + self.skip_empty = self.config.html_skip_empty + if self.skip_empty is None: + self.skip_empty = self.config.skip_empty + + title = self.config.html_title + + self.extra_css = bool(self.config.extra_css) + + self.data = self.coverage.get_data() + self.has_arcs = self.data.has_arcs() + + self.index_pages: dict[str, IndexPage] = { + "file": self.new_index_page("file", "files"), + } + self.incr = IncrementalChecker(self.directory) + self.datagen = HtmlDataGeneration(self.coverage) + self.directory_was_empty = False + self.first_fr = None + self.final_fr = None + + self.template_globals = { + # Functions available in the templates. + "escape": escape, + "pair": pair, + "len": len, + # Constants for this report. + "__url__": __url__, + "__version__": coverage.__version__, + "title": title, + "time_stamp": format_local_datetime(datetime.datetime.now()), + "extra_css": self.extra_css, + "has_arcs": self.has_arcs, + "show_contexts": self.config.show_contexts, + "statics": {}, + # Constants for all reports. + # These css classes determine which lines are highlighted by default. + "category": { + "exc": "exc show_exc", + "mis": "mis show_mis", + "par": "par run show_par", + "run": "run", + "exc2": "exc exc2 show_exc", + "mis2": "mis mis2 show_mis", + "par2": "par par2 ru2 show_par", + "run2": "run run2", + }, + } + self.index_tmpl = Templite(read_data("index.html"), self.template_globals) + self.pyfile_html_source = read_data("pyfile.html") + self.source_tmpl = Templite(self.pyfile_html_source, self.template_globals) + + def new_index_page(self, noun: str, plural_noun: str) -> IndexPage: + """Create an IndexPage for a kind of region.""" + return IndexPage( + noun=noun, + plural=plural_noun, + filename="index.html" if noun == "file" else f"{noun}_index.html", + summaries=[], + totals=Numbers(precision=self.config.precision), + skipped_covered_count=0, + skipped_empty_count=0, + ) + + def report(self, morfs: Iterable[TMorf] | None) -> float: + """Generate an HTML report for `morfs`. + + `morfs` is a list of modules or file names. + + """ + # Read the status data and check that this run used the same + # global data as the last run. + self.incr.read() + self.incr.check_global_data(self.config, self.pyfile_html_source) + + # Process all the files. For each page we need to supply a link + # to the next and previous page. + files_to_report = [] + + have_data = False + for fr, analysis in get_analysis_to_report(self.coverage, morfs): + have_data = True + ftr = FileToReport(fr, analysis) + if self.should_report(analysis, self.index_pages["file"]): + files_to_report.append(ftr) + else: + file_be_gone(os.path.join(self.directory, ftr.html_filename)) + + if not have_data: + raise NoDataError("No data to report.") + + self.make_directory() + self.make_local_static_report_files() + + if files_to_report: + for ftr1, ftr2 in zip(files_to_report[:-1], files_to_report[1:]): + ftr1.next_html = ftr2.html_filename + ftr2.prev_html = ftr1.html_filename + files_to_report[0].prev_html = "index.html" + files_to_report[-1].next_html = "index.html" + + for ftr in files_to_report: + self.write_html_page(ftr) + for noun, plural_noun in ftr.fr.code_region_kinds(): + if noun not in self.index_pages: + self.index_pages[noun] = self.new_index_page(noun, plural_noun) + + # Write the index page. + if files_to_report: + first_html = files_to_report[0].html_filename + final_html = files_to_report[-1].html_filename + else: + first_html = final_html = "index.html" + self.write_file_index_page(first_html, final_html) + + # Write function and class index pages. + self.write_region_index_pages(files_to_report) + + return ( + self.index_pages["file"].totals.n_statements + and self.index_pages["file"].totals.pc_covered + ) + + def make_directory(self) -> None: + """Make sure our htmlcov directory exists.""" + ensure_dir(self.directory) + if not os.listdir(self.directory): + self.directory_was_empty = True + + def copy_static_file(self, src: str, slug: str = "") -> None: + """Copy a static file into the output directory with cache busting.""" + dest = copy_with_cache_bust(src, self.directory) + if not slug: + slug = os.path.basename(src).replace(".", "_") + self.template_globals["statics"][slug] = dest # type: ignore + + def make_local_static_report_files(self) -> None: + """Make local instances of static files for HTML report.""" + + # The files we provide must always be copied. + for static in self.STATIC_FILES: + self.copy_static_file(data_filename(static)) + + # The user may have extra CSS they want copied. + if self.extra_css: + assert self.config.extra_css is not None + self.copy_static_file(self.config.extra_css, slug="extra_css") + + # Only write the .gitignore file if the directory was originally empty. + # .gitignore can't be copied from the source tree because if it was in + # the source tree, it would stop the static files from being checked in. + if self.directory_was_empty: + with open(os.path.join(self.directory, ".gitignore"), "w", encoding="utf-8") as fgi: + fgi.write("# Created by coverage.py\n*\n") + + def should_report(self, analysis: Analysis, index_page: IndexPage) -> bool: + """Determine if we'll report this file or region.""" + # Get the numbers for this file. + nums = analysis.numbers + index_page.totals += nums + + if self.skip_covered: + # Don't report on 100% files. + no_missing_lines = (nums.n_missing == 0) # fmt: skip + no_missing_branches = (nums.n_partial_branches == 0) # fmt: skip + if no_missing_lines and no_missing_branches: + index_page.skipped_covered_count += 1 + return False + + if self.skip_empty: + # Don't report on empty files. + if nums.n_statements == 0: + index_page.skipped_empty_count += 1 + return False + + return True + + def write_html_page(self, ftr: FileToReport) -> None: + """Generate an HTML page for one source file. + + If the page on disk is already correct based on our incremental status + checking, then the page doesn't have to be generated, and this function + only does page summary bookkeeping. + + """ + # Find out if the page on disk is already correct. + if self.incr.can_skip_file(self.data, ftr.fr, ftr.rootname): + self.index_pages["file"].summaries.append(self.incr.index_info(ftr.rootname)) + return + + # Write the HTML page for this source file. + file_data = self.datagen.data_for_file(ftr.fr, ftr.analysis) + + contexts = collections.Counter(c for cline in file_data.lines for c in cline.contexts) + context_codes = {y: i for (i, y) in enumerate(x[0] for x in contexts.most_common())} + if context_codes: + contexts_json = json.dumps( + {encode_int(v): k for (k, v) in context_codes.items()}, + indent=2, + ) + else: + contexts_json = None + + for ldata in file_data.lines: + # Build the HTML for the line. + html_parts = [] + for tok_type, tok_text in ldata.tokens: + if tok_type == "ws": + html_parts.append(escape(tok_text)) + else: + tok_html = escape(tok_text) or " " + html_parts.append(f'{tok_html}') + ldata.html = "".join(html_parts) + if ldata.context_list: + encoded_contexts = [ + encode_int(context_codes[c_context]) for c_context in ldata.context_list + ] + code_width = max(len(ec) for ec in encoded_contexts) + ldata.context_str = str(code_width) + "".join( + ec.ljust(code_width) for ec in encoded_contexts + ) + else: + ldata.context_str = "" + + if ldata.short_annotations: + # 202F is NARROW NO-BREAK SPACE. + # 219B is RIGHTWARDS ARROW WITH STROKE. + ldata.annotate = ",   ".join( + f"{ldata.number} ↛ {d}" for d in ldata.short_annotations + ) + else: + ldata.annotate = None + + if ldata.long_annotations: + longs = ldata.long_annotations + # A line can only have two branch destinations. If there were + # two missing, we would have written one as "always raised." + assert len(longs) == 1, ( + f"Had long annotations in {ftr.fr.relative_filename()}: {longs}" + ) + ldata.annotate_long = longs[0] + else: + ldata.annotate_long = None + + css_classes = [] + if ldata.category: + css_classes.append( + self.template_globals["category"][ldata.category], # type: ignore[index] + ) + ldata.css_class = " ".join(css_classes) or "pln" + + html_path = os.path.join(self.directory, ftr.html_filename) + html = self.source_tmpl.render( + { + **file_data.__dict__, + "contexts_json": contexts_json, + "prev_html": ftr.prev_html, + "next_html": ftr.next_html, + } + ) + write_html(html_path, html) + + # Save this file's information for the index page. + index_info = IndexItem( + url=ftr.html_filename, + file=escape(ftr.fr.relative_filename()), + nums=ftr.analysis.numbers, + ) + self.index_pages["file"].summaries.append(index_info) + self.incr.set_index_info(ftr.rootname, index_info) + + def write_file_index_page(self, first_html: str, final_html: str) -> None: + """Write the file index page for this report.""" + index_file = self.write_index_page( + self.index_pages["file"], + first_html=first_html, + final_html=final_html, + ) + + print_href = stdout_link(index_file, f"file://{os.path.abspath(index_file)}") + self.coverage._message(f"Wrote HTML report to {print_href}") + + # Write the latest hashes for next time. + self.incr.write() + + def write_region_index_pages(self, files_to_report: Iterable[FileToReport]) -> None: + """Write the other index pages for this report.""" + for ftr in files_to_report: + region_nouns = [pair[0] for pair in ftr.fr.code_region_kinds()] + num_lines = len(ftr.fr.source().splitlines()) + regions = ftr.fr.code_regions() + + for noun in region_nouns: + page_data = self.index_pages[noun] + + outside_lines = set(range(1, num_lines + 1)) + for region in regions: + if region.kind != noun: + continue + outside_lines -= region.lines + + narrower = AnalysisNarrower(ftr.analysis) + narrower.add_regions(r.lines for r in regions if r.kind == noun) + narrower.add_regions([outside_lines]) + + for region in regions: + if region.kind != noun: + continue + analysis = narrower.narrow(region.lines) + if not self.should_report(analysis, page_data): + continue + sorting_name = region.name.rpartition(".")[-1].lstrip("_") + page_data.summaries.append( + IndexItem( + url=f"{ftr.html_filename}#t{region.start}", + file=escape(ftr.fr.relative_filename()), + description=( + f"" + + escape(region.name) + + "" + ), + nums=analysis.numbers, + ) + ) + + analysis = narrower.narrow(outside_lines) + if self.should_report(analysis, page_data): + page_data.summaries.append( + IndexItem( + url=ftr.html_filename, + file=escape(ftr.fr.relative_filename()), + description=( + "" + + f"(no {escape(noun)})" + + "" + ), + nums=analysis.numbers, + ) + ) + + for noun, index_page in self.index_pages.items(): + if noun != "file": + self.write_index_page(index_page) + + def write_index_page(self, index_page: IndexPage, **kwargs: str) -> str: + """Write an index page specified by `index_page`. + + Returns the filename created. + """ + skipped_covered_msg = skipped_empty_msg = "" + if n := index_page.skipped_covered_count: + word = plural(n, index_page.noun, index_page.plural) + skipped_covered_msg = f"{n} {word} skipped due to complete coverage." + if n := index_page.skipped_empty_count: + word = plural(n, index_page.noun, index_page.plural) + skipped_empty_msg = f"{n} empty {word} skipped." + + index_buttons = [ + { + "label": ip.plural.title(), + "url": ip.filename if ip.noun != index_page.noun else "", + "current": ip.noun == index_page.noun, + } + for ip in self.index_pages.values() + ] + render_data = { + "regions": index_page.summaries, + "totals": index_page.totals, + "noun": index_page.noun, + "region_noun": index_page.noun if index_page.noun != "file" else "", + "skip_covered": self.skip_covered, + "skipped_covered_msg": skipped_covered_msg, + "skipped_empty_msg": skipped_empty_msg, + "first_html": "", + "final_html": "", + "index_buttons": index_buttons, + } + render_data.update(kwargs) + html = self.index_tmpl.render(render_data) + + index_file = os.path.join(self.directory, index_page.filename) + write_html(index_file, html) + return index_file + + +@dataclass +class FileInfo: + """Summary of the information from last rendering, to avoid duplicate work.""" + + hash: str = "" + index: IndexItem = field(default_factory=IndexItem) + + +class IncrementalChecker: + """Logic and data to support incremental reporting. + + When generating an HTML report, often only a few of the source files have + changed since the last time we made the HTML report. This means previously + created HTML pages can be reused without generating them again, speeding + the command. + + This class manages a JSON data file that captures enough information to + know whether an HTML page for a .py file needs to be regenerated or not. + The data file also needs to store all the information needed to create the + entry for the file on the index page so that if the HTML page is reused, + the index page can still be created to refer to it. + + The data looks like:: + + { + "note": "This file is an internal implementation detail ...", + // A fixed number indicating the data format. STATUS_FORMAT + "format": 5, + // The version of coverage.py + "version": "7.4.4", + // A hash of a number of global things, including the configuration + // settings and the pyfile.html template itself. + "globals": "540ee119c15d52a68a53fe6f0897346d", + "files": { + // An entry for each source file keyed by the flat_rootname(). + "z_7b071bdc2a35fa80___init___py": { + // Hash of the source, the text of the .py file. + "hash": "e45581a5b48f879f301c0f30bf77a50c", + // Information for the index.html file. + "index": { + "url": "z_7b071bdc2a35fa80___init___py.html", + "file": "cogapp/__init__.py", + "description": "", + // The Numbers for this file. + "nums": { "precision": 2, "n_files": 1, "n_statements": 43, ... } + } + }, + ... + } + } + + """ + + STATUS_FILE = "status.json" + STATUS_FORMAT = 5 + NOTE = ( + "This file is an internal implementation detail to speed up HTML report" + + " generation. Its format can change at any time. You might be looking" + + " for the JSON report: https://coverage.rtfd.io/cmd.html#cmd-json" + ) + + def __init__(self, directory: str) -> None: + self.directory = directory + self._reset() + + def _reset(self) -> None: + """Initialize to empty. Causes all files to be reported.""" + self.globals = "" + self.files: dict[str, FileInfo] = {} + + def read(self) -> None: + """Read the information we stored last time.""" + try: + status_file = os.path.join(self.directory, self.STATUS_FILE) + with open(status_file, encoding="utf-8") as fstatus: + status = json.load(fstatus) + except (OSError, ValueError): + # Status file is missing or malformed. + usable = False + else: + if status["format"] != self.STATUS_FORMAT: + usable = False + elif status["version"] != coverage.__version__: + usable = False + else: + usable = True + + if usable: + self.files = {} + for filename, filedict in status["files"].items(): + indexdict = filedict["index"] + index_item = IndexItem(**indexdict) + index_item.nums = Numbers(**indexdict["nums"]) + fileinfo = FileInfo( + hash=filedict["hash"], + index=index_item, + ) + self.files[filename] = fileinfo + self.globals = status["globals"] + else: + self._reset() + + def write(self) -> None: + """Write the current status.""" + status_file = os.path.join(self.directory, self.STATUS_FILE) + status_data = { + "note": self.NOTE, + "format": self.STATUS_FORMAT, + "version": coverage.__version__, + "globals": self.globals, + "files": {fname: dataclasses.asdict(finfo) for fname, finfo in self.files.items()}, + } + with open(status_file, "w", encoding="utf-8") as fout: + json.dump(status_data, fout, separators=(",", ":")) + + def check_global_data(self, *data: Any) -> None: + """Check the global data that can affect incremental reporting. + + Pass in whatever global information could affect the content of the + HTML pages. If the global data has changed since last time, this will + clear the data so that all files are regenerated. + + """ + m = Hasher() + for d in data: + m.update(d) + these_globals = m.hexdigest() + if self.globals != these_globals: + self._reset() + self.globals = these_globals + + def can_skip_file(self, data: CoverageData, fr: FileReporter, rootname: str) -> bool: + """Can we skip reporting this file? + + `data` is a CoverageData object, `fr` is a `FileReporter`, and + `rootname` is the name being used for the file. + + Returns True if the HTML page is fine as-is, False if we need to recreate + the HTML page. + + """ + m = Hasher() + m.update(fr.source().encode("utf-8")) + add_data_to_hash(data, fr.filename, m) + this_hash = m.hexdigest() + + file_info = self.files.setdefault(rootname, FileInfo()) + + if this_hash == file_info.hash: + # Nothing has changed to require the file to be reported again. + return True + else: + # File has changed, record the latest hash and force regeneration. + file_info.hash = this_hash + return False + + def index_info(self, fname: str) -> IndexItem: + """Get the information for index.html for `fname`.""" + return self.files.get(fname, FileInfo()).index + + def set_index_info(self, fname: str, info: IndexItem) -> None: + """Set the information for index.html for `fname`.""" + self.files.setdefault(fname, FileInfo()).index = info + + +# Helpers for templates and generating HTML + + +def escape(t: str) -> str: + """HTML-escape the text in `t`. + + This is only suitable for HTML text, not attributes. + + """ + # Convert HTML special chars into HTML entities. + return t.replace("&", "&").replace("<", "<") + + +def pair(ratio: tuple[int, int]) -> str: + """Format a pair of numbers so JavaScript can read them in an attribute.""" + return "{} {}".format(*ratio) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/coverage_html.js b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/coverage_html.js new file mode 100644 index 0000000..851d4b4 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/coverage_html.js @@ -0,0 +1,733 @@ +// Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +// For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +// Coverage.py HTML report browser code. +/*jslint browser: true, sloppy: true, vars: true, plusplus: true, maxerr: 50, indent: 4 */ +/*global coverage: true, document, window, $ */ + +coverage = {}; + +// General helpers +function debounce(callback, wait) { + let timeoutId = null; + return function(...args) { + clearTimeout(timeoutId); + timeoutId = setTimeout(() => { + callback.apply(this, args); + }, wait); + }; +}; + +function checkVisible(element) { + const rect = element.getBoundingClientRect(); + const viewBottom = Math.max(document.documentElement.clientHeight, window.innerHeight); + const viewTop = 30; + return !(rect.bottom < viewTop || rect.top >= viewBottom); +} + +function on_click(sel, fn) { + const elt = document.querySelector(sel); + if (elt) { + elt.addEventListener("click", fn); + } +} + +// Helpers for table sorting +function getCellValue(row, column = 0) { + const cell = row.cells[column] // nosemgrep: eslint.detect-object-injection + if (cell.childElementCount == 1) { + var child = cell.firstElementChild; + if (child.tagName === "A") { + child = child.firstElementChild; + } + if (child instanceof HTMLDataElement && child.value) { + return child.value; + } + } + return cell.innerText || cell.textContent; +} + +function rowComparator(rowA, rowB, column = 0) { + let valueA = getCellValue(rowA, column); + let valueB = getCellValue(rowB, column); + if (!isNaN(valueA) && !isNaN(valueB)) { + return valueA - valueB; + } + return valueA.localeCompare(valueB, undefined, {numeric: true}); +} + +function sortColumn(th) { + // Get the current sorting direction of the selected header, + // clear state on other headers and then set the new sorting direction. + const currentSortOrder = th.getAttribute("aria-sort"); + [...th.parentElement.cells].forEach(header => header.setAttribute("aria-sort", "none")); + var direction; + if (currentSortOrder === "none") { + direction = th.dataset.defaultSortOrder || "ascending"; + } + else if (currentSortOrder === "ascending") { + direction = "descending"; + } + else { + direction = "ascending"; + } + th.setAttribute("aria-sort", direction); + + const column = [...th.parentElement.cells].indexOf(th) + + // Sort all rows and afterwards append them in order to move them in the DOM. + Array.from(th.closest("table").querySelectorAll("tbody tr")) + .sort((rowA, rowB) => rowComparator(rowA, rowB, column) * (direction === "ascending" ? 1 : -1)) + .forEach(tr => tr.parentElement.appendChild(tr)); + + // Save the sort order for next time. + if (th.id !== "region") { + let th_id = "file"; // Sort by file if we don't have a column id + let current_direction = direction; + const stored_list = localStorage.getItem(coverage.INDEX_SORT_STORAGE); + if (stored_list) { + ({th_id, direction} = JSON.parse(stored_list)) + } + localStorage.setItem(coverage.INDEX_SORT_STORAGE, JSON.stringify({ + "th_id": th.id, + "direction": current_direction + })); + if (th.id !== th_id || document.getElementById("region")) { + // Sort column has changed, unset sorting by function or class. + localStorage.setItem(coverage.SORTED_BY_REGION, JSON.stringify({ + "by_region": false, + "region_direction": current_direction + })); + } + } + else { + // Sort column has changed to by function or class, remember that. + localStorage.setItem(coverage.SORTED_BY_REGION, JSON.stringify({ + "by_region": true, + "region_direction": direction + })); + } +} + +// Find all the elements with data-shortcut attribute, and use them to assign a shortcut key. +coverage.assign_shortkeys = function () { + document.querySelectorAll("[data-shortcut]").forEach(element => { + document.addEventListener("keypress", event => { + if (event.target.tagName.toLowerCase() === "input") { + return; // ignore keypress from search filter + } + if (event.key === element.dataset.shortcut) { + element.click(); + } + }); + }); +}; + +// Create the events for the filter box. +coverage.wire_up_filter = function () { + // Populate the filter and hide100 inputs if there are saved values for them. + const saved_filter_value = localStorage.getItem(coverage.FILTER_STORAGE); + if (saved_filter_value) { + document.getElementById("filter").value = saved_filter_value; + } + const saved_hide100_value = localStorage.getItem(coverage.HIDE100_STORAGE); + if (saved_hide100_value) { + document.getElementById("hide100").checked = JSON.parse(saved_hide100_value); + } + + // Cache elements. + const table = document.querySelector("table.index"); + const table_body_rows = table.querySelectorAll("tbody tr"); + const no_rows = document.getElementById("no_rows"); + + // Observe filter keyevents. + const filter_handler = (event => { + // Keep running total of each metric, first index contains number of shown rows + const totals = new Array(table.rows[0].cells.length).fill(0); + // Accumulate the percentage as fraction + totals[totals.length - 1] = { "numer": 0, "denom": 0 }; // nosemgrep: eslint.detect-object-injection + + var text = document.getElementById("filter").value; + // Store filter value + localStorage.setItem(coverage.FILTER_STORAGE, text); + const casefold = (text === text.toLowerCase()); + const hide100 = document.getElementById("hide100").checked; + // Store hide value. + localStorage.setItem(coverage.HIDE100_STORAGE, JSON.stringify(hide100)); + + // Hide / show elements. + table_body_rows.forEach(row => { + var show = false; + // Check the text filter. + for (let column = 0; column < totals.length; column++) { + cell = row.cells[column]; + if (cell.classList.contains("name")) { + var celltext = cell.textContent; + if (casefold) { + celltext = celltext.toLowerCase(); + } + if (celltext.includes(text)) { + show = true; + } + } + } + + // Check the "hide covered" filter. + if (show && hide100) { + const [numer, denom] = row.cells[row.cells.length - 1].dataset.ratio.split(" "); + show = (numer !== denom); + } + + if (!show) { + // hide + row.classList.add("hidden"); + return; + } + + // show + row.classList.remove("hidden"); + totals[0]++; + + for (let column = 0; column < totals.length; column++) { + // Accumulate dynamic totals + cell = row.cells[column] // nosemgrep: eslint.detect-object-injection + if (cell.classList.contains("name")) { + continue; + } + if (column === totals.length - 1) { + // Last column contains percentage + const [numer, denom] = cell.dataset.ratio.split(" "); + totals[column]["numer"] += parseInt(numer, 10); // nosemgrep: eslint.detect-object-injection + totals[column]["denom"] += parseInt(denom, 10); // nosemgrep: eslint.detect-object-injection + } + else { + totals[column] += parseInt(cell.textContent, 10); // nosemgrep: eslint.detect-object-injection + } + } + }); + + // Show placeholder if no rows will be displayed. + if (!totals[0]) { + // Show placeholder, hide table. + no_rows.style.display = "block"; + table.style.display = "none"; + return; + } + + // Hide placeholder, show table. + no_rows.style.display = null; + table.style.display = null; + + const footer = table.tFoot.rows[0]; + // Calculate new dynamic sum values based on visible rows. + for (let column = 0; column < totals.length; column++) { + // Get footer cell element. + const cell = footer.cells[column]; // nosemgrep: eslint.detect-object-injection + if (cell.classList.contains("name")) { + continue; + } + + // Set value into dynamic footer cell element. + if (column === totals.length - 1) { + // Percentage column uses the numerator and denominator, + // and adapts to the number of decimal places. + const match = /\.([0-9]+)/.exec(cell.textContent); + const places = match ? match[1].length : 0; + const { numer, denom } = totals[column]; // nosemgrep: eslint.detect-object-injection + cell.dataset.ratio = `${numer} ${denom}`; + // Check denom to prevent NaN if filtered files contain no statements + cell.textContent = denom + ? `${(numer * 100 / denom).toFixed(places)}%` + : `${(100).toFixed(places)}%`; + } + else { + cell.textContent = totals[column]; // nosemgrep: eslint.detect-object-injection + } + } + }); + + document.getElementById("filter").addEventListener("input", debounce(filter_handler)); + document.getElementById("hide100").addEventListener("input", debounce(filter_handler)); + + // Trigger change event on setup, to force filter on page refresh + // (filter value may still be present). + document.getElementById("filter").dispatchEvent(new Event("input")); + document.getElementById("hide100").dispatchEvent(new Event("input")); +}; +coverage.FILTER_STORAGE = "COVERAGE_FILTER_VALUE"; +coverage.HIDE100_STORAGE = "COVERAGE_HIDE100_VALUE"; + +// Set up the click-to-sort columns. +coverage.wire_up_sorting = function () { + document.querySelectorAll("[data-sortable] th[aria-sort]").forEach( + th => th.addEventListener("click", e => sortColumn(e.target)) + ); + + // Look for a localStorage item containing previous sort settings: + let th_id = "file", direction = "ascending"; + const stored_list = localStorage.getItem(coverage.INDEX_SORT_STORAGE); + if (stored_list) { + ({th_id, direction} = JSON.parse(stored_list)); + } + let by_region = false, region_direction = "ascending"; + const sorted_by_region = localStorage.getItem(coverage.SORTED_BY_REGION); + if (sorted_by_region) { + ({ + by_region, + region_direction + } = JSON.parse(sorted_by_region)); + } + + const region_id = "region"; + if (by_region && document.getElementById(region_id)) { + direction = region_direction; + } + // If we are in a page that has a column with id of "region", sort on + // it if the last sort was by function or class. + let th; + if (document.getElementById(region_id)) { + th = document.getElementById(by_region ? region_id : th_id); + } + else { + th = document.getElementById(th_id); + } + th.setAttribute("aria-sort", direction === "ascending" ? "descending" : "ascending"); + th.click() +}; + +coverage.INDEX_SORT_STORAGE = "COVERAGE_INDEX_SORT_2"; +coverage.SORTED_BY_REGION = "COVERAGE_SORT_REGION"; + +// Loaded on index.html +coverage.index_ready = function () { + coverage.assign_shortkeys(); + coverage.wire_up_filter(); + coverage.wire_up_sorting(); + + on_click(".button_prev_file", coverage.to_prev_file); + on_click(".button_next_file", coverage.to_next_file); + + on_click(".button_show_hide_help", coverage.show_hide_help); +}; + +// -- pyfile stuff -- + +coverage.LINE_FILTERS_STORAGE = "COVERAGE_LINE_FILTERS"; + +coverage.pyfile_ready = function () { + // If we're directed to a particular line number, highlight the line. + var frag = location.hash; + if (frag.length > 2 && frag[1] === "t") { + document.querySelector(frag).closest(".n").classList.add("highlight"); + coverage.set_sel(parseInt(frag.substr(2), 10)); + } + else { + coverage.set_sel(0); + } + + on_click(".button_toggle_run", coverage.toggle_lines); + on_click(".button_toggle_mis", coverage.toggle_lines); + on_click(".button_toggle_exc", coverage.toggle_lines); + on_click(".button_toggle_par", coverage.toggle_lines); + + on_click(".button_next_chunk", coverage.to_next_chunk_nicely); + on_click(".button_prev_chunk", coverage.to_prev_chunk_nicely); + on_click(".button_top_of_page", coverage.to_top); + on_click(".button_first_chunk", coverage.to_first_chunk); + + on_click(".button_prev_file", coverage.to_prev_file); + on_click(".button_next_file", coverage.to_next_file); + on_click(".button_to_index", coverage.to_index); + + on_click(".button_show_hide_help", coverage.show_hide_help); + + coverage.filters = undefined; + try { + coverage.filters = localStorage.getItem(coverage.LINE_FILTERS_STORAGE); + } catch(err) {} + + if (coverage.filters) { + coverage.filters = JSON.parse(coverage.filters); + } + else { + coverage.filters = {run: false, exc: true, mis: true, par: true}; + } + + for (cls in coverage.filters) { + coverage.set_line_visibilty(cls, coverage.filters[cls]); // nosemgrep: eslint.detect-object-injection + } + + coverage.assign_shortkeys(); + coverage.init_scroll_markers(); + coverage.wire_up_sticky_header(); + + document.querySelectorAll("[id^=ctxs]").forEach( + cbox => cbox.addEventListener("click", coverage.expand_contexts) + ); + + // Rebuild scroll markers when the window height changes. + window.addEventListener("resize", coverage.build_scroll_markers); +}; + +coverage.toggle_lines = function (event) { + const btn = event.target.closest("button"); + const category = btn.value + const show = !btn.classList.contains("show_" + category); + coverage.set_line_visibilty(category, show); + coverage.build_scroll_markers(); + coverage.filters[category] = show; + try { + localStorage.setItem(coverage.LINE_FILTERS_STORAGE, JSON.stringify(coverage.filters)); + } catch(err) {} +}; + +coverage.set_line_visibilty = function (category, should_show) { + const cls = "show_" + category; + const btn = document.querySelector(".button_toggle_" + category); + if (btn) { + if (should_show) { + document.querySelectorAll("#source ." + category).forEach(e => e.classList.add(cls)); + btn.classList.add(cls); + } + else { + document.querySelectorAll("#source ." + category).forEach(e => e.classList.remove(cls)); + btn.classList.remove(cls); + } + } +}; + +// Return the nth line div. +coverage.line_elt = function (n) { + return document.getElementById("t" + n)?.closest("p"); +}; + +// Set the selection. b and e are line numbers. +coverage.set_sel = function (b, e) { + // The first line selected. + coverage.sel_begin = b; + // The next line not selected. + coverage.sel_end = (e === undefined) ? b+1 : e; +}; + +coverage.to_top = function () { + coverage.set_sel(0, 1); + coverage.scroll_window(0); +}; + +coverage.to_first_chunk = function () { + coverage.set_sel(0, 1); + coverage.to_next_chunk(); +}; + +coverage.to_prev_file = function () { + window.location = document.getElementById("prevFileLink").href; +} + +coverage.to_next_file = function () { + window.location = document.getElementById("nextFileLink").href; +} + +coverage.to_index = function () { + location.href = document.getElementById("indexLink").href; +} + +coverage.show_hide_help = function () { + const helpCheck = document.getElementById("help_panel_state") + helpCheck.checked = !helpCheck.checked; +} + +// Return a string indicating what kind of chunk this line belongs to, +// or null if not a chunk. +coverage.chunk_indicator = function (line_elt) { + const classes = line_elt?.className; + if (!classes) { + return null; + } + const match = classes.match(/\bshow_\w+\b/); + if (!match) { + return null; + } + return match[0]; +}; + +coverage.to_next_chunk = function () { + const c = coverage; + + // Find the start of the next colored chunk. + var probe = c.sel_end; + var chunk_indicator, probe_line; + while (true) { + probe_line = c.line_elt(probe); + if (!probe_line) { + return; + } + chunk_indicator = c.chunk_indicator(probe_line); + if (chunk_indicator) { + break; + } + probe++; + } + + // There's a next chunk, `probe` points to it. + var begin = probe; + + // Find the end of this chunk. + var next_indicator = chunk_indicator; + while (next_indicator === chunk_indicator) { + probe++; + probe_line = c.line_elt(probe); + next_indicator = c.chunk_indicator(probe_line); + } + c.set_sel(begin, probe); + c.show_selection(); +}; + +coverage.to_prev_chunk = function () { + const c = coverage; + + // Find the end of the prev colored chunk. + var probe = c.sel_begin-1; + var probe_line = c.line_elt(probe); + if (!probe_line) { + return; + } + var chunk_indicator = c.chunk_indicator(probe_line); + while (probe > 1 && !chunk_indicator) { + probe--; + probe_line = c.line_elt(probe); + if (!probe_line) { + return; + } + chunk_indicator = c.chunk_indicator(probe_line); + } + + // There's a prev chunk, `probe` points to its last line. + var end = probe+1; + + // Find the beginning of this chunk. + var prev_indicator = chunk_indicator; + while (prev_indicator === chunk_indicator) { + probe--; + if (probe <= 0) { + return; + } + probe_line = c.line_elt(probe); + prev_indicator = c.chunk_indicator(probe_line); + } + c.set_sel(probe+1, end); + c.show_selection(); +}; + +// Returns 0, 1, or 2: how many of the two ends of the selection are on +// the screen right now? +coverage.selection_ends_on_screen = function () { + if (coverage.sel_begin === 0) { + return 0; + } + + const begin = coverage.line_elt(coverage.sel_begin); + const end = coverage.line_elt(coverage.sel_end-1); + + return ( + (checkVisible(begin) ? 1 : 0) + + (checkVisible(end) ? 1 : 0) + ); +}; + +coverage.to_next_chunk_nicely = function () { + if (coverage.selection_ends_on_screen() === 0) { + // The selection is entirely off the screen: + // Set the top line on the screen as selection. + + // This will select the top-left of the viewport + // As this is most likely the span with the line number we take the parent + const line = document.elementFromPoint(0, 0).parentElement; + if (line.parentElement !== document.getElementById("source")) { + // The element is not a source line but the header or similar + coverage.select_line_or_chunk(1); + } + else { + // We extract the line number from the id + coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10)); + } + } + coverage.to_next_chunk(); +}; + +coverage.to_prev_chunk_nicely = function () { + if (coverage.selection_ends_on_screen() === 0) { + // The selection is entirely off the screen: + // Set the lowest line on the screen as selection. + + // This will select the bottom-left of the viewport + // As this is most likely the span with the line number we take the parent + const line = document.elementFromPoint(document.documentElement.clientHeight-1, 0).parentElement; + if (line.parentElement !== document.getElementById("source")) { + // The element is not a source line but the header or similar + coverage.select_line_or_chunk(coverage.lines_len); + } + else { + // We extract the line number from the id + coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10)); + } + } + coverage.to_prev_chunk(); +}; + +// Select line number lineno, or if it is in a colored chunk, select the +// entire chunk +coverage.select_line_or_chunk = function (lineno) { + var c = coverage; + var probe_line = c.line_elt(lineno); + if (!probe_line) { + return; + } + var the_indicator = c.chunk_indicator(probe_line); + if (the_indicator) { + // The line is in a highlighted chunk. + // Search backward for the first line. + var probe = lineno; + var indicator = the_indicator; + while (probe > 0 && indicator === the_indicator) { + probe--; + probe_line = c.line_elt(probe); + if (!probe_line) { + break; + } + indicator = c.chunk_indicator(probe_line); + } + var begin = probe + 1; + + // Search forward for the last line. + probe = lineno; + indicator = the_indicator; + while (indicator === the_indicator) { + probe++; + probe_line = c.line_elt(probe); + indicator = c.chunk_indicator(probe_line); + } + + coverage.set_sel(begin, probe); + } + else { + coverage.set_sel(lineno); + } +}; + +coverage.show_selection = function () { + // Highlight the lines in the chunk + document.querySelectorAll("#source .highlight").forEach(e => e.classList.remove("highlight")); + for (let probe = coverage.sel_begin; probe < coverage.sel_end; probe++) { + coverage.line_elt(probe).querySelector(".n").classList.add("highlight"); + } + + coverage.scroll_to_selection(); +}; + +coverage.scroll_to_selection = function () { + // Scroll the page if the chunk isn't fully visible. + if (coverage.selection_ends_on_screen() < 2) { + const element = coverage.line_elt(coverage.sel_begin); + coverage.scroll_window(element.offsetTop - 60); + } +}; + +coverage.scroll_window = function (to_pos) { + window.scroll({top: to_pos, behavior: "smooth"}); +}; + +coverage.init_scroll_markers = function () { + // Init some variables + coverage.lines_len = document.querySelectorAll("#source > p").length; + + // Build html + coverage.build_scroll_markers(); +}; + +coverage.build_scroll_markers = function () { + const temp_scroll_marker = document.getElementById("scroll_marker") + if (temp_scroll_marker) temp_scroll_marker.remove(); + // Don't build markers if the window has no scroll bar. + if (document.body.scrollHeight <= window.innerHeight) { + return; + } + + const marker_scale = window.innerHeight / document.body.scrollHeight; + const line_height = Math.min(Math.max(3, window.innerHeight / coverage.lines_len), 10); + + let previous_line = -99, last_mark, last_top; + + const scroll_marker = document.createElement("div"); + scroll_marker.id = "scroll_marker"; + document.getElementById("source").querySelectorAll( + "p.show_run, p.show_mis, p.show_exc, p.show_exc, p.show_par" + ).forEach(element => { + const line_top = Math.floor(element.offsetTop * marker_scale); + const line_number = parseInt(element.querySelector(".n a").id.substr(1)); + + if (line_number === previous_line + 1) { + // If this solid missed block just make previous mark higher. + last_mark.style.height = `${line_top + line_height - last_top}px`; + } + else { + // Add colored line in scroll_marker block. + last_mark = document.createElement("div"); + last_mark.id = `m${line_number}`; + last_mark.classList.add("marker"); + last_mark.style.height = `${line_height}px`; + last_mark.style.top = `${line_top}px`; + scroll_marker.append(last_mark); + last_top = line_top; + } + + previous_line = line_number; + }); + + // Append last to prevent layout calculation + document.body.append(scroll_marker); +}; + +coverage.wire_up_sticky_header = function () { + const header = document.querySelector("header"); + const header_bottom = ( + header.querySelector(".content h2").getBoundingClientRect().top - + header.getBoundingClientRect().top + ); + + function updateHeader() { + if (window.scrollY > header_bottom) { + header.classList.add("sticky"); + } + else { + header.classList.remove("sticky"); + } + } + + window.addEventListener("scroll", updateHeader); + updateHeader(); +}; + +coverage.expand_contexts = function (e) { + var ctxs = e.target.parentNode.querySelector(".ctxs"); + + if (!ctxs.classList.contains("expanded")) { + var ctxs_text = ctxs.textContent; + var width = Number(ctxs_text[0]); + ctxs.textContent = ""; + for (var i = 1; i < ctxs_text.length; i += width) { + key = ctxs_text.substring(i, i + width).trim(); + ctxs.appendChild(document.createTextNode(contexts[key])); + ctxs.appendChild(document.createElement("br")); + } + ctxs.classList.add("expanded"); + } +}; + +document.addEventListener("DOMContentLoaded", () => { + if (document.body.classList.contains("indexfile")) { + coverage.index_ready(); + } + else { + coverage.pyfile_ready(); + } +}); diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/favicon_32.png b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/favicon_32.png new file mode 100644 index 0000000..8649f04 Binary files /dev/null and b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/favicon_32.png differ diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/index.html b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/index.html new file mode 100644 index 0000000..bb84b44 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/index.html @@ -0,0 +1,164 @@ +{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #} +{# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt #} + + + + + + {{ title|escape }} + + + {% if extra_css %} + + {% endif %} + + + + +
+
+

{{ title|escape }}: + {{totals.pc_covered_str}}% +

+ + + +
+ +
+ + +
+
+ +

+ {% for ibtn in index_buttons %} + {{ ibtn.label }}{#-#} + {% endfor %} +

+ +

+ coverage.py v{{__version__}}, + created at {{ time_stamp }} +

+
+
+ +
+ + + {# The title="" attr doesn't work in Safari. #} + + + {% if region_noun %} + + {% endif %} + + + + {% if has_arcs %} + + + {% endif %} + + + + + {% for region in regions %} + + + {% if region_noun %} + + {% endif %} + + + + {% if has_arcs %} + + + {% endif %} + + + {% endfor %} + + + + + {% if region_noun %} + + {% endif %} + + + + {% if has_arcs %} + + + {% endif %} + + + +
File{{ region_noun }}statementsmissingexcludedbranchespartialcoverage
{{region.file}}{{region.description}}{{region.nums.n_statements}}{{region.nums.n_missing}}{{region.nums.n_excluded}}{{region.nums.n_branches}}{{region.nums.n_partial_branches}}{{region.nums.pc_covered_str}}%
Total {{totals.n_statements}}{{totals.n_missing}}{{totals.n_excluded}}{{totals.n_branches}}{{totals.n_partial_branches}}{{totals.pc_covered_str}}%
+ +

+ No items found using the specified filter. +

+ + {% if skipped_covered_msg %} +

{{ skipped_covered_msg }}

+ {% endif %} + {% if skipped_empty_msg %} +

{{ skipped_empty_msg }}

+ {% endif %} +
+ + + + + diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/keybd_closed.png b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/keybd_closed.png new file mode 100644 index 0000000..ba119c4 Binary files /dev/null and b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/keybd_closed.png differ diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/pyfile.html b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/pyfile.html new file mode 100644 index 0000000..f4cf66a --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/pyfile.html @@ -0,0 +1,149 @@ +{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #} +{# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt #} + + + + + + Coverage for {{relative_filename|escape}}: {{nums.pc_covered_str}}% + + + {% if extra_css %} + + {% endif %} + + {% if contexts_json %} + + {% endif %} + + + + + +
+
+

+ Coverage for {{relative_filename|escape}}: + {{nums.pc_covered_str}}% +

+ + + +

+ {{nums.n_statements}} statements   + + + + {% if has_arcs %} + + {% endif %} +

+ +

+ « prev     + ^ index     + » next +       + coverage.py v{{__version__}}, + created at {{ time_stamp }} +

+ + +
+
+ +
+ {% for line in lines -%} + {% joined %} +

+ {{line.number}} + {{line.html}}  + {% if line.context_list %} + + {% endif %} + {# Things that should float right in the line. #} + + {% if line.annotate %} + {{line.annotate}} + {{line.annotate_long}} + {% endif %} + {% if line.contexts %} + + {% endif %} + + {# Things that should appear below the line. #} + {% if line.context_str %} + {{ line.context_str }} + {% endif %} +

+ {% endjoined %} + {% endfor %} +
+ + + + + diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/style.css b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/style.css new file mode 100644 index 0000000..cb0cf4c --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/style.css @@ -0,0 +1,377 @@ +@charset "UTF-8"; +/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ +/* For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt */ +/* Don't edit this .css file. Edit the .scss file instead! */ +html, body, h1, h2, h3, p, table, td, th { margin: 0; padding: 0; border: 0; font-weight: inherit; font-style: inherit; font-size: 100%; font-family: inherit; vertical-align: baseline; } + +body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; font-size: 1em; background: #fff; color: #000; } + +@media (prefers-color-scheme: dark) { body { background: #1e1e1e; } } + +@media (prefers-color-scheme: dark) { body { color: #eee; } } + +html > body { font-size: 16px; } + +a:active, a:focus { outline: 2px dashed #007acc; } + +p { font-size: .875em; line-height: 1.4em; } + +table { border-collapse: collapse; } + +td { vertical-align: top; } + +table tr.hidden { display: none !important; } + +p#no_rows { display: none; font-size: 1.15em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; } + +a.nav { text-decoration: none; color: inherit; } + +a.nav:hover { text-decoration: underline; color: inherit; } + +.hidden { display: none; } + +header { background: #f8f8f8; width: 100%; z-index: 2; border-bottom: 1px solid #ccc; } + +@media (prefers-color-scheme: dark) { header { background: black; } } + +@media (prefers-color-scheme: dark) { header { border-color: #333; } } + +header .content { padding: 1rem 3.5rem; } + +header h2 { margin-top: .5em; font-size: 1em; } + +header h2 a.button { font-family: inherit; font-size: inherit; border: 1px solid; border-radius: .2em; background: #eee; color: inherit; text-decoration: none; padding: .1em .5em; margin: 1px calc(.1em + 1px); cursor: pointer; border-color: #ccc; } + +@media (prefers-color-scheme: dark) { header h2 a.button { background: #333; } } + +@media (prefers-color-scheme: dark) { header h2 a.button { border-color: #444; } } + +header h2 a.button.current { border: 2px solid; background: #fff; border-color: #999; cursor: default; } + +@media (prefers-color-scheme: dark) { header h2 a.button.current { background: #1e1e1e; } } + +@media (prefers-color-scheme: dark) { header h2 a.button.current { border-color: #777; } } + +header p.text { margin: .5em 0 -.5em; color: #666; font-style: italic; } + +@media (prefers-color-scheme: dark) { header p.text { color: #aaa; } } + +header.sticky { position: fixed; left: 0; right: 0; height: 2.5em; } + +header.sticky .text { display: none; } + +header.sticky h1, header.sticky h2 { font-size: 1em; margin-top: 0; display: inline-block; } + +header.sticky .content { padding: 0.5rem 3.5rem; } + +header.sticky .content p { font-size: 1em; } + +header.sticky ~ #source { padding-top: 6.5em; } + +main { position: relative; z-index: 1; } + +footer { margin: 1rem 3.5rem; } + +footer .content { padding: 0; color: #666; font-style: italic; } + +@media (prefers-color-scheme: dark) { footer .content { color: #aaa; } } + +#index { margin: 1rem 0 0 3.5rem; } + +h1 { font-size: 1.25em; display: inline-block; } + +#filter_container { float: right; margin: 0 2em 0 0; line-height: 1.66em; } + +#filter_container #filter { width: 10em; padding: 0.2em 0.5em; border: 2px solid #ccc; background: #fff; color: #000; } + +@media (prefers-color-scheme: dark) { #filter_container #filter { border-color: #444; } } + +@media (prefers-color-scheme: dark) { #filter_container #filter { background: #1e1e1e; } } + +@media (prefers-color-scheme: dark) { #filter_container #filter { color: #eee; } } + +#filter_container #filter:focus { border-color: #007acc; } + +#filter_container :disabled ~ label { color: #ccc; } + +@media (prefers-color-scheme: dark) { #filter_container :disabled ~ label { color: #444; } } + +#filter_container label { font-size: .875em; color: #666; } + +@media (prefers-color-scheme: dark) { #filter_container label { color: #aaa; } } + +header button { font-family: inherit; font-size: inherit; border: 1px solid; border-radius: .2em; background: #eee; color: inherit; text-decoration: none; padding: .1em .5em; margin: 1px calc(.1em + 1px); cursor: pointer; border-color: #ccc; } + +@media (prefers-color-scheme: dark) { header button { background: #333; } } + +@media (prefers-color-scheme: dark) { header button { border-color: #444; } } + +header button:active, header button:focus { outline: 2px dashed #007acc; } + +header button.run { background: #eeffee; } + +@media (prefers-color-scheme: dark) { header button.run { background: #373d29; } } + +header button.run.show_run { background: #dfd; border: 2px solid #00dd00; margin: 0 .1em; } + +@media (prefers-color-scheme: dark) { header button.run.show_run { background: #373d29; } } + +header button.mis { background: #ffeeee; } + +@media (prefers-color-scheme: dark) { header button.mis { background: #4b1818; } } + +header button.mis.show_mis { background: #fdd; border: 2px solid #ff0000; margin: 0 .1em; } + +@media (prefers-color-scheme: dark) { header button.mis.show_mis { background: #4b1818; } } + +header button.exc { background: #f7f7f7; } + +@media (prefers-color-scheme: dark) { header button.exc { background: #333; } } + +header button.exc.show_exc { background: #eee; border: 2px solid #808080; margin: 0 .1em; } + +@media (prefers-color-scheme: dark) { header button.exc.show_exc { background: #333; } } + +header button.par { background: #ffffd5; } + +@media (prefers-color-scheme: dark) { header button.par { background: #650; } } + +header button.par.show_par { background: #ffa; border: 2px solid #bbbb00; margin: 0 .1em; } + +@media (prefers-color-scheme: dark) { header button.par.show_par { background: #650; } } + +#help_panel, #source p .annotate.long { display: none; position: absolute; z-index: 999; background: #ffffcc; border: 1px solid #888; border-radius: .2em; color: #333; padding: .25em .5em; } + +#source p .annotate.long { white-space: normal; float: right; top: 1.75em; right: 1em; height: auto; } + +#help_panel_wrapper { float: right; position: relative; } + +#keyboard_icon { margin: 5px; } + +#help_panel_state { display: none; } + +#help_panel { top: 25px; right: 0; padding: .75em; border: 1px solid #883; color: #333; } + +#help_panel .keyhelp p { margin-top: .75em; } + +#help_panel .legend { font-style: italic; margin-bottom: 1em; } + +.indexfile #help_panel { width: 25em; } + +.pyfile #help_panel { width: 18em; } + +#help_panel_state:checked ~ #help_panel { display: block; } + +kbd { border: 1px solid black; border-color: #888 #333 #333 #888; padding: .1em .35em; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-weight: bold; background: #eee; border-radius: 3px; } + +#source { padding: 1em 0 1em 3.5rem; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; } + +#source p { position: relative; white-space: pre; } + +#source p * { box-sizing: border-box; } + +#source p .n { float: left; text-align: right; width: 3.5rem; box-sizing: border-box; margin-left: -3.5rem; padding-right: 1em; color: #999; user-select: none; } + +@media (prefers-color-scheme: dark) { #source p .n { color: #777; } } + +#source p .n.highlight { background: #ffdd00; } + +#source p .n a { scroll-margin-top: 6em; text-decoration: none; color: #999; } + +@media (prefers-color-scheme: dark) { #source p .n a { color: #777; } } + +#source p .n a:hover { text-decoration: underline; color: #999; } + +@media (prefers-color-scheme: dark) { #source p .n a:hover { color: #777; } } + +#source p .t { display: inline-block; width: 100%; box-sizing: border-box; margin-left: -.5em; padding-left: 0.3em; border-left: 0.2em solid #fff; } + +@media (prefers-color-scheme: dark) { #source p .t { border-color: #1e1e1e; } } + +#source p .t:hover { background: #f2f2f2; } + +@media (prefers-color-scheme: dark) { #source p .t:hover { background: #282828; } } + +#source p .t:hover ~ .r .annotate.long { display: block; } + +#source p .t .com { color: #008000; font-style: italic; line-height: 1px; } + +@media (prefers-color-scheme: dark) { #source p .t .com { color: #6a9955; } } + +#source p .t .key { font-weight: bold; line-height: 1px; } + +#source p .t .str, #source p .t .fst { color: #0451a5; } + +@media (prefers-color-scheme: dark) { #source p .t .str, #source p .t .fst { color: #9cdcfe; } } + +#source p.mis .t { border-left: 0.2em solid #ff0000; } + +#source p.mis.show_mis .t { background: #fdd; } + +@media (prefers-color-scheme: dark) { #source p.mis.show_mis .t { background: #4b1818; } } + +#source p.mis.show_mis .t:hover { background: #f2d2d2; } + +@media (prefers-color-scheme: dark) { #source p.mis.show_mis .t:hover { background: #532323; } } + +#source p.mis.mis2 .t { border-left: 0.2em dotted #ff0000; } + +#source p.mis.mis2.show_mis .t { background: #ffeeee; } + +@media (prefers-color-scheme: dark) { #source p.mis.mis2.show_mis .t { background: #351b1b; } } + +#source p.mis.mis2.show_mis .t:hover { background: #f2d2d2; } + +@media (prefers-color-scheme: dark) { #source p.mis.mis2.show_mis .t:hover { background: #532323; } } + +#source p.run .t { border-left: 0.2em solid #00dd00; } + +#source p.run.show_run .t { background: #dfd; } + +@media (prefers-color-scheme: dark) { #source p.run.show_run .t { background: #373d29; } } + +#source p.run.show_run .t:hover { background: #d2f2d2; } + +@media (prefers-color-scheme: dark) { #source p.run.show_run .t:hover { background: #404633; } } + +#source p.run.run2 .t { border-left: 0.2em dotted #00dd00; } + +#source p.run.run2.show_run .t { background: #eeffee; } + +@media (prefers-color-scheme: dark) { #source p.run.run2.show_run .t { background: #2b2e24; } } + +#source p.run.run2.show_run .t:hover { background: #d2f2d2; } + +@media (prefers-color-scheme: dark) { #source p.run.run2.show_run .t:hover { background: #404633; } } + +#source p.exc .t { border-left: 0.2em solid #808080; } + +#source p.exc.show_exc .t { background: #eee; } + +@media (prefers-color-scheme: dark) { #source p.exc.show_exc .t { background: #333; } } + +#source p.exc.show_exc .t:hover { background: #e2e2e2; } + +@media (prefers-color-scheme: dark) { #source p.exc.show_exc .t:hover { background: #3c3c3c; } } + +#source p.exc.exc2 .t { border-left: 0.2em dotted #808080; } + +#source p.exc.exc2.show_exc .t { background: #f7f7f7; } + +@media (prefers-color-scheme: dark) { #source p.exc.exc2.show_exc .t { background: #292929; } } + +#source p.exc.exc2.show_exc .t:hover { background: #e2e2e2; } + +@media (prefers-color-scheme: dark) { #source p.exc.exc2.show_exc .t:hover { background: #3c3c3c; } } + +#source p.par .t { border-left: 0.2em solid #bbbb00; } + +#source p.par.show_par .t { background: #ffa; } + +@media (prefers-color-scheme: dark) { #source p.par.show_par .t { background: #650; } } + +#source p.par.show_par .t:hover { background: #f2f2a2; } + +@media (prefers-color-scheme: dark) { #source p.par.show_par .t:hover { background: #6d5d0c; } } + +#source p.par.par2 .t { border-left: 0.2em dotted #bbbb00; } + +#source p.par.par2.show_par .t { background: #ffffd5; } + +@media (prefers-color-scheme: dark) { #source p.par.par2.show_par .t { background: #423a0f; } } + +#source p.par.par2.show_par .t:hover { background: #f2f2a2; } + +@media (prefers-color-scheme: dark) { #source p.par.par2.show_par .t:hover { background: #6d5d0c; } } + +#source p .r { position: absolute; top: 0; right: 2.5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; } + +#source p .annotate { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; color: #666; padding-right: .5em; } + +@media (prefers-color-scheme: dark) { #source p .annotate { color: #ddd; } } + +#source p .annotate.short:hover ~ .long { display: block; } + +#source p .annotate.long { width: 30em; right: 2.5em; } + +#source p input { display: none; } + +#source p input ~ .r label.ctx { cursor: pointer; border-radius: .25em; } + +#source p input ~ .r label.ctx::before { content: "▶ "; } + +#source p input ~ .r label.ctx:hover { background: #e8f4ff; color: #666; } + +@media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { background: #0f3a42; } } + +@media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { color: #aaa; } } + +#source p input:checked ~ .r label.ctx { background: #d0e8ff; color: #666; border-radius: .75em .75em 0 0; padding: 0 .5em; margin: -.25em 0; } + +@media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { background: #056; } } + +@media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { color: #aaa; } } + +#source p input:checked ~ .r label.ctx::before { content: "▼ "; } + +#source p input:checked ~ .ctxs { padding: .25em .5em; overflow-y: scroll; max-height: 10.5em; } + +#source p label.ctx { color: #999; display: inline-block; padding: 0 .5em; font-size: .8333em; } + +@media (prefers-color-scheme: dark) { #source p label.ctx { color: #777; } } + +#source p .ctxs { display: block; max-height: 0; overflow-y: hidden; transition: all .2s; padding: 0 .5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; white-space: nowrap; background: #d0e8ff; border-radius: .25em; margin-right: 1.75em; text-align: right; } + +@media (prefers-color-scheme: dark) { #source p .ctxs { background: #056; } } + +#index { font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-size: 0.875em; } + +#index table.index { margin-left: -.5em; } + +#index td, #index th { text-align: right; padding: .25em .5em; border-bottom: 1px solid #eee; } + +@media (prefers-color-scheme: dark) { #index td, #index th { border-color: #333; } } + +#index td.name, #index th.name { text-align: left; width: auto; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; min-width: 15em; } + +#index th { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; font-style: italic; color: #333; cursor: pointer; } + +@media (prefers-color-scheme: dark) { #index th { color: #ddd; } } + +#index th:hover { background: #eee; } + +@media (prefers-color-scheme: dark) { #index th:hover { background: #333; } } + +#index th .arrows { color: #666; font-size: 85%; font-family: sans-serif; font-style: normal; pointer-events: none; } + +#index th[aria-sort="ascending"], #index th[aria-sort="descending"] { white-space: nowrap; background: #eee; padding-left: .5em; } + +@media (prefers-color-scheme: dark) { #index th[aria-sort="ascending"], #index th[aria-sort="descending"] { background: #333; } } + +#index th[aria-sort="ascending"] .arrows::after { content: " ▲"; } + +#index th[aria-sort="descending"] .arrows::after { content: " ▼"; } + +#index td.name { font-size: 1.15em; } + +#index td.name a { text-decoration: none; color: inherit; } + +#index td.name .no-noun { font-style: italic; } + +#index tr.total td, #index tr.total_dynamic td { font-weight: bold; border-top: 1px solid #ccc; border-bottom: none; } + +#index tr.region:hover { background: #eee; } + +@media (prefers-color-scheme: dark) { #index tr.region:hover { background: #333; } } + +#index tr.region:hover td.name { text-decoration: underline; color: inherit; } + +#scroll_marker { position: fixed; z-index: 3; right: 0; top: 0; width: 16px; height: 100%; background: #fff; border-left: 1px solid #eee; will-change: transform; } + +@media (prefers-color-scheme: dark) { #scroll_marker { background: #1e1e1e; } } + +@media (prefers-color-scheme: dark) { #scroll_marker { border-color: #333; } } + +#scroll_marker .marker { background: #ccc; position: absolute; min-height: 3px; width: 100%; } + +@media (prefers-color-scheme: dark) { #scroll_marker .marker { background: #444; } } diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/style.scss b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/style.scss new file mode 100644 index 0000000..7feae68 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/htmlfiles/style.scss @@ -0,0 +1,824 @@ +/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ +/* For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt */ + +// CSS styles for coverage.py HTML reports. + +// When you edit this file, you need to run "make css" to get the CSS file +// generated, and then check in both the .scss and the .css files. + +// When working on the file, this command is useful: +// sass --watch --style=compact --sourcemap=none --no-cache coverage/htmlfiles/style.scss:htmlcov/style.css +// +// OR you can process sass purely in python with `pip install pysass`, then: +// pysassc --style=compact coverage/htmlfiles/style.scss coverage/htmlfiles/style.css + +// Ignore this comment, it's for the CSS output file: +/* Don't edit this .css file. Edit the .scss file instead! */ + +// Dimensions +$left-gutter: 3.5rem; + +// +// Declare colors and variables +// + +$font-normal: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; +$font-code: SFMono-Regular, Menlo, Monaco, Consolas, monospace; + +$off-button-lighten: 50%; +$hover-dark-amt: 95%; +$multi-dim-amt: 50%; + +$focus-color: #007acc; + +$mis-color: #ff0000; +$run-color: #00dd00; +$exc-color: #808080; +$par-color: #bbbb00; + +$light-bg: #fff; +$light-fg: #000; +$light-gray1: #f8f8f8; +$light-gray2: #eee; +$light-gray3: #ccc; +$light-gray4: #999; +$light-gray5: #666; +$light-gray6: #333; +$light-pln-bg: $light-bg; +$light-mis-bg: #fdd; +$light-run-bg: #dfd; +$light-exc-bg: $light-gray2; +$light-par-bg: #ffa; +$light-token-com: #008000; +$light-token-str: #0451a5; +$light-context-bg-color: #d0e8ff; + +$dark-bg: #1e1e1e; +$dark-fg: #eee; +$dark-gray1: #222; +$dark-gray2: #333; +$dark-gray3: #444; +$dark-gray4: #777; +$dark-gray5: #aaa; +$dark-gray6: #ddd; +$dark-pln-bg: $dark-bg; +$dark-mis-bg: #4b1818; +$dark-run-bg: #373d29; +$dark-exc-bg: $dark-gray2; +$dark-par-bg: #650; +$dark-token-com: #6a9955; +$dark-token-str: #9cdcfe; +$dark-context-bg-color: #056; + +// +// Mixins and utilities +// + +@mixin background-dark($color) { + @media (prefers-color-scheme: dark) { + background: $color; + } +} +@mixin color-dark($color) { + @media (prefers-color-scheme: dark) { + color: $color; + } +} +@mixin border-color-dark($color) { + @media (prefers-color-scheme: dark) { + border-color: $color; + } +} + +// Add visual outline to navigable elements on focus improve accessibility. +@mixin focus-border { + &:active, &:focus { + outline: 2px dashed $focus-color; + } +} + +@mixin button-shape { + font-family: inherit; + font-size: inherit; + border: 1px solid; + border-radius: .2em; + background: $light-gray2; + @include background-dark($dark-gray2); + color: inherit; + text-decoration: none; + padding: .1em .5em; + margin: 1px calc(.1em + 1px); + cursor: pointer; + border-color: $light-gray3; + @include border-color-dark($dark-gray3); +} + +// Page-wide styles +html, body, h1, h2, h3, p, table, td, th { + margin: 0; + padding: 0; + border: 0; + font-weight: inherit; + font-style: inherit; + font-size: 100%; + font-family: inherit; + vertical-align: baseline; +} + +// Set baseline grid to 16 pt. +body { + font-family: $font-normal; + font-size: 1em; + background: $light-bg; + color: $light-fg; + @include background-dark($dark-bg); + @include color-dark($dark-fg); +} + +html>body { + font-size: 16px; +} + +a { + @include focus-border; +} + +p { + font-size: .875em; + line-height: 1.4em; +} + +table { + border-collapse: collapse; +} +td { + vertical-align: top; +} +table tr.hidden { + display: none !important; +} + +p#no_rows { + display: none; + font-size: 1.15em; + font-family: $font-normal; +} + +a.nav { + text-decoration: none; + color: inherit; + + &:hover { + text-decoration: underline; + color: inherit; + } +} + +.hidden { + display: none; +} + +// Page structure +header { + background: $light-gray1; + @include background-dark(black); + width: 100%; + z-index: 2; + border-bottom: 1px solid $light-gray3; + @include border-color-dark($dark-gray2); + + .content { + padding: 1rem $left-gutter; + } + + h2 { + margin-top: .5em; + font-size: 1em; + + a.button { + @include button-shape; + &.current { + border: 2px solid; + background: $light-bg; + @include background-dark($dark-bg); + border-color: $light-gray4; + @include border-color-dark($dark-gray4); + cursor: default; + } + } + } + + p.text { + margin: .5em 0 -.5em; + color: $light-gray5; + @include color-dark($dark-gray5); + font-style: italic; + } + + &.sticky { + position: fixed; + left: 0; + right: 0; + height: 2.5em; + + .text { + display: none; + } + + h1, h2 { + font-size: 1em; + margin-top: 0; + display: inline-block; + } + + .content { + padding: .5rem $left-gutter; + p { + font-size: 1em; + } + } + + & ~ #source { + padding-top: 6.5em; + } + } +} + +main { + position: relative; + z-index: 1; +} + +footer { + margin: 1rem $left-gutter; + + .content { + padding: 0; + color: $light-gray5; + @include color-dark($dark-gray5); + font-style: italic; + } +} + +#index { + margin: 1rem 0 0 $left-gutter; +} + +// Header styles + +h1 { + font-size: 1.25em; + display: inline-block; +} + +#filter_container { + float: right; + margin: 0 2em 0 0; + line-height: 1.66em; + + #filter { + width: 10em; + padding: 0.2em 0.5em; + border: 2px solid $light-gray3; + background: $light-bg; + color: $light-fg; + @include border-color-dark($dark-gray3); + @include background-dark($dark-bg); + @include color-dark($dark-fg); + &:focus { + border-color: $focus-color; + } + } + + :disabled ~ label{ + color: $light-gray3; + @include color-dark($dark-gray3); + } + + label { + font-size: .875em; + color: $light-gray5; + @include color-dark($dark-gray5); + } +} + +header button { + @include button-shape; + @include focus-border; + + &.run { + background: mix($light-run-bg, $light-bg, $off-button-lighten); + @include background-dark($dark-run-bg); + &.show_run { + background: $light-run-bg; + @include background-dark($dark-run-bg); + border: 2px solid $run-color; + margin: 0 .1em; + } + } + &.mis { + background: mix($light-mis-bg, $light-bg, $off-button-lighten); + @include background-dark($dark-mis-bg); + &.show_mis { + background: $light-mis-bg; + @include background-dark($dark-mis-bg); + border: 2px solid $mis-color; + margin: 0 .1em; + } + } + &.exc { + background: mix($light-exc-bg, $light-bg, $off-button-lighten); + @include background-dark($dark-exc-bg); + &.show_exc { + background: $light-exc-bg; + @include background-dark($dark-exc-bg); + border: 2px solid $exc-color; + margin: 0 .1em; + } + } + &.par { + background: mix($light-par-bg, $light-bg, $off-button-lighten); + @include background-dark($dark-par-bg); + &.show_par { + background: $light-par-bg; + @include background-dark($dark-par-bg); + border: 2px solid $par-color; + margin: 0 .1em; + } + } +} + +// Yellow post-it things. +%popup { + display: none; + position: absolute; + z-index: 999; + background: #ffffcc; + border: 1px solid #888; + border-radius: .2em; + color: #333; + padding: .25em .5em; +} + +// Yellow post-it's in the text listings. +%in-text-popup { + @extend %popup; + white-space: normal; + float: right; + top: 1.75em; + right: 1em; + height: auto; +} + +// Help panel +#help_panel_wrapper { + float: right; + position: relative; +} + +#keyboard_icon { + margin: 5px; +} + +#help_panel_state { + display: none; +} + +#help_panel { + @extend %popup; + top: 25px; + right: 0; + padding: .75em; + border: 1px solid #883; + + color: #333; + + .keyhelp p { + margin-top: .75em; + } + + .legend { + font-style: italic; + margin-bottom: 1em; + } + + .indexfile & { + width: 25em; + } + + .pyfile & { + width: 18em; + } + + #help_panel_state:checked ~ & { + display: block; + } +} + +kbd { + border: 1px solid black; + border-color: #888 #333 #333 #888; + padding: .1em .35em; + font-family: $font-code; + font-weight: bold; + background: #eee; + border-radius: 3px; +} + +// Source file styles + +// The slim bar at the left edge of the source lines, colored by coverage. +$border-indicator-width: .2em; + +#source { + padding: 1em 0 1em $left-gutter; + font-family: $font-code; + + p { + // position relative makes position:absolute pop-ups appear in the right place. + position: relative; + white-space: pre; + + * { + box-sizing: border-box; + } + + .n { + float: left; + text-align: right; + width: $left-gutter; + box-sizing: border-box; + margin-left: -$left-gutter; + padding-right: 1em; + color: $light-gray4; + user-select: none; + @include color-dark($dark-gray4); + + &.highlight { + background: #ffdd00; + } + + a { + // Make anchors to the line scroll the line to be + // visible beneath the fixed-position header. + scroll-margin-top: 6em; + text-decoration: none; + color: $light-gray4; + @include color-dark($dark-gray4); + &:hover { + text-decoration: underline; + color: $light-gray4; + @include color-dark($dark-gray4); + } + } + } + + .t { + display: inline-block; + width: 100%; + box-sizing: border-box; + margin-left: -.5em; + padding-left: .5em - $border-indicator-width; + border-left: $border-indicator-width solid $light-bg; + @include border-color-dark($dark-bg); + + &:hover { + background: mix($light-pln-bg, $light-fg, $hover-dark-amt); + @include background-dark(mix($dark-pln-bg, $dark-fg, $hover-dark-amt)); + + & ~ .r .annotate.long { + display: block; + } + } + + // Syntax coloring + .com { + color: $light-token-com; + @include color-dark($dark-token-com); + font-style: italic; + line-height: 1px; + } + .key { + font-weight: bold; + line-height: 1px; + } + .str, .fst { + color: $light-token-str; + @include color-dark($dark-token-str); + } + } + + &.mis { + .t { + border-left: $border-indicator-width solid $mis-color; + } + + &.show_mis .t { + background: $light-mis-bg; + @include background-dark($dark-mis-bg); + + &:hover { + background: mix($light-mis-bg, $light-fg, $hover-dark-amt); + @include background-dark(mix($dark-mis-bg, $dark-fg, $hover-dark-amt)); + } + } + + &.mis2 { + .t { + border-left: $border-indicator-width dotted $mis-color; + } + + &.show_mis .t { + background: mix($light-mis-bg, $light-bg, $multi-dim-amt); + @include background-dark(mix($dark-mis-bg, $dark-bg, $multi-dim-amt)); + + &:hover { + background: mix($light-mis-bg, $light-fg, $hover-dark-amt); + @include background-dark(mix($dark-mis-bg, $dark-fg, $hover-dark-amt)); + } + } + } + } + + &.run { + .t { + border-left: $border-indicator-width solid $run-color; + } + + &.show_run .t { + background: $light-run-bg; + @include background-dark($dark-run-bg); + + &:hover { + background: mix($light-run-bg, $light-fg, $hover-dark-amt); + @include background-dark(mix($dark-run-bg, $dark-fg, $hover-dark-amt)); + } + } + + &.run2 { + .t { + border-left: $border-indicator-width dotted $run-color; + } + + &.show_run .t { + background: mix($light-run-bg, $light-bg, $multi-dim-amt); + @include background-dark(mix($dark-run-bg, $dark-bg, $multi-dim-amt)); + + &:hover { + background: mix($light-run-bg, $light-fg, $hover-dark-amt); + @include background-dark(mix($dark-run-bg, $dark-fg, $hover-dark-amt)); + } + } + } + } + + &.exc { + .t { + border-left: $border-indicator-width solid $exc-color; + } + + &.show_exc .t { + background: $light-exc-bg; + @include background-dark($dark-exc-bg); + + &:hover { + background: mix($light-exc-bg, $light-fg, $hover-dark-amt); + @include background-dark(mix($dark-exc-bg, $dark-fg, $hover-dark-amt)); + } + } + + &.exc2 { + .t { + border-left: $border-indicator-width dotted $exc-color; + } + + &.show_exc .t { + background: mix($light-exc-bg, $light-bg, $multi-dim-amt); + @include background-dark(mix($dark-exc-bg, $dark-bg, $multi-dim-amt)); + + &:hover { + background: mix($light-exc-bg, $light-fg, $hover-dark-amt); + @include background-dark(mix($dark-exc-bg, $dark-fg, $hover-dark-amt)); + } + } + } + } + + &.par { + .t { + border-left: $border-indicator-width solid $par-color; + } + + &.show_par .t { + background: $light-par-bg; + @include background-dark($dark-par-bg); + + &:hover { + background: mix($light-par-bg, $light-fg, $hover-dark-amt); + @include background-dark(mix($dark-par-bg, $dark-fg, $hover-dark-amt)); + } + } + + &.par2 { + .t { + border-left: $border-indicator-width dotted $par-color; + } + + &.show_par .t { + background: mix($light-par-bg, $light-bg, $multi-dim-amt); + @include background-dark(mix($dark-par-bg, $dark-bg, $multi-dim-amt)); + + &:hover { + background: mix($light-par-bg, $light-fg, $hover-dark-amt); + @include background-dark(mix($dark-par-bg, $dark-fg, $hover-dark-amt)); + } + } + } + } + + .r { + position: absolute; + top: 0; + right: 2.5em; + font-family: $font-normal; + } + + .annotate { + font-family: $font-normal; + color: $light-gray5; + @include color-dark($dark-gray6); + padding-right: .5em; + + &.short:hover ~ .long { + display: block; + } + + &.long { + @extend %in-text-popup; + width: 30em; + right: 2.5em; + } + } + + input { + display: none; + + & ~ .r label.ctx { + cursor: pointer; + border-radius: .25em; + &::before { + content: "▶ "; + } + &:hover { + background: mix($light-context-bg-color, $light-bg, $off-button-lighten); + @include background-dark(mix($dark-context-bg-color, $dark-bg, $off-button-lighten)); + color: $light-gray5; + @include color-dark($dark-gray5); + } + } + + &:checked ~ .r label.ctx { + background: $light-context-bg-color; + @include background-dark($dark-context-bg-color); + color: $light-gray5; + @include color-dark($dark-gray5); + border-radius: .75em .75em 0 0; + padding: 0 .5em; + margin: -.25em 0; + &::before { + content: "▼ "; + } + } + + &:checked ~ .ctxs { + padding: .25em .5em; + overflow-y: scroll; + max-height: 10.5em; + } + } + + label.ctx { + color: $light-gray4; + @include color-dark($dark-gray4); + display: inline-block; + padding: 0 .5em; + font-size: .8333em; // 10/12 + } + + .ctxs { + display: block; + max-height: 0; + overflow-y: hidden; + transition: all .2s; + padding: 0 .5em; + font-family: $font-normal; + white-space: nowrap; + background: $light-context-bg-color; + @include background-dark($dark-context-bg-color); + border-radius: .25em; + margin-right: 1.75em; + text-align: right; + } + } +} + + +// index styles +#index { + font-family: $font-code; + font-size: 0.875em; + + table.index { + margin-left: -.5em; + } + td, th { + text-align: right; + padding: .25em .5em; + border-bottom: 1px solid $light-gray2; + @include border-color-dark($dark-gray2); + &.name { + text-align: left; + width: auto; + font-family: $font-normal; + min-width: 15em; + } + } + th { + font-family: $font-normal; + font-style: italic; + color: $light-gray6; + @include color-dark($dark-gray6); + cursor: pointer; + &:hover { + background: $light-gray2; + @include background-dark($dark-gray2); + } + .arrows { + color: #666; + font-size: 85%; + font-family: sans-serif; + font-style: normal; + pointer-events: none; + } + &[aria-sort="ascending"], &[aria-sort="descending"] { + white-space: nowrap; + background: $light-gray2; + @include background-dark($dark-gray2); + padding-left: .5em; + } + &[aria-sort="ascending"] .arrows::after { + content: " ▲"; + } + &[aria-sort="descending"] .arrows::after { + content: " ▼"; + } + } + td.name { + font-size: 1.15em; + a { + text-decoration: none; + color: inherit; + } + & .no-noun { + font-style: italic; + } + } + + tr.total td, + tr.total_dynamic td { + font-weight: bold; + border-top: 1px solid #ccc; + border-bottom: none; + } + tr.region:hover { + background: $light-gray2; + @include background-dark($dark-gray2); + td.name { + text-decoration: underline; + color: inherit; + } + } +} + +// scroll marker styles +#scroll_marker { + position: fixed; + z-index: 3; + right: 0; + top: 0; + width: 16px; + height: 100%; + background: $light-bg; + border-left: 1px solid $light-gray2; + @include background-dark($dark-bg); + @include border-color-dark($dark-gray2); + will-change: transform; // for faster scrolling of fixed element in Chrome + + .marker { + background: $light-gray3; + @include background-dark($dark-gray3); + position: absolute; + min-height: 3px; + width: 100%; + } +} diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/inorout.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/inorout.py new file mode 100644 index 0000000..1addb22 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/inorout.py @@ -0,0 +1,614 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Determining whether files are being measured/reported or not.""" + +from __future__ import annotations + +import importlib.util +import inspect +import itertools +import os +import os.path +import platform +import re +import sys +import sysconfig +import traceback +from collections.abc import Iterable +from types import FrameType, ModuleType +from typing import TYPE_CHECKING, Any, cast + +from coverage import env +from coverage.disposition import FileDisposition, disposition_init +from coverage.exceptions import ConfigError, CoverageException, PluginError +from coverage.files import ( + GlobMatcher, + ModuleMatcher, + TreeMatcher, + canonical_filename, + find_python_files, + prep_patterns, +) +from coverage.misc import isolate_module, sys_modules_saved +from coverage.python import source_for_file, source_for_morf +from coverage.types import TDebugCtl, TFileDisposition, TMorf, TWarnFn + +if TYPE_CHECKING: + from coverage.config import CoverageConfig + from coverage.plugin_support import Plugins + + +modules_we_happen_to_have: list[ModuleType] = [ + inspect, + itertools, + os, + platform, + re, + sysconfig, + traceback, +] + +if env.PYPY: + # Pypy has some unusual stuff in the "stdlib". Consider those locations + # when deciding where the stdlib is. These modules are not used for anything, + # they are modules importable from the pypy lib directories, so that we can + # find those directories. + import _pypy_irc_topic # pylint: disable=import-error + import _structseq # pylint: disable=import-error + + modules_we_happen_to_have.extend([_structseq, _pypy_irc_topic]) + + +os = isolate_module(os) + + +def canonical_path(morf: TMorf, directory: bool = False) -> str: + """Return the canonical path of the module or file `morf`. + + If the module is a package, then return its directory. If it is a + module, then return its file, unless `directory` is True, in which + case return its enclosing directory. + + """ + morf_path = canonical_filename(source_for_morf(morf)) + if morf_path.endswith("__init__.py") or directory: + morf_path = os.path.split(morf_path)[0] + return morf_path + + +def name_for_module(filename: str, frame: FrameType | None) -> str | None: + """Get the name of the module for a filename and frame. + + For configurability's sake, we allow __main__ modules to be matched by + their importable name. + + If loaded via runpy (aka -m), we can usually recover the "original" + full dotted module name, otherwise, we resort to interpreting the + file name to get the module's name. In the case that the module name + can't be determined, None is returned. + + """ + module_globals = frame.f_globals if frame is not None else {} + dunder_name: str | None = module_globals.get("__name__", None) + + if isinstance(dunder_name, str) and dunder_name != "__main__": + # This is the usual case: an imported module. + return dunder_name + + spec = module_globals.get("__spec__", None) + if spec: + fullname = spec.name + if isinstance(fullname, str) and fullname != "__main__": + # Module loaded via: runpy -m + return fullname + + # Script as first argument to Python command line. + inspectedname = inspect.getmodulename(filename) + if inspectedname is not None: + return inspectedname + else: + return dunder_name + + +def module_is_namespace(mod: ModuleType) -> bool: + """Is the module object `mod` a PEP420 namespace module?""" + return hasattr(mod, "__path__") and getattr(mod, "__file__", None) is None + + +def module_has_file(mod: ModuleType) -> bool: + """Does the module object `mod` have an existing __file__ ?""" + mod__file__ = getattr(mod, "__file__", None) + if mod__file__ is None: + return False + return os.path.exists(mod__file__) + + +def file_and_path_for_module(modulename: str) -> tuple[str | None, list[str]]: + """Find the file and search path for `modulename`. + + Returns: + filename: The filename of the module, or None. + path: A list (possibly empty) of directories to find submodules in. + + """ + filename = None + path = [] + try: + spec = importlib.util.find_spec(modulename) + except Exception: + pass + else: + if spec is not None: + filename = spec.origin + path = list(spec.submodule_search_locations or ()) + return filename, path + + +def add_stdlib_paths(paths: set[str]) -> None: + """Add paths where the stdlib can be found to the set `paths`.""" + # Look at where some standard modules are located. That's the + # indication for "installed with the interpreter". In some + # environments (virtualenv, for example), these modules may be + # spread across a few locations. Look at all the candidate modules + # we've imported, and take all the different ones. + for m in modules_we_happen_to_have: + if hasattr(m, "__file__"): + paths.add(canonical_path(m, directory=True)) + + +def add_third_party_paths(paths: set[str]) -> None: + """Add locations for third-party packages to the set `paths`.""" + # Get the paths that sysconfig knows about. + scheme_names = set(sysconfig.get_scheme_names()) + + for scheme in scheme_names: + # https://foss.heptapod.net/pypy/pypy/-/issues/3433 + better_scheme = "pypy_posix" if scheme == "pypy" else scheme + if os.name in better_scheme.split("_"): + config_paths = sysconfig.get_paths(scheme) + for path_name in ["platlib", "purelib", "scripts"]: + paths.add(config_paths[path_name]) + + +def add_coverage_paths(paths: set[str]) -> None: + """Add paths where coverage.py code can be found to the set `paths`.""" + cover_path = canonical_path(__file__, directory=True) + paths.add(cover_path) + if env.TESTING: + # Don't include our own test code. + paths.add(os.path.join(cover_path, "tests")) + + +class InOrOut: + """Machinery for determining what files to measure.""" + + def __init__( + self, + config: CoverageConfig, + warn: TWarnFn, + debug: TDebugCtl | None, + include_namespace_packages: bool, + ) -> None: + self.warn = warn + self.debug = debug + self.include_namespace_packages = include_namespace_packages + + self.source_pkgs: list[str] = [] + self.source_pkgs.extend(config.source_pkgs) + self.source_dirs: list[str] = [] + self.source_dirs.extend(config.source_dirs) + for src in config.source or []: + if os.path.isdir(src): + self.source_dirs.append(src) + else: + self.source_pkgs.append(src) + + # Canonicalize everything in `source_dirs`. + # Also confirm that they actually are directories. + for i, src in enumerate(self.source_dirs): + if not os.path.isdir(src): + raise ConfigError(f"Source dir is not a directory: {src!r}") + self.source_dirs[i] = canonical_filename(src) + + self.source_pkgs_unmatched = self.source_pkgs[:] + + self.include = prep_patterns(config.run_include) + self.omit = prep_patterns(config.run_omit) + + # The directories for files considered "installed with the interpreter". + self.pylib_paths: set[str] = set() + if not config.cover_pylib: + add_stdlib_paths(self.pylib_paths) + + # To avoid tracing the coverage.py code itself, we skip anything + # located where we are. + self.cover_paths: set[str] = set() + add_coverage_paths(self.cover_paths) + + # Find where third-party packages are installed. + self.third_paths: set[str] = set() + add_third_party_paths(self.third_paths) + + def _debug(msg: str) -> None: + if self.debug: + self.debug.write(msg) + + # The matchers for should_trace. + + # Generally useful information + _debug("sys.path:" + "".join(f"\n {p}" for p in sys.path)) + + # Create the matchers we need for should_trace + self.source_match = None + self.source_pkgs_match = None + self.pylib_match = None + self.include_match = self.omit_match = None + + if self.source_dirs or self.source_pkgs: + against = [] + if self.source_dirs: + self.source_match = TreeMatcher(self.source_dirs, "source") + against.append(f"trees {self.source_match!r}") + if self.source_pkgs: + self.source_pkgs_match = ModuleMatcher(self.source_pkgs, "source_pkgs") + against.append(f"modules {self.source_pkgs_match!r}") + _debug("Source matching against " + " and ".join(against)) + else: + if self.pylib_paths: + self.pylib_match = TreeMatcher(self.pylib_paths, "pylib") + _debug(f"Python stdlib matching: {self.pylib_match!r}") + if self.include: + self.include_match = GlobMatcher(self.include, "include") + _debug(f"Include matching: {self.include_match!r}") + if self.omit: + self.omit_match = GlobMatcher(self.omit, "omit") + _debug(f"Omit matching: {self.omit_match!r}") + + self.cover_match = TreeMatcher(self.cover_paths, "coverage") + _debug(f"Coverage code matching: {self.cover_match!r}") + + self.third_match = TreeMatcher(self.third_paths, "third") + _debug(f"Third-party lib matching: {self.third_match!r}") + + # Check if the source we want to measure has been installed as a + # third-party package. + # Is the source inside a third-party area? + self.source_in_third_paths = set() + with sys_modules_saved(): + for pkg in self.source_pkgs: + try: + modfile, path = file_and_path_for_module(pkg) + _debug(f"Imported source package {pkg!r} as {modfile!r}") + except CoverageException as exc: + _debug(f"Couldn't import source package {pkg!r}: {exc}") + continue + if modfile: + if self.third_match.match(modfile): + _debug( + f"Source in third-party: source_pkg {pkg!r} at {modfile!r}", + ) + self.source_in_third_paths.add(canonical_path(source_for_file(modfile))) + else: + for pathdir in path: + if self.third_match.match(pathdir): + _debug( + f"Source in third-party: {pkg!r} path directory at {pathdir!r}", + ) + self.source_in_third_paths.add(pathdir) + + for src in self.source_dirs: + if self.third_match.match(src): + _debug(f"Source in third-party: source directory {src!r}") + self.source_in_third_paths.add(src) + self.source_in_third_match = TreeMatcher(self.source_in_third_paths, "source_in_third") + _debug(f"Source in third-party matching: {self.source_in_third_match}") + + self.plugins: Plugins + self.disp_class: type[TFileDisposition] = FileDisposition + + def should_trace(self, filename: str, frame: FrameType | None = None) -> TFileDisposition: + """Decide whether to trace execution in `filename`, with a reason. + + This function is called from the trace function. As each new file name + is encountered, this function determines whether it is traced or not. + + Returns a FileDisposition object. + + """ + original_filename = filename + disp = disposition_init(self.disp_class, filename) + + def nope(disp: TFileDisposition, reason: str) -> TFileDisposition: + """Simple helper to make it easy to return NO.""" + disp.trace = False + disp.reason = reason + return disp + + if original_filename.startswith("<"): + return nope(disp, "original file name is not real") + + if frame is not None: + # Compiled Python files have two file names: frame.f_code.co_filename is + # the file name at the time the .pyc was compiled. The second name is + # __file__, which is where the .pyc was actually loaded from. Since + # .pyc files can be moved after compilation (for example, by being + # installed), we look for __file__ in the frame and prefer it to the + # co_filename value. + dunder_file = frame.f_globals and frame.f_globals.get("__file__") + if dunder_file: + # Danger: __file__ can (rarely?) be of type Path. + filename = source_for_file(str(dunder_file)) + if original_filename and not original_filename.startswith("<"): + orig = os.path.basename(original_filename) + if orig != os.path.basename(filename): + # Files shouldn't be renamed when moved. This happens when + # exec'ing code. If it seems like something is wrong with + # the frame's file name, then just use the original. + filename = original_filename + + if not filename: + # Empty string is pretty useless. + return nope(disp, "empty string isn't a file name") + + if filename.startswith("memory:"): + return nope(disp, "memory isn't traceable") + + if filename.startswith("<"): + # Lots of non-file execution is represented with artificial + # file names like "", "", or + # "". Don't ever trace these executions, since we + # can't do anything with the data later anyway. + return nope(disp, "file name is not real") + + canonical = canonical_filename(filename) + disp.canonical_filename = canonical + + # Try the plugins, see if they have an opinion about the file. + plugin = None + for plugin in self.plugins.file_tracers: + if not plugin._coverage_enabled: + continue + + try: + file_tracer = plugin.file_tracer(canonical) + if file_tracer is not None: + file_tracer._coverage_plugin = plugin + disp.trace = True + disp.file_tracer = file_tracer + if file_tracer.has_dynamic_source_filename(): + disp.has_dynamic_filename = True + else: + disp.source_filename = canonical_filename( + file_tracer.source_filename(), + ) + break + except Exception: + plugin_name = plugin._coverage_plugin_name + tb = traceback.format_exc() + self.warn(f"Disabling plug-in {plugin_name!r} due to an exception:\n{tb}") + plugin._coverage_enabled = False + continue + else: + # No plugin wanted it: it's Python. + disp.trace = True + disp.source_filename = canonical + + if not disp.has_dynamic_filename: + if not disp.source_filename: + raise PluginError( + f"Plugin {plugin!r} didn't set source_filename for '{disp.original_filename}'", + ) + reason = self.check_include_omit_etc(disp.source_filename, frame) + if reason: + nope(disp, reason) + + return disp + + def check_include_omit_etc(self, filename: str, frame: FrameType | None) -> str | None: + """Check a file name against the include, omit, etc, rules. + + Returns a string or None. String means, don't trace, and is the reason + why. None means no reason found to not trace. + + """ + modulename = name_for_module(filename, frame) + + # If the user specified source or include, then that's authoritative + # about the outer bound of what to measure and we don't have to apply + # any canned exclusions. If they didn't, then we have to exclude the + # stdlib and coverage.py directories. + if self.source_match or self.source_pkgs_match: + extra = "" + ok = False + if self.source_pkgs_match: + if isinstance(modulename, str) and self.source_pkgs_match.match(modulename): + ok = True + if modulename in self.source_pkgs_unmatched: + self.source_pkgs_unmatched.remove(modulename) + else: + extra = f"module {modulename!r} " + if not ok and self.source_match: + if self.source_match.match(filename): + ok = True + if not ok: + return extra + "falls outside the --source spec" + if self.third_match.match(filename) and not self.source_in_third_match.match(filename): + return "inside --source, but is third-party" + elif self.include_match: + if not self.include_match.match(filename): + return "falls outside the --include trees" + else: + # We exclude the coverage.py code itself, since a little of it + # will be measured otherwise. + if self.cover_match.match(filename): + return "is part of coverage.py" + + # If we aren't supposed to trace installed code, then check if this + # is near the Python standard library and skip it if so. + if self.pylib_match and self.pylib_match.match(filename): + return "is in the stdlib" + + # Exclude anything in the third-party installation areas. + if self.third_match.match(filename): + return "is a third-party module" + + # Check the file against the omit pattern. + if self.omit_match and self.omit_match.match(filename): + return "is inside an --omit pattern" + + # No point tracing a file we can't later write to SQLite. + try: + filename.encode("utf-8") + except UnicodeEncodeError: + return "non-encodable filename" + + # No reason found to skip this file. + return None + + def warn_conflicting_settings(self) -> None: + """Warn if there are settings that conflict.""" + if self.include: + if self.source_dirs or self.source_pkgs: + self.warn("--include is ignored because --source is set", slug="include-ignored") + + def warn_already_imported_files(self) -> None: + """Warn if files have already been imported that we will be measuring.""" + if self.include or self.source_dirs or self.source_pkgs: + warned = set() + for mod in list(sys.modules.values()): + filename = getattr(mod, "__file__", None) + if filename is None: + continue + if filename in warned: + continue + + if len(getattr(mod, "__path__", ())) > 1: + # A namespace package, which confuses this code, so ignore it. + continue + + disp = self.should_trace(filename) + if disp.has_dynamic_filename: + # A plugin with dynamic filenames: the Python file + # shouldn't cause a warning, since it won't be the subject + # of tracing anyway. + continue + if disp.trace: + msg = f"Already imported a file that will be measured: {filename}" + self.warn(msg, slug="already-imported") + warned.add(filename) + elif self.debug and self.debug.should("trace"): + self.debug.write( + "Didn't trace already imported file {!r}: {}".format( + disp.original_filename, + disp.reason, + ), + ) + + def warn_unimported_source(self) -> None: + """Warn about source packages that were of interest, but never traced.""" + for pkg in self.source_pkgs_unmatched: + self._warn_about_unmeasured_code(pkg) + + def _warn_about_unmeasured_code(self, pkg: str) -> None: + """Warn about a package or module that we never traced. + + `pkg` is a string, the name of the package or module. + + """ + mod = sys.modules.get(pkg) + if mod is None: + self.warn(f"Module {pkg} was never imported.", slug="module-not-imported") + return + + if module_is_namespace(mod): + # A namespace package. It's OK for this not to have been traced, + # since there is no code directly in it. + return + + if not module_has_file(mod): + self.warn(f"Module {pkg} has no Python source.", slug="module-not-python") + return + + # The module was in sys.modules, and seems like a module with code, but + # we never measured it. I guess that means it was imported before + # coverage even started. + msg = f"Module {pkg} was previously imported, but not measured" + self.warn(msg, slug="module-not-measured") + + def find_possibly_unexecuted_files(self) -> Iterable[tuple[str, str | None]]: + """Find files in the areas of interest that might be untraced. + + Yields pairs: file path, and responsible plug-in name. + """ + for pkg in self.source_pkgs: + if pkg not in sys.modules or not module_has_file(sys.modules[pkg]): + continue + pkg_file = source_for_file(cast(str, sys.modules[pkg].__file__)) + yield from self._find_executable_files(canonical_path(pkg_file)) + + for src in self.source_dirs: + yield from self._find_executable_files(src) + + def _find_plugin_files(self, src_dir: str) -> Iterable[tuple[str, str]]: + """Get executable files from the plugins.""" + for plugin in self.plugins.file_tracers: + for x_file in plugin.find_executable_files(src_dir): + yield x_file, plugin._coverage_plugin_name + + def _find_executable_files(self, src_dir: str) -> Iterable[tuple[str, str | None]]: + """Find executable files in `src_dir`. + + Search for files in `src_dir` that can be executed because they + are probably importable. Don't include ones that have been omitted + by the configuration. + + Yield the file path, and the plugin name that handles the file. + + """ + py_files = ( + (py_file, None) + for py_file in find_python_files(src_dir, self.include_namespace_packages) + ) + plugin_files = self._find_plugin_files(src_dir) + + for file_path, plugin_name in itertools.chain(py_files, plugin_files): + file_path = canonical_filename(file_path) + if self.omit_match and self.omit_match.match(file_path): + # Turns out this file was omitted, so don't pull it back + # in as un-executed. + continue + yield file_path, plugin_name + + def sys_info(self) -> Iterable[tuple[str, Any]]: + """Our information for Coverage.sys_info. + + Returns a list of (key, value) pairs. + """ + info = [ + ("coverage_paths", self.cover_paths), + ("stdlib_paths", self.pylib_paths), + ("third_party_paths", self.third_paths), + ("source_in_third_party_paths", self.source_in_third_paths), + ] + + matcher_names = [ + "source_match", + "source_pkgs_match", + "include_match", + "omit_match", + "cover_match", + "pylib_match", + "third_match", + "source_in_third_match", + ] + + for matcher_name in matcher_names: + matcher = getattr(self, matcher_name) + if matcher: + matcher_info = matcher.info() + else: + matcher_info = "-none-" + info.append((matcher_name, matcher_info)) + + return info diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/jsonreport.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/jsonreport.py new file mode 100644 index 0000000..7b51cb7 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/jsonreport.py @@ -0,0 +1,188 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Json reporting for coverage.py""" + +from __future__ import annotations + +import datetime +import json +import sys +from collections.abc import Iterable +from typing import IO, TYPE_CHECKING, Any + +from coverage import __version__ +from coverage.report_core import get_analysis_to_report +from coverage.results import Analysis, AnalysisNarrower, Numbers +from coverage.types import TLineNo, TMorf + +if TYPE_CHECKING: + from coverage import Coverage + from coverage.data import CoverageData + from coverage.plugin import FileReporter + + +# A type for data that can be JSON-serialized. +JsonObj = dict[str, Any] + +# "Version 1" had no format number at all. +# 2: add the meta.format field. +# 3: add region information (functions, classes) +FORMAT_VERSION = 3 + + +class JsonReporter: + """A reporter for writing JSON coverage results.""" + + report_type = "JSON report" + + def __init__(self, coverage: Coverage) -> None: + self.coverage = coverage + self.config = self.coverage.config + self.total = Numbers(self.config.precision) + self.report_data: JsonObj = {} + + def make_summary(self, nums: Numbers) -> JsonObj: + """Create a dict summarizing `nums`.""" + return { + "covered_lines": nums.n_executed, + "num_statements": nums.n_statements, + "percent_covered": nums.pc_covered, + "percent_covered_display": nums.pc_covered_str, + "missing_lines": nums.n_missing, + "excluded_lines": nums.n_excluded, + } + + def make_branch_summary(self, nums: Numbers) -> JsonObj: + """Create a dict summarizing the branch info in `nums`.""" + return { + "num_branches": nums.n_branches, + "num_partial_branches": nums.n_partial_branches, + "covered_branches": nums.n_executed_branches, + "missing_branches": nums.n_missing_branches, + } + + def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float: + """Generate a json report for `morfs`. + + `morfs` is a list of modules or file names. + + `outfile` is a file object to write the json to. + + """ + outfile = outfile or sys.stdout + coverage_data = self.coverage.get_data() + coverage_data.set_query_contexts(self.config.report_contexts) + self.report_data["meta"] = { + "format": FORMAT_VERSION, + "version": __version__, + "timestamp": datetime.datetime.now().isoformat(), + "branch_coverage": coverage_data.has_arcs(), + "show_contexts": self.config.json_show_contexts, + } + + measured_files = {} + for file_reporter, analysis in get_analysis_to_report(self.coverage, morfs): + measured_files[file_reporter.relative_filename()] = self.report_one_file( + coverage_data, + analysis, + file_reporter, + ) + + self.report_data["files"] = measured_files + self.report_data["totals"] = self.make_summary(self.total) + + if coverage_data.has_arcs(): + self.report_data["totals"].update(self.make_branch_summary(self.total)) + + json.dump( + self.report_data, + outfile, + indent=(4 if self.config.json_pretty_print else None), + ) + + return self.total.n_statements and self.total.pc_covered + + def report_one_file( + self, coverage_data: CoverageData, analysis: Analysis, file_reporter: FileReporter + ) -> JsonObj: + """Extract the relevant report data for a single file.""" + nums = analysis.numbers + self.total += nums + summary = self.make_summary(nums) + reported_file: JsonObj = { + "executed_lines": sorted(analysis.executed), + "summary": summary, + "missing_lines": sorted(analysis.missing), + "excluded_lines": sorted(analysis.excluded), + } + if self.config.json_show_contexts: + reported_file["contexts"] = coverage_data.contexts_by_lineno(analysis.filename) + if coverage_data.has_arcs(): + summary.update(self.make_branch_summary(nums)) + reported_file["executed_branches"] = list( + _convert_branch_arcs(analysis.executed_branch_arcs()), + ) + reported_file["missing_branches"] = list( + _convert_branch_arcs(analysis.missing_branch_arcs()), + ) + + num_lines = len(file_reporter.source().splitlines()) + regions = file_reporter.code_regions() + for noun, plural in file_reporter.code_region_kinds(): + outside_lines = set(range(1, num_lines + 1)) + for region in regions: + if region.kind != noun: + continue + outside_lines -= region.lines + + narrower = AnalysisNarrower(analysis) + narrower.add_regions(r.lines for r in regions if r.kind == noun) + narrower.add_regions([outside_lines]) + + reported_file[plural] = region_data = {} + for region in regions: + if region.kind != noun: + continue + region_data[region.name] = self.make_region_data( + coverage_data, + narrower.narrow(region.lines), + ) + + region_data[""] = self.make_region_data( + coverage_data, + narrower.narrow(outside_lines), + ) + return reported_file + + def make_region_data(self, coverage_data: CoverageData, narrowed_analysis: Analysis) -> JsonObj: + """Create the data object for one region of a file.""" + narrowed_nums = narrowed_analysis.numbers + narrowed_summary = self.make_summary(narrowed_nums) + this_region = { + "executed_lines": sorted(narrowed_analysis.executed), + "summary": narrowed_summary, + "missing_lines": sorted(narrowed_analysis.missing), + "excluded_lines": sorted(narrowed_analysis.excluded), + } + if self.config.json_show_contexts: + contexts = coverage_data.contexts_by_lineno(narrowed_analysis.filename) + this_region["contexts"] = contexts + if coverage_data.has_arcs(): + narrowed_summary.update(self.make_branch_summary(narrowed_nums)) + this_region["executed_branches"] = list( + _convert_branch_arcs(narrowed_analysis.executed_branch_arcs()), + ) + this_region["missing_branches"] = list( + _convert_branch_arcs(narrowed_analysis.missing_branch_arcs()), + ) + return this_region + + +def _convert_branch_arcs( + branch_arcs: dict[TLineNo, list[TLineNo]], +) -> Iterable[tuple[TLineNo, TLineNo]]: + """Convert branch arcs to a list of two-element tuples.""" + for source, targets in branch_arcs.items(): + for target in targets: + yield source, target diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/lcovreport.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/lcovreport.py new file mode 100644 index 0000000..c29cfe4 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/lcovreport.py @@ -0,0 +1,219 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""LCOV reporting for coverage.py.""" + +from __future__ import annotations + +import base64 +import hashlib +import sys +from collections.abc import Iterable +from typing import IO, TYPE_CHECKING + +from coverage.plugin import FileReporter +from coverage.report_core import get_analysis_to_report +from coverage.results import Analysis, AnalysisNarrower, Numbers +from coverage.types import TMorf + +if TYPE_CHECKING: + from coverage import Coverage + + +def line_hash(line: str) -> str: + """Produce a hash of a source line for use in the LCOV file.""" + # The LCOV file format optionally allows each line to be MD5ed as a + # fingerprint of the file. This is not a security use. Some security + # scanners raise alarms about the use of MD5 here, but it is a false + # positive. This is not a security concern. + # The unusual encoding of the MD5 hash, as a base64 sequence with the + # trailing = signs stripped, is specified by the LCOV file format. + hashed = hashlib.md5(line.encode("utf-8"), usedforsecurity=False).digest() + return base64.b64encode(hashed).decode("ascii").rstrip("=") + + +def lcov_lines( + analysis: Analysis, + lines: list[int], + source_lines: list[str], + outfile: IO[str], +) -> None: + """Emit line coverage records for an analyzed file.""" + hash_suffix = "" + for line in lines: + if source_lines: + hash_suffix = "," + line_hash(source_lines[line - 1]) + # Q: can we get info about the number of times a statement is + # executed? If so, that should be recorded here. + hit = int(line not in analysis.missing) + outfile.write(f"DA:{line},{hit}{hash_suffix}\n") + + if analysis.numbers.n_statements > 0: + outfile.write(f"LF:{analysis.numbers.n_statements}\n") + outfile.write(f"LH:{analysis.numbers.n_executed}\n") + + +def lcov_functions( + fr: FileReporter, + file_analysis: Analysis, + outfile: IO[str], +) -> None: + """Emit function coverage records for an analyzed file.""" + # lcov 2.2 introduces a new format for function coverage records. + # We continue to generate the old format because we don't know what + # version of the lcov tools will be used to read this report. + + # "and region.lines" below avoids a crash due to a bug in PyPy 3.8 + # where, for whatever reason, when collecting data in --branch mode, + # top-level functions have an empty lines array. Instead we just don't + # emit function records for those. + + # suppressions because of https://github.com/pylint-dev/pylint/issues/9923 + functions = [ + ( + min(region.start, min(region.lines)), # pylint: disable=nested-min-max + max(region.start, max(region.lines)), # pylint: disable=nested-min-max + region, + ) + for region in fr.code_regions() + if region.kind == "function" and region.lines + ] + if not functions: + return + + narrower = AnalysisNarrower(file_analysis) + narrower.add_regions(r.lines for _, _, r in functions) + + functions.sort() + functions_hit = 0 + for first_line, last_line, region in functions: + # A function counts as having been executed if any of it has been + # executed. + analysis = narrower.narrow(region.lines) + hit = int(analysis.numbers.n_executed > 0) + functions_hit += hit + + outfile.write(f"FN:{first_line},{last_line},{region.name}\n") + outfile.write(f"FNDA:{hit},{region.name}\n") + + outfile.write(f"FNF:{len(functions)}\n") + outfile.write(f"FNH:{functions_hit}\n") + + +def lcov_arcs( + fr: FileReporter, + analysis: Analysis, + lines: list[int], + outfile: IO[str], +) -> None: + """Emit branch coverage records for an analyzed file.""" + branch_stats = analysis.branch_stats() + executed_arcs = analysis.executed_branch_arcs() + missing_arcs = analysis.missing_branch_arcs() + + for line in lines: + if line not in branch_stats: + continue + + # This is only one of several possible ways to map our sets of executed + # and not-executed arcs to BRDA codes. It seems to produce reasonable + # results when fed through genhtml. + _, taken = branch_stats[line] + + if taken == 0: + # When _none_ of the out arcs from 'line' were executed, + # it can mean the line always raised an exception. + assert len(executed_arcs[line]) == 0 + destinations = [(dst, "-") for dst in missing_arcs[line]] + else: + # Q: can we get counts of the number of times each arc was executed? + # branch_stats has "total" and "taken" counts for each branch, + # but it doesn't have "taken" broken down by destination. + destinations = [(dst, "1") for dst in executed_arcs[line]] + destinations.extend((dst, "0") for dst in missing_arcs[line]) + + # Sort exit arcs after normal arcs. Exit arcs typically come from + # an if statement, at the end of a function, with no else clause. + # This structure reads like you're jumping to the end of the function + # when the conditional expression is false, so it should be presented + # as the second alternative for the branch, after the alternative that + # enters the if clause. + destinations.sort(key=lambda d: (d[0] < 0, d)) + + for dst, hit in destinations: + branch = fr.arc_description(line, dst) + outfile.write(f"BRDA:{line},0,{branch},{hit}\n") + + # Summary of the branch coverage. + brf = sum(t for t, k in branch_stats.values()) + brh = brf - sum(t - k for t, k in branch_stats.values()) + if brf > 0: + outfile.write(f"BRF:{brf}\n") + outfile.write(f"BRH:{brh}\n") + + +class LcovReporter: + """A reporter for writing LCOV coverage reports.""" + + report_type = "LCOV report" + + def __init__(self, coverage: Coverage) -> None: + self.coverage = coverage + self.config = coverage.config + self.total = Numbers(self.coverage.config.precision) + + def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float: + """Renders the full lcov report. + + `morfs` is a list of modules or filenames + + outfile is the file object to write the file into. + """ + + self.coverage.get_data() + outfile = outfile or sys.stdout + + # ensure file records are sorted by the _relative_ filename, not the full path + to_report = [ + (fr.relative_filename(), fr, analysis) + for fr, analysis in get_analysis_to_report(self.coverage, morfs) + ] + to_report.sort() + + for fname, fr, analysis in to_report: + self.total += analysis.numbers + self.lcov_file(fname, fr, analysis, outfile) + + return self.total.n_statements and self.total.pc_covered + + def lcov_file( + self, + rel_fname: str, + fr: FileReporter, + analysis: Analysis, + outfile: IO[str], + ) -> None: + """Produces the lcov data for a single file. + + This currently supports both line and branch coverage, + however function coverage is not supported. + """ + + if analysis.numbers.n_statements == 0: + if self.config.skip_empty: + return + + outfile.write(f"SF:{rel_fname}\n") + + lines = sorted(analysis.statements) + if self.config.lcov_line_checksums: + source_lines = fr.source().splitlines() + else: + source_lines = [] + + lcov_lines(analysis, lines, source_lines, outfile) + lcov_functions(fr, analysis, outfile) + if analysis.has_arcs: + lcov_arcs(fr, analysis, lines, outfile) + + outfile.write("end_of_record\n") diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/misc.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/misc.py new file mode 100644 index 0000000..82abeab --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/misc.py @@ -0,0 +1,373 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Miscellaneous stuff for coverage.py.""" + +from __future__ import annotations + +import contextlib +import datetime +import errno +import functools +import hashlib +import importlib +import importlib.util +import inspect +import os +import os.path +import re +import sys +import types +from collections.abc import Iterable, Iterator, Mapping, Sequence +from types import ModuleType +from typing import Any, NoReturn, TypeVar + +# In 6.0, the exceptions moved from misc.py to exceptions.py. But a number of +# other packages were importing the exceptions from misc, so import them here. +# pylint: disable=unused-wildcard-import +from coverage.exceptions import * # pylint: disable=wildcard-import +from coverage.exceptions import CoverageException +from coverage.types import TArc + +ISOLATED_MODULES: dict[ModuleType, ModuleType] = {} + + +def isolate_module(mod: ModuleType) -> ModuleType: + """Copy a module so that we are isolated from aggressive mocking. + + If a test suite mocks os.path.exists (for example), and then we need to use + it during the test, everything will get tangled up if we use their mock. + Making a copy of the module when we import it will isolate coverage.py from + those complications. + """ + if mod not in ISOLATED_MODULES: + new_mod = types.ModuleType(mod.__name__) + ISOLATED_MODULES[mod] = new_mod + for name in dir(mod): + value = getattr(mod, name) + if isinstance(value, types.ModuleType): + value = isolate_module(value) + setattr(new_mod, name, value) + return ISOLATED_MODULES[mod] + + +os = isolate_module(os) + + +class SysModuleSaver: + """Saves the contents of sys.modules, and removes new modules later.""" + + def __init__(self) -> None: + self.old_modules = set(sys.modules) + + def restore(self) -> None: + """Remove any modules imported since this object started.""" + new_modules = set(sys.modules) - self.old_modules + for m in new_modules: + del sys.modules[m] + + +@contextlib.contextmanager +def sys_modules_saved() -> Iterator[None]: + """A context manager to remove any modules imported during a block.""" + saver = SysModuleSaver() + try: + yield + finally: + saver.restore() + + +def import_third_party(modname: str) -> tuple[ModuleType, bool]: + """Import a third-party module we need, but might not be installed. + + This also cleans out the module after the import, so that coverage won't + appear to have imported it. This lets the third party use coverage for + their own tests. + + Arguments: + modname (str): the name of the module to import. + + Returns: + The imported module, and a boolean indicating if the module could be imported. + + If the boolean is False, the module returned is not the one you want: don't use it. + + """ + with sys_modules_saved(): + try: + return importlib.import_module(modname), True + except ImportError: + return sys, False + + +def nice_pair(pair: TArc) -> str: + """Make a nice string representation of a pair of numbers. + + If the numbers are equal, just return the number, otherwise return the pair + with a dash between them, indicating the range. + + """ + start, end = pair + if start == end: + return f"{start}" + else: + return f"{start}-{end}" + + +def bool_or_none(b: Any) -> bool | None: + """Return bool(b), but preserve None.""" + if b is None: + return None + else: + return bool(b) + + +def join_regex(regexes: Iterable[str]) -> str: + """Combine a series of regex strings into one that matches any of them.""" + regexes = list(regexes) + if len(regexes) == 1: + return regexes[0] + else: + return "|".join(f"(?:{r})" for r in regexes) + + +def file_be_gone(path: str) -> None: + """Remove a file, and don't get annoyed if it doesn't exist.""" + try: + os.remove(path) + except OSError as e: + if e.errno != errno.ENOENT: + raise + + +def ensure_dir(directory: str) -> None: + """Make sure the directory exists. + + If `directory` is None or empty, do nothing. + """ + if directory: + os.makedirs(directory, exist_ok=True) + + +def ensure_dir_for_file(path: str) -> None: + """Make sure the directory for the path exists.""" + ensure_dir(os.path.dirname(path)) + + +class Hasher: + """Hashes Python data for fingerprinting.""" + + def __init__(self) -> None: + self.hash = hashlib.new("sha3_256", usedforsecurity=False) + + def update(self, v: Any) -> None: + """Add `v` to the hash, recursively if needed.""" + self.hash.update(str(type(v)).encode("utf-8")) + match v: + case None: + pass + case str(): + self.hash.update(v.encode("utf-8")) + case bytes(): + self.hash.update(v) + case int() | float(): + self.hash.update(str(v).encode("utf-8")) + case tuple() | list(): + for e in v: + self.update(e) + case dict(): + keys = v.keys() + for k in sorted(keys): + self.update(k) + self.update(v[k]) + case _: + for k in dir(v): + if k.startswith("__"): + continue + a = getattr(v, k) + if inspect.isroutine(a): + continue + self.update(k) + self.update(a) + self.hash.update(b".") + + def hexdigest(self) -> str: + """Retrieve the hex digest of the hash.""" + return self.hash.hexdigest()[:32] + + +def _needs_to_implement(that: Any, func_name: str) -> NoReturn: + """Helper to raise NotImplementedError in interface stubs.""" + if hasattr(that, "_coverage_plugin_name"): + thing = "Plugin" + name = that._coverage_plugin_name + else: + thing = "Class" + klass = that.__class__ + name = f"{klass.__module__}.{klass.__name__}" + + raise NotImplementedError( + f"{thing} {name!r} needs to implement {func_name}()", + ) + + +class DefaultValue: + """A sentinel object to use for unusual default-value needs. + + Construct with a string that will be used as the repr, for display in help + and Sphinx output. + + """ + + def __init__(self, display_as: str) -> None: + self.display_as = display_as + + def __repr__(self) -> str: + return self.display_as + + +def substitute_variables(text: str, variables: Mapping[str, str]) -> str: + """Substitute ``${VAR}`` variables in `text` with their values. + + Variables in the text can take a number of shell-inspired forms:: + + $VAR + ${VAR} + ${VAR?} strict: an error if VAR isn't defined. + ${VAR-missing} defaulted: "missing" if VAR isn't defined. + $$ just a dollar sign. + + `variables` is a dictionary of variable values. + + Returns the resulting text with values substituted. + + """ + dollar_pattern = r"""(?x) # Use extended regex syntax + \$ # A dollar sign, + (?: # then + (?P \$ ) | # a dollar sign, or + (?P \w+ ) | # a plain word, or + \{ # a {-wrapped + (?P \w+ ) # word, + (?: # either + (?P \? ) | # with a strict marker + -(?P [^}]* ) # or a default value + )? # maybe. + } + ) + """ + + dollar_groups = ("dollar", "word1", "word2") + + def dollar_replace(match: re.Match[str]) -> str: + """Called for each $replacement.""" + # Only one of the dollar_groups will have matched, just get its text. + word = next(g for g in match.group(*dollar_groups) if g) # pragma: always breaks + if word == "$": + return "$" + elif word in variables: + return variables[word] + elif match["strict"]: + msg = f"Variable {word} is undefined: {text!r}" + raise CoverageException(msg) + else: + return match["defval"] + + text = re.sub(dollar_pattern, dollar_replace, text) + return text + + +def format_local_datetime(dt: datetime.datetime) -> str: + """Return a string with local timezone representing the date.""" + return dt.astimezone().strftime("%Y-%m-%d %H:%M %z") + + +def import_local_file(modname: str, modfile: str | None = None) -> ModuleType: + """Import a local file as a module. + + Opens a file in the current directory named `modname`.py, imports it + as `modname`, and returns the module object. `modfile` is the file to + import if it isn't in the current directory. + + """ + if modfile is None: + modfile = modname + ".py" + spec = importlib.util.spec_from_file_location(modname, modfile) + assert spec is not None + mod = importlib.util.module_from_spec(spec) + sys.modules[modname] = mod + assert spec.loader is not None + spec.loader.exec_module(mod) + + return mod + + +@functools.cache +def _human_key(s: str) -> tuple[list[str | int], str]: + """Turn a string into a list of string and number chunks. + + "z23a" -> (["z", 23, "a"], "z23a") + + The original string is appended as a last value to ensure the + key is unique enough so that "x1y" and "x001y" can be distinguished. + """ + + def tryint(s: str) -> str | int: + """If `s` is a number, return an int, else `s` unchanged.""" + try: + return int(s) + except ValueError: + return s + + return ([tryint(c) for c in re.split(r"(\d+)", s)], s) + + +def human_sorted(strings: Iterable[str]) -> list[str]: + """Sort the given iterable of strings the way that humans expect. + + Numeric components in the strings are sorted as numbers. + + Returns the sorted list. + + """ + return sorted(strings, key=_human_key) + + +SortableItem = TypeVar("SortableItem", bound=Sequence[Any]) + + +def human_sorted_items( + items: Iterable[SortableItem], + reverse: bool = False, +) -> list[SortableItem]: + """Sort (string, ...) items the way humans expect. + + The elements of `items` can be any tuple/list. They'll be sorted by the + first element (a string), with ties broken by the remaining elements. + + Returns the sorted list of items. + """ + return sorted(items, key=lambda item: (_human_key(item[0]), *item[1:]), reverse=reverse) + + +def plural(n: int, thing: str = "", things: str = "") -> str: + """Pluralize a word. + + If n is 1, return thing. Otherwise return things, or thing+s. + """ + if n == 1: + return thing + else: + return things or (thing + "s") + + +def stdout_link(text: str, url: str) -> str: + """Format text+url as a clickable link for stdout. + + If attached to a terminal, use escape sequences. Otherwise, just return + the text. + """ + if hasattr(sys.stdout, "isatty") and sys.stdout.isatty(): + return f"\033]8;;{url}\a{text}\033]8;;\a" + else: + return text diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/multiproc.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/multiproc.py new file mode 100644 index 0000000..dde2ca2 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/multiproc.py @@ -0,0 +1,120 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Monkey-patching to add multiprocessing support for coverage.py""" + +from __future__ import annotations + +import multiprocessing +import multiprocessing.process +import os +import os.path +import sys +import traceback +from typing import Any + +from coverage.debug import DebugControl + +# An attribute that will be set on the module to indicate that it has been +# monkey-patched. +PATCHED_MARKER = "_coverage$patched" + + +OriginalProcess = multiprocessing.process.BaseProcess +original_bootstrap = OriginalProcess._bootstrap # type: ignore[attr-defined] + + +class ProcessWithCoverage(OriginalProcess): # pylint: disable=abstract-method + """A replacement for multiprocess.Process that starts coverage.""" + + def _bootstrap(self, *args, **kwargs): # type: ignore[no-untyped-def] + """Wrapper around _bootstrap to start coverage.""" + debug: DebugControl | None = None + try: + from coverage import Coverage # avoid circular import + + cov = Coverage(data_suffix=True, auto_data=True) + cov._warn_preimported_source = False + cov.start() + _debug = cov._debug + assert _debug is not None + if _debug.should("multiproc"): + debug = _debug + if debug: + debug.write("Calling multiprocessing bootstrap") + except Exception: + print("Exception during multiprocessing bootstrap init:", file=sys.stderr) + traceback.print_exc(file=sys.stderr) + sys.stderr.flush() + raise + try: + return original_bootstrap(self, *args, **kwargs) + finally: + if debug: + debug.write("Finished multiprocessing bootstrap") + try: + cov.stop() + cov.save() + except Exception as exc: + if debug: + debug.write("Exception during multiprocessing bootstrap cleanup", exc=exc) + raise + if debug: + debug.write("Saved multiprocessing data") + + +class Stowaway: + """An object to pickle, so when it is unpickled, it can apply the monkey-patch.""" + + def __init__(self, rcfile: str) -> None: + self.rcfile = rcfile + + def __getstate__(self) -> dict[str, str]: + return {"rcfile": self.rcfile} + + def __setstate__(self, state: dict[str, str]) -> None: + patch_multiprocessing(state["rcfile"]) + + +def patch_multiprocessing(rcfile: str) -> None: + """Monkey-patch the multiprocessing module. + + This enables coverage measurement of processes started by multiprocessing. + This involves aggressive monkey-patching. + + `rcfile` is the path to the rcfile being used. + + """ + + if hasattr(multiprocessing, PATCHED_MARKER): + return + + OriginalProcess._bootstrap = ProcessWithCoverage._bootstrap # type: ignore[attr-defined] + + # Set the value in ProcessWithCoverage that will be pickled into the child + # process. + os.environ["COVERAGE_RCFILE"] = os.path.abspath(rcfile) + + # When spawning processes rather than forking them, we have no state in the + # new process. We sneak in there with a Stowaway: we stuff one of our own + # objects into the data that gets pickled and sent to the subprocess. When + # the Stowaway is unpickled, its __setstate__ method is called, which + # re-applies the monkey-patch. + # Windows only spawns, so this is needed to keep Windows working. + try: + from multiprocessing import spawn + + original_get_preparation_data = spawn.get_preparation_data + except (ImportError, AttributeError): + pass + else: + + def get_preparation_data_with_stowaway(name: str) -> dict[str, Any]: + """Get the original preparation data, and also insert our stowaway.""" + d = original_get_preparation_data(name) + d["stowaway"] = Stowaway(rcfile) + return d + + spawn.get_preparation_data = get_preparation_data_with_stowaway + + setattr(multiprocessing, PATCHED_MARKER, True) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/numbits.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/numbits.py new file mode 100644 index 0000000..e813559 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/numbits.py @@ -0,0 +1,146 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +""" +Functions to manipulate packed binary representations of number sets. + +To save space, coverage stores sets of line numbers in SQLite using a packed +binary representation called a numbits. A numbits is a set of positive +integers. + +A numbits is stored as a blob in the database. The exact meaning of the bytes +in the blobs should be considered an implementation detail that might change in +the future. Use these functions to work with those binary blobs of data. + +""" + +from __future__ import annotations + +import json +import sqlite3 +from collections.abc import Iterable +from itertools import zip_longest + + +def nums_to_numbits(nums: Iterable[int]) -> bytes: + """Convert `nums` into a numbits. + + Arguments: + nums: a reusable iterable of integers, the line numbers to store. + + Returns: + A binary blob. + """ + try: + nbytes = max(nums) // 8 + 1 + except ValueError: + # nums was empty. + return b"" + b = bytearray(nbytes) + for num in nums: + b[num // 8] |= 1 << num % 8 + return bytes(b) + + +def numbits_to_nums(numbits: bytes) -> list[int]: + """Convert a numbits into a list of numbers. + + Arguments: + numbits: a binary blob, the packed number set. + + Returns: + A list of ints. + + When registered as a SQLite function by :func:`register_sqlite_functions`, + this returns a string, a JSON-encoded list of ints. + + """ + nums = [] + for byte_i, byte in enumerate(numbits): + for bit_i in range(8): + if byte & (1 << bit_i): + nums.append(byte_i * 8 + bit_i) + return nums + + +def numbits_union(numbits1: bytes, numbits2: bytes) -> bytes: + """Compute the union of two numbits. + + Returns: + A new numbits, the union of `numbits1` and `numbits2`. + """ + byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0) + return bytes(b1 | b2 for b1, b2 in byte_pairs) + + +def numbits_intersection(numbits1: bytes, numbits2: bytes) -> bytes: + """Compute the intersection of two numbits. + + Returns: + A new numbits, the intersection `numbits1` and `numbits2`. + """ + byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0) + intersection_bytes = bytes(b1 & b2 for b1, b2 in byte_pairs) + return intersection_bytes.rstrip(b"\0") + + +def numbits_any_intersection(numbits1: bytes, numbits2: bytes) -> bool: + """Is there any number that appears in both numbits? + + Determine whether two number sets have a non-empty intersection. This is + faster than computing the intersection. + + Returns: + A bool, True if there is any number in both `numbits1` and `numbits2`. + """ + byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0) + return any(b1 & b2 for b1, b2 in byte_pairs) + + +def num_in_numbits(num: int, numbits: bytes) -> bool: + """Does the integer `num` appear in `numbits`? + + Returns: + A bool, True if `num` is a member of `numbits`. + """ + nbyte, nbit = divmod(num, 8) + if nbyte >= len(numbits): + return False + return bool(numbits[nbyte] & (1 << nbit)) + + +def register_sqlite_functions(connection: sqlite3.Connection) -> None: + """ + Define numbits functions in a SQLite connection. + + This defines these functions for use in SQLite statements: + + * :func:`numbits_union` + * :func:`numbits_intersection` + * :func:`numbits_any_intersection` + * :func:`num_in_numbits` + * :func:`numbits_to_nums` + + `connection` is a :class:`sqlite3.Connection ` + object. After creating the connection, pass it to this function to + register the numbits functions. Then you can use numbits functions in your + queries:: + + import sqlite3 + from coverage.numbits import register_sqlite_functions + + conn = sqlite3.connect("example.db") + register_sqlite_functions(conn) + c = conn.cursor() + # Kind of a nonsense query: + # Find all the files and contexts that executed line 47 in any file: + c.execute( + "select file_id, context_id from line_bits where num_in_numbits(?, numbits)", + (47,) + ) + """ + connection.create_function("numbits_union", 2, numbits_union) + connection.create_function("numbits_intersection", 2, numbits_intersection) + connection.create_function("numbits_any_intersection", 2, numbits_any_intersection) + connection.create_function("num_in_numbits", 2, num_in_numbits) + connection.create_function("numbits_to_nums", 1, lambda b: json.dumps(numbits_to_nums(b))) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/parser.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/parser.py new file mode 100644 index 0000000..94c102b --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/parser.py @@ -0,0 +1,1213 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Code parsing for coverage.py.""" + +from __future__ import annotations + +import ast +import collections +import functools +import os +import re +import token +import tokenize +from collections.abc import Iterable, Sequence +from dataclasses import dataclass +from types import CodeType +from typing import Callable, Optional, Protocol, cast + +from coverage import env +from coverage.bytecode import code_objects +from coverage.debug import short_stack +from coverage.exceptions import NoSource, NotPython +from coverage.misc import isolate_module, nice_pair +from coverage.phystokens import generate_tokens +from coverage.types import TArc, TLineNo + +os = isolate_module(os) + + +class PythonParser: + """Parse code to find executable lines, excluded lines, etc. + + This information is all based on static analysis: no code execution is + involved. + + """ + + def __init__( + self, + text: str | None = None, + filename: str | None = None, + exclude: str | None = None, + ) -> None: + """ + Source can be provided as `text`, the text itself, or `filename`, from + which the text will be read. Excluded lines are those that match + `exclude`, a regex string. + + """ + assert text or filename, "PythonParser needs either text or filename" + self.filename = filename or "" + if text is not None: + self.text: str = text + else: + from coverage.python import get_python_source + + try: + self.text = get_python_source(self.filename) + except OSError as err: + raise NoSource(f"No source for code: '{self.filename}': {err}") from err + + self.exclude = exclude + + # The parsed AST of the text. + self._ast_root: ast.AST | None = None + + # The normalized line numbers of the statements in the code. Exclusions + # are taken into account, and statements are adjusted to their first + # lines. + self.statements: set[TLineNo] = set() + + # The normalized line numbers of the excluded lines in the code, + # adjusted to their first lines. + self.excluded: set[TLineNo] = set() + + # The raw_* attributes are only used in this class, and in + # lab/parser.py to show how this class is working. + + # The line numbers that start statements, as reported by the line + # number table in the bytecode. + self.raw_statements: set[TLineNo] = set() + + # The raw line numbers of excluded lines of code, as marked by pragmas. + self.raw_excluded: set[TLineNo] = set() + + # The line numbers of docstring lines. + self.raw_docstrings: set[TLineNo] = set() + + # Internal detail, used by lab/parser.py. + self.show_tokens = False + + # A dict mapping line numbers to lexical statement starts for + # multi-line statements. + self.multiline_map: dict[TLineNo, TLineNo] = {} + + # Lazily-created arc data, and missing arc descriptions. + self._all_arcs: set[TArc] | None = None + self._missing_arc_fragments: TArcFragments | None = None + self._with_jump_fixers: dict[TArc, tuple[TArc, TArc]] = {} + + def lines_matching(self, regex: str) -> set[TLineNo]: + """Find the lines matching a regex. + + Returns a set of line numbers, the lines that contain a match for + `regex`. The entire line needn't match, just a part of it. + Handles multiline regex patterns. + + """ + matches: set[TLineNo] = set() + + last_start = 0 + last_start_line = 0 + for match in re.finditer(regex, self.text, flags=re.MULTILINE): + start, end = match.span() + start_line = last_start_line + self.text.count("\n", last_start, start) + end_line = last_start_line + self.text.count("\n", last_start, end) + matches.update( + self.multiline_map.get(i, i) for i in range(start_line + 1, end_line + 2) + ) + last_start = start + last_start_line = start_line + return matches + + def _raw_parse(self) -> None: + """Parse the source to find the interesting facts about its lines. + + A handful of attributes are updated. + + """ + # Find lines which match an exclusion pattern. + if self.exclude: + self.raw_excluded = self.lines_matching(self.exclude) + self.excluded = set(self.raw_excluded) + + # The current number of indents. + indent: int = 0 + # An exclusion comment will exclude an entire clause at this indent. + exclude_indent: int = 0 + # Are we currently excluding lines? + excluding: bool = False + # The line number of the first line in a multi-line statement. + first_line: int = 0 + # Is the file empty? + empty: bool = True + # Parenthesis (and bracket) nesting level. + nesting: int = 0 + + assert self.text is not None + tokgen = generate_tokens(self.text) + for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen: + if self.show_tokens: # pragma: debugging + print( + "%10s %5s %-20r %r" + % ( + tokenize.tok_name.get(toktype, toktype), + nice_pair((slineno, elineno)), + ttext, + ltext, + ) + ) + if toktype == token.INDENT: + indent += 1 + elif toktype == token.DEDENT: + indent -= 1 + elif toktype == token.OP: + if ttext == ":" and nesting == 0: + should_exclude = self.excluded.intersection(range(first_line, elineno + 1)) + if not excluding and should_exclude: + # Start excluding a suite. We trigger off of the colon + # token so that the #pragma comment will be recognized on + # the same line as the colon. + self.excluded.add(elineno) + exclude_indent = indent + excluding = True + elif ttext in "([{": + nesting += 1 + elif ttext in ")]}": + nesting -= 1 + elif toktype == token.NEWLINE: + if first_line and elineno != first_line: + # We're at the end of a line, and we've ended on a + # different line than the first line of the statement, + # so record a multi-line range. + for l in range(first_line, elineno + 1): + self.multiline_map[l] = first_line + first_line = 0 + + if ttext.strip() and toktype != tokenize.COMMENT: + # A non-white-space token. + empty = False + if not first_line: + # The token is not white space, and is the first in a statement. + first_line = slineno + # Check whether to end an excluded suite. + if excluding and indent <= exclude_indent: + excluding = False + if excluding: + self.excluded.add(elineno) + + # Find the starts of the executable statements. + if not empty: + byte_parser = ByteParser(self.text, filename=self.filename) + self.raw_statements.update(byte_parser._find_statements()) + + self.excluded = self.first_lines(self.excluded) + + # AST lets us find classes, docstrings, and decorator-affected + # functions and classes. + assert self._ast_root is not None + for node in ast.walk(self._ast_root): + # Find docstrings. + if isinstance(node, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef, ast.Module)): + if node.body: + first = node.body[0] + if ( + isinstance(first, ast.Expr) + and isinstance(first.value, ast.Constant) + and isinstance(first.value.value, str) + ): + self.raw_docstrings.update( + range(first.lineno, cast(int, first.end_lineno) + 1) + ) + # Exclusions carry from decorators and signatures to the bodies of + # functions and classes. + if isinstance(node, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)): + first_line = min((d.lineno for d in node.decorator_list), default=node.lineno) + if self.excluded.intersection(range(first_line, node.lineno + 1)): + self.excluded.update(range(first_line, cast(int, node.end_lineno) + 1)) + + @functools.lru_cache(maxsize=1000) + def first_line(self, lineno: TLineNo) -> TLineNo: + """Return the first line number of the statement including `lineno`.""" + if lineno < 0: + lineno = -self.multiline_map.get(-lineno, -lineno) + else: + lineno = self.multiline_map.get(lineno, lineno) + return lineno + + def first_lines(self, linenos: Iterable[TLineNo]) -> set[TLineNo]: + """Map the line numbers in `linenos` to the correct first line of the + statement. + + Returns a set of the first lines. + + """ + return {self.first_line(l) for l in linenos} + + def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]: + """Implement `FileReporter.translate_lines`.""" + return self.first_lines(lines) + + def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]: + """Implement `FileReporter.translate_arcs`.""" + return {(self.first_line(a), self.first_line(b)) for (a, b) in self.fix_with_jumps(arcs)} + + def parse_source(self) -> None: + """Parse source text to find executable lines, excluded lines, etc. + + Sets the .excluded and .statements attributes, normalized to the first + line of multi-line statements. + + """ + try: + self._ast_root = ast.parse(self.text) + self._raw_parse() + except (tokenize.TokenError, IndentationError, SyntaxError) as err: + if hasattr(err, "lineno"): + lineno = err.lineno # IndentationError + else: + lineno = err.args[1][0] # TokenError + raise NotPython( + f"Couldn't parse '{self.filename}' as Python source: " + + f"{err.args[0]!r} at line {lineno}", + ) from err + + ignore = self.excluded | self.raw_docstrings + starts = self.raw_statements - ignore + self.statements = self.first_lines(starts) - ignore + + def arcs(self) -> set[TArc]: + """Get information about the arcs available in the code. + + Returns a set of line number pairs. Line numbers have been normalized + to the first line of multi-line statements. + + """ + if self._all_arcs is None: + self._analyze_ast() + assert self._all_arcs is not None + return self._all_arcs + + def _analyze_ast(self) -> None: + """Run the AstArcAnalyzer and save its results. + + `_all_arcs` is the set of arcs in the code. + + """ + assert self._ast_root is not None + aaa = AstArcAnalyzer(self.filename, self._ast_root, self.raw_statements, self.multiline_map) + aaa.analyze() + arcs = aaa.arcs + self._with_jump_fixers = aaa.with_jump_fixers() + if self._with_jump_fixers: + arcs = self.fix_with_jumps(arcs) + + self._all_arcs = set() + for l1, l2 in arcs: + fl1 = self.first_line(l1) + fl2 = self.first_line(l2) + if fl1 != fl2: + self._all_arcs.add((fl1, fl2)) + + self._missing_arc_fragments = aaa.missing_arc_fragments + + def fix_with_jumps(self, arcs: Iterable[TArc]) -> set[TArc]: + """Adjust arcs to fix jumps leaving `with` statements. + + Consider this code: + + with open("/tmp/test", "w") as f1: + a = 2 + b = 3 + print(4) + + In 3.10+, we get traces for lines 1, 2, 3, 1, 4. But we want to present + it to the user as if it had been 1, 2, 3, 4. The arc 3->1 should be + replaced with 3->4, and 1->4 should be removed. + + For this code, the fixers dict is {(3, 1): ((1, 4), (3, 4))}. The key + is the actual measured arc from the end of the with block back to the + start of the with-statement. The values are start_next (the with + statement to the next statement after the with), and end_next (the end + of the with-statement to the next statement after the with). + + With nested with-statements, we have to trace through a few levels to + correct a longer chain of arcs. + + """ + to_remove = set() + to_add = set() + for arc in arcs: + if arc in self._with_jump_fixers: + end0 = arc[0] + to_remove.add(arc) + start_next, end_next = self._with_jump_fixers[arc] + while start_next in self._with_jump_fixers: + to_remove.add(start_next) + start_next, end_next = self._with_jump_fixers[start_next] + to_remove.add(end_next) + to_add.add((end0, end_next[1])) + to_remove.add(start_next) + arcs = (set(arcs) | to_add) - to_remove + return arcs + + @functools.lru_cache + def exit_counts(self) -> dict[TLineNo, int]: + """Get a count of exits from that each line. + + Excluded lines are excluded. + + """ + exit_counts: dict[TLineNo, int] = collections.defaultdict(int) + for l1, l2 in self.arcs(): + assert l1 > 0, f"{l1=} should be greater than zero in {self.filename}" + if l1 in self.excluded: + # Don't report excluded lines as line numbers. + continue + if l2 in self.excluded: + # Arcs to excluded lines shouldn't count. + continue + exit_counts[l1] += 1 + + return exit_counts + + def _finish_action_msg(self, action_msg: str | None, end: TLineNo) -> str: + """Apply some defaulting and formatting to an arc's description.""" + if action_msg is None: + if end < 0: + action_msg = "jump to the function exit" + else: + action_msg = "jump to line {lineno}" + action_msg = action_msg.format(lineno=end) + return action_msg + + def missing_arc_description(self, start: TLineNo, end: TLineNo) -> str: + """Provide an English sentence describing a missing arc.""" + if self._missing_arc_fragments is None: + self._analyze_ast() + assert self._missing_arc_fragments is not None + + fragment_pairs = self._missing_arc_fragments.get((start, end), [(None, None)]) + + msgs = [] + for missing_cause_msg, action_msg in fragment_pairs: + action_msg = self._finish_action_msg(action_msg, end) + msg = f"line {start} didn't {action_msg}" + if missing_cause_msg is not None: + msg += f" because {missing_cause_msg.format(lineno=start)}" + + msgs.append(msg) + + return " or ".join(msgs) + + def arc_description(self, start: TLineNo, end: TLineNo) -> str: + """Provide an English description of an arc's effect.""" + if self._missing_arc_fragments is None: + self._analyze_ast() + assert self._missing_arc_fragments is not None + + fragment_pairs = self._missing_arc_fragments.get((start, end), [(None, None)]) + action_msg = self._finish_action_msg(fragment_pairs[0][1], end) + return action_msg + + +class ByteParser: + """Parse bytecode to understand the structure of code.""" + + def __init__( + self, + text: str, + code: CodeType | None = None, + filename: str | None = None, + ) -> None: + self.text = text + if code is not None: + self.code = code + else: + assert filename is not None + # We only get here if earlier ast parsing succeeded, so no need to + # catch errors. + self.code = compile(text, filename, "exec", dont_inherit=True) + + def child_parsers(self) -> Iterable[ByteParser]: + """Iterate over all the code objects nested within this one. + + The iteration includes `self` as its first value. + + We skip code objects named `__annotate__` since they are deferred + annotations that usually are never run. If there are errors in the + annotations, they will be caught by type checkers or other tools that + use annotations. + + """ + return ( + ByteParser(self.text, code=c) + for c in code_objects(self.code) + if c.co_name != "__annotate__" + ) + + def _line_numbers(self) -> Iterable[TLineNo]: + """Yield the line numbers possible in this code object. + + Uses co_lines() to produce a sequence: l0, l1, ... + """ + for _, _, line in self.code.co_lines(): + if line: + yield line + + def _find_statements(self) -> Iterable[TLineNo]: + """Find the statements in `self.code`. + + Produce a sequence of line numbers that start statements. Recurses + into all code objects reachable from `self.code`. + + """ + for bp in self.child_parsers(): + # Get all of the lineno information from this code. + yield from bp._line_numbers() + + +# +# AST analysis +# + + +@dataclass(frozen=True, order=True) +class ArcStart: + """The information needed to start an arc. + + `lineno` is the line number the arc starts from. + + `cause` is an English text fragment used as the `missing_cause_msg` for + AstArcAnalyzer.missing_arc_fragments. It will be used to describe why an + arc wasn't executed, so should fit well into a sentence of the form, + "Line 17 didn't run because {cause}." The fragment can include "{lineno}" + to have `lineno` interpolated into it. + + As an example, this code:: + + if something(x): # line 1 + func(x) # line 2 + more_stuff() # line 3 + + would have two ArcStarts: + + - ArcStart(1, "the condition on line 1 was always true") + - ArcStart(1, "the condition on line 1 was never true") + + The first would be used to create an arc from 1 to 3, creating a message like + "line 1 didn't jump to line 3 because the condition on line 1 was always true." + + The second would be used for the arc from 1 to 2, creating a message like + "line 1 didn't jump to line 2 because the condition on line 1 was never true." + + """ + + lineno: TLineNo + cause: str = "" + + +class TAddArcFn(Protocol): + """The type for AstArcAnalyzer.add_arc().""" + + def __call__( + self, + start: TLineNo, + end: TLineNo, + missing_cause_msg: str | None = None, + action_msg: str | None = None, + ) -> None: + """ + Record an arc from `start` to `end`. + + `missing_cause_msg` is a description of the reason the arc wasn't + taken if it wasn't taken. For example, "the condition on line 10 was + never true." + + `action_msg` is a description of what the arc does, like "jump to line + 10" or "exit from function 'fooey'." + + """ + + +TArcFragments = dict[TArc, list[tuple[Optional[str], Optional[str]]]] + + +class Block: + """ + Blocks need to handle various exiting statements in their own ways. + + All of these methods take a list of exits, and a callable `add_arc` + function that they can use to add arcs if needed. They return True if the + exits are handled, or False if the search should continue up the block + stack. + """ + + # pylint: disable=unused-argument + def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: + """Process break exits.""" + return False + + def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: + """Process continue exits.""" + return False + + def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: + """Process raise exits.""" + return False + + def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: + """Process return exits.""" + return False + + +class LoopBlock(Block): + """A block on the block stack representing a `for` or `while` loop.""" + + def __init__(self, start: TLineNo) -> None: + # The line number where the loop starts. + self.start = start + # A set of ArcStarts, the arcs from break statements exiting this loop. + self.break_exits: set[ArcStart] = set() + + def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: + self.break_exits.update(exits) + return True + + def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: + for xit in exits: + add_arc(xit.lineno, self.start, xit.cause) + return True + + +class FunctionBlock(Block): + """A block on the block stack representing a function definition.""" + + def __init__(self, start: TLineNo, name: str) -> None: + # The line number where the function starts. + self.start = start + # The name of the function. + self.name = name + + def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: + for xit in exits: + add_arc( + xit.lineno, + -self.start, + xit.cause, + f"except from function {self.name!r}", + ) + return True + + def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: + for xit in exits: + add_arc( + xit.lineno, + -self.start, + xit.cause, + f"return from function {self.name!r}", + ) + return True + + +class TryBlock(Block): + """A block on the block stack representing a `try` block.""" + + def __init__(self, handler_start: TLineNo | None, final_start: TLineNo | None) -> None: + # The line number of the first "except" handler, if any. + self.handler_start = handler_start + # The line number of the "finally:" clause, if any. + self.final_start = final_start + + def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: + if self.handler_start is not None: + for xit in exits: + add_arc(xit.lineno, self.handler_start, xit.cause) + return True + + +# TODO: Shouldn't the cause messages join with "and" instead of "or"? + + +def is_constant_test_expr(node: ast.AST) -> tuple[bool, bool]: + """Is this a compile-time constant test expression? + + We don't try to mimic all of CPython's optimizations. We just have to + handle the kinds of constant expressions people might actually use. + + """ + match node: + case ast.Constant(): + return True, bool(node.value) + case ast.Name(): + if node.id in ["True", "False", "None", "__debug__"]: + return True, eval(node.id) # pylint: disable=eval-used + case ast.UnaryOp(): + if isinstance(node.op, ast.Not): + is_constant, val = is_constant_test_expr(node.operand) + return is_constant, not val + case ast.BoolOp(): + rets = [is_constant_test_expr(v) for v in node.values] + is_constant = all(is_const for is_const, _ in rets) + if is_constant: + op = any if isinstance(node.op, ast.Or) else all + return True, op(v for _, v in rets) + return False, False + + +class AstArcAnalyzer: + """Analyze source text with an AST to find executable code paths. + + The .analyze() method does the work, and populates these attributes: + + `arcs`: a set of (from, to) pairs of the the arcs possible in the code. + + `missing_arc_fragments`: a dict mapping (from, to) arcs to lists of + message fragments explaining why the arc is missing from execution:: + + { (start, end): [(missing_cause_msg, action_msg), ...], } + + For an arc starting from line 17, they should be usable to form complete + sentences like: "Line 17 didn't {action_msg} because {missing_cause_msg}". + + NOTE: Starting in July 2024, I've been whittling this down to only report + arc that are part of true branches. It's not clear how far this work will + go. + + """ + + def __init__( + self, + filename: str, + root_node: ast.AST, + statements: set[TLineNo], + multiline: dict[TLineNo, TLineNo], + ) -> None: + self.filename = filename + self.root_node = root_node + self.statements = {multiline.get(l, l) for l in statements} + self.multiline = multiline + + # Turn on AST dumps with an environment variable. + # $set_env.py: COVERAGE_AST_DUMP - Dump the AST nodes when parsing code. + dump_ast = bool(int(os.getenv("COVERAGE_AST_DUMP", "0"))) + + if dump_ast: # pragma: debugging + # Dump the AST so that failing tests have helpful output. + print(f"Statements: {self.statements}") + print(f"Multiline map: {self.multiline}") + print(ast.dump(self.root_node, include_attributes=True, indent=4)) + + self.arcs: set[TArc] = set() + self.missing_arc_fragments: TArcFragments = collections.defaultdict(list) + self.block_stack: list[Block] = [] + + # If `with` clauses jump to their start on the way out, we need + # information to be able to skip over that jump. We record the arcs + # from `with` into the clause (with_entries), and the arcs from the + # clause to the `with` (with_exits). + self.current_with_starts: set[TLineNo] = set() + self.all_with_starts: set[TLineNo] = set() + self.with_entries: set[TArc] = set() + self.with_exits: set[TArc] = set() + + # $set_env.py: COVERAGE_TRACK_ARCS - Trace possible arcs added while parsing code. + self.debug = bool(int(os.getenv("COVERAGE_TRACK_ARCS", "0"))) + + def analyze(self) -> None: + """Examine the AST tree from `self.root_node` to determine possible arcs.""" + for node in ast.walk(self.root_node): + node_name = node.__class__.__name__ + code_object_handler = getattr(self, f"_code_object__{node_name}", None) + if code_object_handler is not None: + code_object_handler(node) + + def with_jump_fixers(self) -> dict[TArc, tuple[TArc, TArc]]: + """Get a dict with data for fixing jumps out of with statements. + + Returns a dict. The keys are arcs leaving a with-statement by jumping + back to its start. The values are pairs: first, the arc from the start + to the next statement, then the arc that exits the with without going + to the start. + + """ + fixers = {} + with_nexts = { + arc + for arc in self.arcs + if arc[0] in self.all_with_starts and arc not in self.with_entries + } + for start in self.all_with_starts: + nexts = {arc[1] for arc in with_nexts if arc[0] == start} + if not nexts: + continue + assert len(nexts) == 1, f"Expected one arc, got {nexts} with {start = }" + nxt = nexts.pop() + ends = {arc[0] for arc in self.with_exits if arc[1] == start} + for end in ends: + fixers[(end, start)] = ((start, nxt), (end, nxt)) + return fixers + + # Code object dispatchers: _code_object__* + # + # These methods are used by analyze() as the start of the analysis. + # There is one for each construct with a code object. + + def _code_object__Module(self, node: ast.Module) -> None: + start = self.line_for_node(node) + if node.body: + exits = self.process_body(node.body) + for xit in exits: + self.add_arc(xit.lineno, -start, xit.cause, "exit the module") + else: + # Empty module. + self.add_arc(start, -start) + + def _code_object__FunctionDef(self, node: ast.FunctionDef) -> None: + start = self.line_for_node(node) + self.block_stack.append(FunctionBlock(start=start, name=node.name)) + exits = self.process_body(node.body) + self.process_return_exits(exits) + self.block_stack.pop() + + _code_object__AsyncFunctionDef = _code_object__FunctionDef + + def _code_object__ClassDef(self, node: ast.ClassDef) -> None: + start = self.line_for_node(node) + exits = self.process_body(node.body) + for xit in exits: + self.add_arc(xit.lineno, -start, xit.cause, f"exit class {node.name!r}") + + def add_arc( + self, + start: TLineNo, + end: TLineNo, + missing_cause_msg: str | None = None, + action_msg: str | None = None, + ) -> None: + """Add an arc, including message fragments to use if it is missing.""" + if self.debug: # pragma: debugging + print(f"Adding possible arc: ({start}, {end}): {missing_cause_msg!r}, {action_msg!r}") + print(short_stack(), end="\n\n") + self.arcs.add((start, end)) + if start in self.current_with_starts: + self.with_entries.add((start, end)) + + if missing_cause_msg is not None or action_msg is not None: + self.missing_arc_fragments[(start, end)].append((missing_cause_msg, action_msg)) + + def nearest_blocks(self) -> Iterable[Block]: + """Yield the blocks in nearest-to-farthest order.""" + return reversed(self.block_stack) + + def line_for_node(self, node: ast.AST) -> TLineNo: + """What is the right line number to use for this node? + + This dispatches to _line__Node functions where needed. + + """ + node_name = node.__class__.__name__ + handler = cast( + Optional[Callable[[ast.AST], TLineNo]], + getattr(self, f"_line__{node_name}", None), + ) + if handler is not None: + line = handler(node) + else: + line = node.lineno # type: ignore[attr-defined] + return self.multiline.get(line, line) + + # First lines: _line__* + # + # Dispatched by line_for_node, each method knows how to identify the first + # line number in the node, as Python will report it. + + def _line_decorated(self, node: ast.FunctionDef) -> TLineNo: + """Compute first line number for things that can be decorated (classes and functions).""" + if node.decorator_list: + lineno = node.decorator_list[0].lineno + else: + lineno = node.lineno + return lineno + + def _line__Assign(self, node: ast.Assign) -> TLineNo: + return self.line_for_node(node.value) + + _line__ClassDef = _line_decorated + + def _line__Dict(self, node: ast.Dict) -> TLineNo: + if node.keys: + if node.keys[0] is not None: + return node.keys[0].lineno + else: + # Unpacked dict literals `{**{"a":1}}` have None as the key, + # use the value in that case. + return node.values[0].lineno + else: + return node.lineno + + _line__FunctionDef = _line_decorated + _line__AsyncFunctionDef = _line_decorated + + def _line__List(self, node: ast.List) -> TLineNo: + if node.elts: + return self.line_for_node(node.elts[0]) + else: + return node.lineno + + def _line__Module(self, node: ast.Module) -> TLineNo: # pylint: disable=unused-argument + return 1 + + # The node types that just flow to the next node with no complications. + OK_TO_DEFAULT = { + "AnnAssign", + "Assign", + "Assert", + "AugAssign", + "Delete", + "Expr", + "Global", + "Import", + "ImportFrom", + "Nonlocal", + "Pass", + } + + def node_exits(self, node: ast.AST) -> set[ArcStart]: + """Find the set of arc starts that exit this node. + + Return a set of ArcStarts, exits from this node to the next. Because a + node represents an entire sub-tree (including its children), the exits + from a node can be arbitrarily complex:: + + if something(1): + if other(2): + doit(3) + else: + doit(5) + + There are three exits from line 1: they start at lines 1, 3 and 5. + There are two exits from line 2: lines 3 and 5. + + """ + node_name = node.__class__.__name__ + handler = cast( + Optional[Callable[[ast.AST], set[ArcStart]]], + getattr(self, f"_handle__{node_name}", None), + ) + if handler is not None: + arc_starts = handler(node) + else: + # No handler: either it's something that's ok to default (a simple + # statement), or it's something we overlooked. + if env.TESTING: + if node_name not in self.OK_TO_DEFAULT: + raise RuntimeError(f"*** Unhandled: {node}") # pragma: only failure + + # Default for simple statements: one exit from this node. + arc_starts = {ArcStart(self.line_for_node(node))} + return arc_starts + + def process_body( + self, + body: Sequence[ast.AST], + from_start: ArcStart | None = None, + prev_starts: set[ArcStart] | None = None, + ) -> set[ArcStart]: + """Process the body of a compound statement. + + `body` is the body node to process. + + `from_start` is a single `ArcStart` that starts an arc into this body. + `prev_starts` is a set of ArcStarts that can all be the start of arcs + into this body. Only one of `from_start` and `prev_starts` should be + given. + + Records arcs within the body by calling `self.add_arc`. + + Returns a set of ArcStarts, the exits from this body. + + """ + if prev_starts is None: + if from_start is None: + prev_starts = set() + else: + prev_starts = {from_start} + else: + assert from_start is None + + # Loop over the nodes in the body, making arcs from each one's exits to + # the next node. + for body_node in body: + lineno = self.line_for_node(body_node) + if lineno not in self.statements: + continue + for prev_start in prev_starts: + self.add_arc(prev_start.lineno, lineno, prev_start.cause) + prev_starts = self.node_exits(body_node) + return prev_starts + + # Exit processing: process_*_exits + # + # These functions process the four kinds of jump exits: break, continue, + # raise, and return. To figure out where an exit goes, we have to look at + # the block stack context. For example, a break will jump to the nearest + # enclosing loop block, or the nearest enclosing finally block, whichever + # is nearer. + + def process_break_exits(self, exits: set[ArcStart]) -> None: + """Add arcs due to jumps from `exits` being breaks.""" + for block in self.nearest_blocks(): # pragma: always breaks + if block.process_break_exits(exits, self.add_arc): + break + + def process_continue_exits(self, exits: set[ArcStart]) -> None: + """Add arcs due to jumps from `exits` being continues.""" + for block in self.nearest_blocks(): # pragma: always breaks + if block.process_continue_exits(exits, self.add_arc): + break + + def process_raise_exits(self, exits: set[ArcStart]) -> None: + """Add arcs due to jumps from `exits` being raises.""" + for block in self.nearest_blocks(): + if block.process_raise_exits(exits, self.add_arc): + break + + def process_return_exits(self, exits: set[ArcStart]) -> None: + """Add arcs due to jumps from `exits` being returns.""" + for block in self.nearest_blocks(): # pragma: always breaks + if block.process_return_exits(exits, self.add_arc): + break + + # Node handlers: _handle__* + # + # Each handler deals with a specific AST node type, dispatched from + # node_exits. Handlers return the set of exits from that node, and can + # also call self.add_arc to record arcs they find. These functions mirror + # the Python semantics of each syntactic construct. See the docstring + # for node_exits to understand the concept of exits from a node. + # + # Every node type that represents a statement should have a handler, or it + # should be listed in OK_TO_DEFAULT. + + def _handle__Break(self, node: ast.Break) -> set[ArcStart]: + here = self.line_for_node(node) + break_start = ArcStart(here, cause="the break on line {lineno} wasn't executed") + self.process_break_exits({break_start}) + return set() + + def _handle_decorated(self, node: ast.FunctionDef) -> set[ArcStart]: + """Add arcs for things that can be decorated (classes and functions).""" + main_line: TLineNo = node.lineno + last: TLineNo | None = node.lineno + decs = node.decorator_list + if decs: + last = None + for dec_node in decs: + dec_start = self.line_for_node(dec_node) + if last is not None and dec_start != last: + self.add_arc(last, dec_start) + last = dec_start + assert last is not None + self.add_arc(last, main_line) + last = main_line + # The definition line may have been missed, but we should have it + # in `self.statements`. For some constructs, `line_for_node` is + # not what we'd think of as the first line in the statement, so map + # it to the first one. + assert node.body, f"Oops: {node.body = } in {self.filename}@{node.lineno}" + # The body is handled in collect_arcs. + assert last is not None + return {ArcStart(last)} + + _handle__ClassDef = _handle_decorated + + def _handle__Continue(self, node: ast.Continue) -> set[ArcStart]: + here = self.line_for_node(node) + continue_start = ArcStart(here, cause="the continue on line {lineno} wasn't executed") + self.process_continue_exits({continue_start}) + return set() + + def _handle__For(self, node: ast.For) -> set[ArcStart]: + start = self.line_for_node(node.iter) + self.block_stack.append(LoopBlock(start=start)) + from_start = ArcStart(start, cause="the loop on line {lineno} never started") + exits = self.process_body(node.body, from_start=from_start) + # Any exit from the body will go back to the top of the loop. + for xit in exits: + self.add_arc(xit.lineno, start, xit.cause) + my_block = self.block_stack.pop() + assert isinstance(my_block, LoopBlock) + exits = my_block.break_exits + from_start = ArcStart(start, cause="the loop on line {lineno} didn't complete") + if node.orelse: + else_exits = self.process_body(node.orelse, from_start=from_start) + exits |= else_exits + else: + # No else clause: exit from the for line. + exits.add(from_start) + return exits + + _handle__AsyncFor = _handle__For + + _handle__FunctionDef = _handle_decorated + _handle__AsyncFunctionDef = _handle_decorated + + def _handle__If(self, node: ast.If) -> set[ArcStart]: + start = self.line_for_node(node.test) + constant_test, val = is_constant_test_expr(node.test) + exits = set() + if not constant_test or val: + from_start = ArcStart(start, cause="the condition on line {lineno} was never true") + exits |= self.process_body(node.body, from_start=from_start) + if not constant_test or not val: + from_start = ArcStart(start, cause="the condition on line {lineno} was always true") + exits |= self.process_body(node.orelse, from_start=from_start) + return exits + + def _handle__Match(self, node: ast.Match) -> set[ArcStart]: + start = self.line_for_node(node) + last_start = start + exits = set() + for case in node.cases: + case_start = self.line_for_node(case.pattern) + self.add_arc(last_start, case_start, "the pattern on line {lineno} always matched") + from_start = ArcStart( + case_start, + cause="the pattern on line {lineno} never matched", + ) + exits |= self.process_body(case.body, from_start=from_start) + last_start = case_start + + # case is now the last case, check for wildcard match. + pattern = case.pattern # pylint: disable=undefined-loop-variable + while isinstance(pattern, ast.MatchOr): + pattern = pattern.patterns[-1] + while isinstance(pattern, ast.MatchAs) and pattern.pattern is not None: + pattern = pattern.pattern + had_wildcard = ( + isinstance(pattern, ast.MatchAs) and pattern.pattern is None and case.guard is None # pylint: disable=undefined-loop-variable + ) + + if not had_wildcard: + exits.add( + ArcStart(case_start, cause="the pattern on line {lineno} always matched"), + ) + return exits + + def _handle__Raise(self, node: ast.Raise) -> set[ArcStart]: + here = self.line_for_node(node) + raise_start = ArcStart(here, cause="the raise on line {lineno} wasn't executed") + self.process_raise_exits({raise_start}) + # `raise` statement jumps away, no exits from here. + return set() + + def _handle__Return(self, node: ast.Return) -> set[ArcStart]: + here = self.line_for_node(node) + return_start = ArcStart(here, cause="the return on line {lineno} wasn't executed") + self.process_return_exits({return_start}) + # `return` statement jumps away, no exits from here. + return set() + + def _handle__Try(self, node: ast.Try) -> set[ArcStart]: + if node.handlers: + handler_start = self.line_for_node(node.handlers[0]) + else: + handler_start = None + + if node.finalbody: + final_start = self.line_for_node(node.finalbody[0]) + else: + final_start = None + + # This is true by virtue of Python syntax: have to have either except + # or finally, or both. + assert handler_start is not None or final_start is not None + try_block = TryBlock(handler_start, final_start) + self.block_stack.append(try_block) + + start = self.line_for_node(node) + exits = self.process_body(node.body, from_start=ArcStart(start)) + + # We're done with the `try` body, so this block no longer handles + # exceptions. We keep the block so the `finally` clause can pick up + # flows from the handlers and `else` clause. + if node.finalbody: + try_block.handler_start = None + else: + self.block_stack.pop() + + handler_exits: set[ArcStart] = set() + + if node.handlers: + for handler_node in node.handlers: + handler_start = self.line_for_node(handler_node) + from_cause = "the exception caught by line {lineno} didn't happen" + from_start = ArcStart(handler_start, cause=from_cause) + handler_exits |= self.process_body(handler_node.body, from_start=from_start) + + if node.orelse: + exits = self.process_body(node.orelse, prev_starts=exits) + + exits |= handler_exits + + if node.finalbody: + self.block_stack.pop() + final_from = exits + + final_exits = self.process_body(node.finalbody, prev_starts=final_from) + + if exits: + # The finally clause's exits are only exits for the try block + # as a whole if the try block had some exits to begin with. + exits = final_exits + + return exits + + def _handle__While(self, node: ast.While) -> set[ArcStart]: + start = to_top = self.line_for_node(node.test) + constant_test, _ = is_constant_test_expr(node.test) + self.block_stack.append(LoopBlock(start=to_top)) + from_start = ArcStart(start, cause="the condition on line {lineno} was never true") + exits = self.process_body(node.body, from_start=from_start) + for xit in exits: + self.add_arc(xit.lineno, to_top, xit.cause) + exits = set() + my_block = self.block_stack.pop() + assert isinstance(my_block, LoopBlock) + exits.update(my_block.break_exits) + from_start = ArcStart(start, cause="the condition on line {lineno} was always true") + if node.orelse: + else_exits = self.process_body(node.orelse, from_start=from_start) + exits |= else_exits + else: + # No `else` clause: you can exit from the start. + if not constant_test: + exits.add(from_start) + return exits + + def _handle__With(self, node: ast.With) -> set[ArcStart]: + if env.PYBEHAVIOR.exit_with_through_ctxmgr: + starts = [self.line_for_node(item.context_expr) for item in node.items] + else: + starts = [self.line_for_node(node)] + for start in starts: + self.current_with_starts.add(start) + self.all_with_starts.add(start) + + exits = self.process_body(node.body, from_start=ArcStart(starts[-1])) + + start = starts[-1] + self.current_with_starts.remove(start) + with_exit = {ArcStart(start)} + if exits: + for xit in exits: + self.add_arc(xit.lineno, start) + self.with_exits.add((xit.lineno, start)) + exits = with_exit + + return exits + + _handle__AsyncWith = _handle__With diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/patch.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/patch.py new file mode 100644 index 0000000..42ce036 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/patch.py @@ -0,0 +1,166 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Invasive patches for coverage.py.""" + +from __future__ import annotations + +import atexit +import contextlib +import os +import site +from pathlib import Path +from typing import TYPE_CHECKING, Any, NoReturn + +from coverage import env +from coverage.debug import NoDebugging, DevNullDebug +from coverage.exceptions import ConfigError, CoverageException + +if TYPE_CHECKING: + from coverage import Coverage + from coverage.config import CoverageConfig + from coverage.types import TDebugCtl + + +def apply_patches( + cov: Coverage, + config: CoverageConfig, + debug: TDebugCtl, + *, + make_pth_file: bool = True, +) -> None: + """Apply invasive patches requested by `[run] patch=`.""" + debug = debug if debug.should("patch") else DevNullDebug() + for patch in sorted(set(config.patch)): + match patch: + case "_exit": + _patch__exit(cov, debug) + + case "execv": + _patch_execv(cov, config, debug) + + case "fork": + _patch_fork(debug) + + case "subprocess": + _patch_subprocess(config, debug, make_pth_file) + + case _: + raise ConfigError(f"Unknown patch {patch!r}") + + +def _patch__exit(cov: Coverage, debug: TDebugCtl) -> None: + """Patch os._exit.""" + debug.write("Patching _exit") + + old_exit = os._exit + + def coverage_os_exit_patch(status: int) -> NoReturn: + with contextlib.suppress(Exception): + debug.write(f"Using _exit patch with {cov = }") + with contextlib.suppress(Exception): + cov.save() + old_exit(status) + + os._exit = coverage_os_exit_patch + + +def _patch_execv(cov: Coverage, config: CoverageConfig, debug: TDebugCtl) -> None: + """Patch the execv family of functions.""" + if env.WINDOWS: + raise CoverageException("patch=execv isn't supported yet on Windows.") + + debug.write("Patching execv") + + def make_execv_patch(fname: str, old_execv: Any) -> Any: + def coverage_execv_patch(*args: Any, **kwargs: Any) -> Any: + with contextlib.suppress(Exception): + debug.write(f"Using execv patch for {fname} with {cov = }") + with contextlib.suppress(Exception): + cov.save() + + if fname.endswith("e"): + # Assume the `env` argument is passed positionally. + new_env = args[-1] + # Pass our configuration in the new environment. + new_env["COVERAGE_PROCESS_CONFIG"] = config.serialize() + if env.TESTING: + # The subprocesses need to use the same core as the main process. + new_env["COVERAGE_CORE"] = os.getenv("COVERAGE_CORE") + + # When testing locally, we need to honor the pyc file location + # or they get written to the .tox directories and pollute the + # next run with a different core. + if (cache_prefix := os.getenv("PYTHONPYCACHEPREFIX")) is not None: + new_env["PYTHONPYCACHEPREFIX"] = cache_prefix + + # Without this, it fails on PyPy and Ubuntu. + new_env["PATH"] = os.getenv("PATH") + old_execv(*args, **kwargs) + + return coverage_execv_patch + + # All the exec* and spawn* functions eventually call execv or execve. + os.execv = make_execv_patch("execv", os.execv) + os.execve = make_execv_patch("execve", os.execve) + + +def _patch_fork(debug: TDebugCtl) -> None: + """Ensure Coverage is properly reset after a fork.""" + from coverage.control import _after_fork_in_child + + if env.WINDOWS: + raise CoverageException("patch=fork isn't supported yet on Windows.") + + debug.write("Patching fork") + os.register_at_fork(after_in_child=_after_fork_in_child) + + +def _patch_subprocess(config: CoverageConfig, debug: TDebugCtl, make_pth_file: bool) -> None: + """Write .pth files and set environment vars to measure subprocesses.""" + debug.write("Patching subprocess") + + if make_pth_file: + pth_files = create_pth_files(debug) + + def delete_pth_files() -> None: + for p in pth_files: + debug.write(f"Deleting subprocess .pth file: {str(p)!r}") + p.unlink(missing_ok=True) + + atexit.register(delete_pth_files) + assert config.config_file is not None + os.environ["COVERAGE_PROCESS_CONFIG"] = config.serialize() + + +# Writing .pth files is not obvious. On Windows, getsitepackages() returns two +# directories. A .pth file in the first will be run, but coverage isn't +# importable yet. We write into all the places we can, but with defensive +# import code. + +PTH_CODE = """\ +try: + import coverage +except: + pass +else: + coverage.process_startup() +""" + +PTH_TEXT = f"import sys; exec({PTH_CODE!r})\n" + + +def create_pth_files(debug: TDebugCtl = NoDebugging()) -> list[Path]: + """Create .pth files for measuring subprocesses.""" + pth_files = [] + for pth_dir in site.getsitepackages(): + pth_file = Path(pth_dir) / f"subcover_{os.getpid()}.pth" + try: + if debug.should("patch"): + debug.write(f"Writing subprocess .pth file: {str(pth_file)!r}") + pth_file.write_text(PTH_TEXT, encoding="utf-8") + except OSError: # pragma: cant happen + continue + else: + pth_files.append(pth_file) + return pth_files diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/phystokens.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/phystokens.py new file mode 100644 index 0000000..bac06c0 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/phystokens.py @@ -0,0 +1,197 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Better tokenizing for coverage.py.""" + +from __future__ import annotations + +import ast +import io +import keyword +import re +import sys +import token +import tokenize +from collections.abc import Iterable + +from coverage import env +from coverage.types import TLineNo, TSourceTokenLines + +TokenInfos = Iterable[tokenize.TokenInfo] + + +def _phys_tokens(toks: TokenInfos) -> TokenInfos: + """Return all physical tokens, even line continuations. + + tokenize.generate_tokens() doesn't return a token for the backslash that + continues lines. This wrapper provides those tokens so that we can + re-create a faithful representation of the original source. + + Returns the same values as generate_tokens() + + """ + last_line: str | None = None + last_lineno = -1 + last_ttext: str = "" + for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks: + if last_lineno != elineno: + if last_line and last_line.endswith("\\\n"): + # We are at the beginning of a new line, and the last line + # ended with a backslash. We probably have to inject a + # backslash token into the stream. Unfortunately, there's more + # to figure out. This code:: + # + # usage = """\ + # HEY THERE + # """ + # + # triggers this condition, but the token text is:: + # + # '"""\\\nHEY THERE\n"""' + # + # so we need to figure out if the backslash is already in the + # string token or not. + inject_backslash = True + if last_ttext.endswith("\\"): + inject_backslash = False + elif ttype == token.STRING: + if ( # pylint: disable=simplifiable-if-statement + last_line.endswith("\\\n") + and last_line.rstrip(" \\\n").endswith(last_ttext) + ): + # Deal with special cases like such code:: + # + # a = ["aaa",\ # there may be zero or more blanks between "," and "\". + # "bbb \ + # ccc"] + # + inject_backslash = True + else: + # It's a multi-line string and the first line ends with + # a backslash, so we don't need to inject another. + inject_backslash = False + elif env.PYBEHAVIOR.fstring_syntax and ttype == token.FSTRING_MIDDLE: + inject_backslash = False + if inject_backslash: + # Figure out what column the backslash is in. + ccol = len(last_line.split("\n")[-2]) - 1 + # Yield the token, with a fake token type. + yield tokenize.TokenInfo( + 99999, + "\\\n", + (slineno, ccol), + (slineno, ccol + 2), + last_line, + ) + last_line = ltext + if ttype not in (tokenize.NEWLINE, tokenize.NL): + last_ttext = ttext + yield tokenize.TokenInfo(ttype, ttext, (slineno, scol), (elineno, ecol), ltext) + last_lineno = elineno + + +def find_soft_key_lines(source: str) -> set[TLineNo]: + """Helper for finding lines with soft keywords, like match/case lines.""" + soft_key_lines: set[TLineNo] = set() + + for node in ast.walk(ast.parse(source)): + if isinstance(node, ast.Match): + soft_key_lines.add(node.lineno) + for case in node.cases: + soft_key_lines.add(case.pattern.lineno) + elif sys.version_info >= (3, 12) and isinstance(node, ast.TypeAlias): + soft_key_lines.add(node.lineno) + + return soft_key_lines + + +def source_token_lines(source: str) -> TSourceTokenLines: + """Generate a series of lines, one for each line in `source`. + + Each line is a list of pairs, each pair is a token:: + + [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ] + + Each pair has a token class, and the token text. + + If you concatenate all the token texts, and then join them with newlines, + you should have your original `source` back, with two differences: + trailing white space is not preserved, and a final line with no newline + is indistinguishable from a final line with a newline. + + """ + + ws_tokens = {token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL} + line: list[tuple[str, str]] = [] + col = 0 + + source = source.expandtabs(8).replace("\r\n", "\n") + tokgen = generate_tokens(source) + + soft_key_lines = find_soft_key_lines(source) + + for ttype, ttext, (sline, scol), (_, ecol), _ in _phys_tokens(tokgen): + mark_start = True + for part in re.split("(\n)", ttext): + if part == "\n": + yield line + line = [] + col = 0 + mark_end = False + elif part == "": + mark_end = False + elif ttype in ws_tokens: + mark_end = False + else: + if env.PYBEHAVIOR.fstring_syntax and ttype == token.FSTRING_MIDDLE: + part = part.replace("{", "{{").replace("}", "}}") + ecol = scol + len(part) + if mark_start and scol > col: + line.append(("ws", " " * (scol - col))) + mark_start = False + tok_class = tokenize.tok_name.get(ttype, "xx").lower()[:3] + if ttype == token.NAME: + if keyword.iskeyword(ttext): + # Hard keywords are always keywords. + tok_class = "key" + elif keyword.issoftkeyword(ttext): + # Soft keywords appear at the start of their line. + if len(line) == 0: + is_start_of_line = True + elif (len(line) == 1) and line[0][0] == "ws": + is_start_of_line = True + else: + is_start_of_line = False + if is_start_of_line and sline in soft_key_lines: + tok_class = "key" + line.append((tok_class, part)) + mark_end = True + scol = 0 + if mark_end: + col = ecol + + if line: + yield line + + +def generate_tokens(text: str) -> TokenInfos: + """A helper around `tokenize.generate_tokens`. + + Originally this was used to cache the results, but it didn't seem to make + reporting go faster, and caused issues with using too much memory. + + """ + readline = io.StringIO(text).readline + return tokenize.generate_tokens(readline) + + +def source_encoding(source: bytes) -> str: + """Determine the encoding for `source`, according to PEP 263. + + `source` is a byte string: the text of the program. + + Returns a string, the name of the encoding. + + """ + readline = iter(source.splitlines(True)).__next__ + return tokenize.detect_encoding(readline)[0] diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/plugin.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/plugin.py new file mode 100644 index 0000000..bdfa317 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/plugin.py @@ -0,0 +1,617 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +""" +.. versionadded:: 4.0 + +Plug-in interfaces for coverage.py. + +Coverage.py supports a few different kinds of plug-ins that change its +behavior: + +* File tracers implement tracing of non-Python file types. + +* Configurers add custom configuration, using Python code to change the + configuration. + +* Dynamic context switchers decide when the dynamic context has changed, for + example, to record what test function produced the coverage. + +To write a coverage.py plug-in, create a module with a subclass of +:class:`~coverage.CoveragePlugin`. You will override methods in your class to +participate in various aspects of coverage.py's processing. +Different types of plug-ins have to override different methods. + +Any plug-in can optionally implement :meth:`~coverage.CoveragePlugin.sys_info` +to provide debugging information about their operation. + +Your module must also contain a ``coverage_init`` function that registers an +instance of your plug-in class:: + + import coverage + + class MyPlugin(coverage.CoveragePlugin): + ... + + def coverage_init(reg, options): + reg.add_file_tracer(MyPlugin()) + +You use the `reg` parameter passed to your ``coverage_init`` function to +register your plug-in object. The registration method you call depends on +what kind of plug-in it is. + +If your plug-in takes options, the `options` parameter is a dictionary of your +plug-in's options from the coverage.py configuration file. Use them however +you want to configure your object before registering it. + +Coverage.py will store its own information on your plug-in object, using +attributes whose names start with ``_coverage_``. Don't be startled. + +.. warning:: + Plug-ins are imported by coverage.py before it begins measuring code. + If you write a plugin in your own project, it might import your product + code before coverage.py can start measuring. This can result in your + own code being reported as missing. + + One solution is to put your plugins in your project tree, but not in + your importable Python package. + + +.. _file_tracer_plugins: + +File Tracers +============ + +File tracers implement measurement support for non-Python files. File tracers +implement the :meth:`~coverage.CoveragePlugin.file_tracer` method to claim +files and the :meth:`~coverage.CoveragePlugin.file_reporter` method to report +on those files. + +In your ``coverage_init`` function, use the ``add_file_tracer`` method to +register your file tracer. + + +.. _configurer_plugins: + +Configurers +=========== + +.. versionadded:: 4.5 + +Configurers modify the configuration of coverage.py during start-up. +Configurers implement the :meth:`~coverage.CoveragePlugin.configure` method to +change the configuration. + +In your ``coverage_init`` function, use the ``add_configurer`` method to +register your configurer. + + +.. _dynamic_context_plugins: + +Dynamic Context Switchers +========================= + +.. versionadded:: 5.0 + +Dynamic context switcher plugins implement the +:meth:`~coverage.CoveragePlugin.dynamic_context` method to dynamically compute +the context label for each measured frame. + +Computed context labels are useful when you want to group measured data without +modifying the source code. + +For example, you could write a plugin that checks `frame.f_code` to inspect +the currently executed method, and set the context label to a fully qualified +method name if it's an instance method of `unittest.TestCase` and the method +name starts with 'test'. Such a plugin would provide basic coverage grouping +by test and could be used with test runners that have no built-in coveragepy +support. + +In your ``coverage_init`` function, use the ``add_dynamic_context`` method to +register your dynamic context switcher. + +""" + +from __future__ import annotations + +import dataclasses +import functools +from collections.abc import Iterable +from types import FrameType +from typing import Any + +from coverage import files +from coverage.misc import _needs_to_implement +from coverage.types import TArc, TConfigurable, TLineNo, TSourceTokenLines + + +class CoveragePlugin: + """Base class for coverage.py plug-ins.""" + + _coverage_plugin_name: str + _coverage_enabled: bool + + def file_tracer(self, filename: str) -> FileTracer | None: # pylint: disable=unused-argument + """Get a :class:`FileTracer` object for a file. + + Plug-in type: file tracer. + + Every Python source file is offered to your plug-in to give it a chance + to take responsibility for tracing the file. If your plug-in can + handle the file, it should return a :class:`FileTracer` object. + Otherwise return None. + + There is no way to register your plug-in for particular files. + Instead, this method is invoked for all files as they are executed, + and the plug-in decides whether it can trace the file or not. + Be prepared for `filename` to refer to all kinds of files that have + nothing to do with your plug-in. + + The file name will be a Python file being executed. There are two + broad categories of behavior for a plug-in, depending on the kind of + files your plug-in supports: + + * Static file names: each of your original source files has been + converted into a distinct Python file. Your plug-in is invoked with + the Python file name, and it maps it back to its original source + file. + + * Dynamic file names: all of your source files are executed by the same + Python file. In this case, your plug-in implements + :meth:`FileTracer.dynamic_source_filename` to provide the actual + source file for each execution frame. + + `filename` is a string, the path to the file being considered. This is + the absolute real path to the file. If you are comparing to other + paths, be sure to take this into account. + + Returns a :class:`FileTracer` object to use to trace `filename`, or + None if this plug-in cannot trace this file. + + """ + return None + + def file_reporter( + self, + filename: str, # pylint: disable=unused-argument + ) -> FileReporter | str: # str should be Literal["python"] + """Get the :class:`FileReporter` class to use for a file. + + Plug-in type: file tracer. + + This will only be invoked if `filename` returns non-None from + :meth:`file_tracer`. It's an error to return None from this method. + + Returns a :class:`FileReporter` object to use to report on `filename`, + or the string `"python"` to have coverage.py treat the file as Python. + + """ + _needs_to_implement(self, "file_reporter") + + def dynamic_context( + self, + frame: FrameType, # pylint: disable=unused-argument + ) -> str | None: + """Get the dynamically computed context label for `frame`. + + Plug-in type: dynamic context. + + This method is invoked for each frame when outside of a dynamic + context, to see if a new dynamic context should be started. If it + returns a string, a new context label is set for this and deeper + frames. The dynamic context ends when this frame returns. + + Returns a string to start a new dynamic context, or None if no new + context should be started. + + """ + return None + + def find_executable_files( + self, + src_dir: str, # pylint: disable=unused-argument + ) -> Iterable[str]: + """Yield all of the executable files in `src_dir`, recursively. + + Plug-in type: file tracer. + + Executability is a plug-in-specific property, but generally means files + which would have been considered for coverage analysis, had they been + included automatically. + + Returns or yields a sequence of strings, the paths to files that could + have been executed, including files that had been executed. + + """ + return [] + + def configure(self, config: TConfigurable) -> None: + """Modify the configuration of coverage.py. + + Plug-in type: configurer. + + This method is called during coverage.py start-up, to give your plug-in + a chance to change the configuration. The `config` parameter is an + object with :meth:`~coverage.Coverage.get_option` and + :meth:`~coverage.Coverage.set_option` methods. Do not call any other + methods on the `config` object. + + """ + pass + + def sys_info(self) -> Iterable[tuple[str, Any]]: + """Get a list of information useful for debugging. + + Plug-in type: any. + + This method will be invoked for ``--debug=sys``. Your + plug-in can return any information it wants to be displayed. + + Returns a list of pairs: `[(name, value), ...]`. + + """ + return [] + + +class CoveragePluginBase: + """Plugins produce specialized objects, which point back to the original plugin.""" + + _coverage_plugin: CoveragePlugin + + +class FileTracer(CoveragePluginBase): + """Support needed for files during the execution phase. + + File tracer plug-ins implement subclasses of FileTracer to return from + their :meth:`~CoveragePlugin.file_tracer` method. + + You may construct this object from :meth:`CoveragePlugin.file_tracer` any + way you like. A natural choice would be to pass the file name given to + `file_tracer`. + + `FileTracer` objects should only be created in the + :meth:`CoveragePlugin.file_tracer` method. + + See :ref:`howitworks` for details of the different coverage.py phases. + + """ + + def source_filename(self) -> str: + """The source file name for this file. + + This may be any file name you like. A key responsibility of a plug-in + is to own the mapping from Python execution back to whatever source + file name was originally the source of the code. + + See :meth:`CoveragePlugin.file_tracer` for details about static and + dynamic file names. + + Returns the file name to credit with this execution. + + """ + _needs_to_implement(self, "source_filename") + + def has_dynamic_source_filename(self) -> bool: + """Does this FileTracer have dynamic source file names? + + FileTracers can provide dynamically determined file names by + implementing :meth:`dynamic_source_filename`. Invoking that function + is expensive. To determine whether to invoke it, coverage.py uses the + result of this function to know if it needs to bother invoking + :meth:`dynamic_source_filename`. + + See :meth:`CoveragePlugin.file_tracer` for details about static and + dynamic file names. + + Returns True if :meth:`dynamic_source_filename` should be called to get + dynamic source file names. + + """ + return False + + def dynamic_source_filename( + self, + filename: str, # pylint: disable=unused-argument + frame: FrameType, # pylint: disable=unused-argument + ) -> str | None: + """Get a dynamically computed source file name. + + Some plug-ins need to compute the source file name dynamically for each + frame. + + This function will not be invoked if + :meth:`has_dynamic_source_filename` returns False. + + Returns the source file name for this frame, or None if this frame + shouldn't be measured. + + """ + return None + + def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]: + """Get the range of source line numbers for a given a call frame. + + The call frame is examined, and the source line number in the original + file is returned. The return value is a pair of numbers, the starting + line number and the ending line number, both inclusive. For example, + returning (5, 7) means that lines 5, 6, and 7 should be considered + executed. + + This function might decide that the frame doesn't indicate any lines + from the source file were executed. Return (-1, -1) in this case to + tell coverage.py that no lines should be recorded for this frame. + + """ + lineno = frame.f_lineno + return lineno, lineno + + +@dataclasses.dataclass +class CodeRegion: + """Data for a region of code found by :meth:`FileReporter.code_regions`.""" + + #: The kind of region, like `"function"` or `"class"`. Must be one of the + #: singular values returned by :meth:`FileReporter.code_region_kinds`. + kind: str + + #: The name of the region. For example, a function or class name. + name: str + + #: The line in the source file to link to when navigating to the region. + #: Can be a line not mentioned in `lines`. + start: int + + #: The lines in the region. Should be lines that could be executed in the + #: region. For example, a class region includes all of the lines in the + #: methods of the class, but not the lines defining class attributes, since + #: they are executed on import, not as part of exercising the class. The + #: set can include non-executable lines like blanks and comments. + lines: set[int] + + def __lt__(self, other: CodeRegion) -> bool: + """To support sorting to make test-writing easier.""" + if self.name == other.name: + return min(self.lines) < min(other.lines) + return self.name < other.name + + +@functools.total_ordering +class FileReporter(CoveragePluginBase): + """Support needed for files during the analysis and reporting phases. + + File tracer plug-ins implement a subclass of `FileReporter`, and return + instances from their :meth:`CoveragePlugin.file_reporter` method. + + There are many methods here, but only :meth:`lines` is required, to provide + the set of executable lines in the file. + + See :ref:`howitworks` for details of the different coverage.py phases. + + """ + + def __init__(self, filename: str) -> None: + """Simple initialization of a `FileReporter`. + + The `filename` argument is the path to the file being reported. This + will be available as the `.filename` attribute on the object. Other + method implementations on this base class rely on this attribute. + + """ + self.filename = filename + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} filename={self.filename!r}>" + + def relative_filename(self) -> str: + """Get the relative file name for this file. + + This file path will be displayed in reports. The default + implementation will supply the actual project-relative file path. You + only need to supply this method if you have an unusual syntax for file + paths. + + """ + return files.relative_filename(self.filename) + + def source(self) -> str: + """Get the source for the file. + + Returns a Unicode string. + + The base implementation simply reads the `self.filename` file and + decodes it as UTF-8. Override this method if your file isn't readable + as a text file, or if you need other encoding support. + + """ + with open(self.filename, encoding="utf-8") as f: + return f.read() + + def lines(self) -> set[TLineNo]: + """Get the executable lines in this file. + + Your plug-in must determine which lines in the file were possibly + executable. This method returns a set of those line numbers. + + Returns a set of line numbers. + + """ + _needs_to_implement(self, "lines") + + def excluded_lines(self) -> set[TLineNo]: + """Get the excluded executable lines in this file. + + Your plug-in can use any method it likes to allow the user to exclude + executable lines from consideration. + + Returns a set of line numbers. + + The base implementation returns the empty set. + + """ + return set() + + def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]: + """Translate recorded lines into reported lines. + + Some file formats will want to report lines slightly differently than + they are recorded. For example, Python records the last line of a + multi-line statement, but reports are nicer if they mention the first + line. + + Your plug-in can optionally define this method to perform these kinds + of adjustment. + + `lines` is a sequence of integers, the recorded line numbers. + + Returns a set of integers, the adjusted line numbers. + + The base implementation returns the numbers unchanged. + + """ + return set(lines) + + def arcs(self) -> set[TArc]: + """Get the executable arcs in this file. + + To support branch coverage, your plug-in needs to be able to indicate + possible execution paths, as a set of line number pairs. Each pair is + a `(prev, next)` pair indicating that execution can transition from the + `prev` line number to the `next` line number. + + Returns a set of pairs of line numbers. The default implementation + returns an empty set. + + """ + return set() + + def no_branch_lines(self) -> set[TLineNo]: + """Get the lines excused from branch coverage in this file. + + Your plug-in can use any method it likes to allow the user to exclude + lines from consideration of branch coverage. + + Returns a set of line numbers. + + The base implementation returns the empty set. + + """ + return set() + + def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]: + """Translate recorded arcs into reported arcs. + + Similar to :meth:`translate_lines`, but for arcs. `arcs` is a set of + line number pairs. + + Returns a set of line number pairs. + + The default implementation returns `arcs` unchanged. + + """ + return set(arcs) + + def exit_counts(self) -> dict[TLineNo, int]: + """Get a count of exits from that each line. + + To determine which lines are branches, coverage.py looks for lines that + have more than one exit. This function creates a dict mapping each + executable line number to a count of how many exits it has. + + To be honest, this feels wrong, and should be refactored. Let me know + if you attempt to implement this method in your plug-in... + + """ + return {} + + def missing_arc_description( + self, + start: TLineNo, + end: TLineNo, + executed_arcs: Iterable[TArc] | None = None, # pylint: disable=unused-argument + ) -> str: + """Provide an English sentence describing a missing arc. + + The `start` and `end` arguments are the line numbers of the missing + arc. Negative numbers indicate entering or exiting code objects. + + The `executed_arcs` argument is a set of line number pairs, the arcs + that were executed in this file. + + By default, this simply returns the string "Line {start} didn't jump + to {end}". + + """ + return f"Line {start} didn't jump to line {end}" + + def arc_description( + self, + start: TLineNo, # pylint: disable=unused-argument + end: TLineNo, + ) -> str: + """Provide an English description of an arc's effect.""" + return f"jump to line {end}" + + def source_token_lines(self) -> TSourceTokenLines: + """Generate a series of tokenized lines, one for each line in `source`. + + These tokens are used for syntax-colored reports. + + Each line is a list of pairs, each pair is a token:: + + [("key", "def"), ("ws", " "), ("nam", "hello"), ("op", "("), ... ] + + Each pair has a token class, and the token text. The token classes + are: + + * ``"com"``: a comment + * ``"key"``: a keyword + * ``"nam"``: a name, or identifier + * ``"num"``: a number + * ``"op"``: an operator + * ``"str"``: a string literal + * ``"ws"``: some white space + * ``"txt"``: some other kind of text + + If you concatenate all the token texts, and then join them with + newlines, you should have your original source back. + + The default implementation simply returns each line tagged as + ``"txt"``. + + """ + for line in self.source().splitlines(): + yield [("txt", line)] + + def code_regions(self) -> Iterable[CodeRegion]: + """Identify regions in the source file for finer reporting than by file. + + Returns an iterable of :class:`CodeRegion` objects. The kinds reported + should be in the possibilities returned by :meth:`code_region_kinds`. + + """ + return [] + + def code_region_kinds(self) -> Iterable[tuple[str, str]]: + """Return the kinds of code regions this plugin can find. + + The returned pairs are the singular and plural forms of the kinds:: + + [ + ("function", "functions"), + ("class", "classes"), + ] + + This will usually be hard-coded, but could also differ by the specific + source file involved. + + """ + return [] + + def __eq__(self, other: Any) -> bool: + return isinstance(other, FileReporter) and self.filename == other.filename + + def __lt__(self, other: Any) -> bool: + return isinstance(other, FileReporter) and self.filename < other.filename + + # This object doesn't need to be hashed. + __hash__ = None # type: ignore[assignment] diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/plugin_support.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/plugin_support.py new file mode 100644 index 0000000..2374b18 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/plugin_support.py @@ -0,0 +1,299 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Support for plugins.""" + +from __future__ import annotations + +import os +import os.path +import sys +from collections.abc import Iterable, Iterator +from types import FrameType +from typing import Any, Callable + +from coverage.exceptions import PluginError +from coverage.misc import isolate_module +from coverage.plugin import CoveragePlugin, FileReporter, FileTracer +from coverage.types import TArc, TConfigurable, TDebugCtl, TLineNo, TPluginConfig, TSourceTokenLines + +os = isolate_module(os) + + +class Plugins: + """The currently loaded collection of coverage.py plugins.""" + + def __init__(self, debug: TDebugCtl | None = None) -> None: + self.order: list[CoveragePlugin] = [] + self.names: dict[str, CoveragePlugin] = {} + self.file_tracers: list[CoveragePlugin] = [] + self.configurers: list[CoveragePlugin] = [] + self.context_switchers: list[CoveragePlugin] = [] + + self.current_module: str | None = None + self.debug = debug + + def load_from_config( + self, + modules: Iterable[str], + config: TPluginConfig, + ) -> None: + """Load plugin modules, and read their settings from configuration.""" + + for module in modules: + self.current_module = module + __import__(module) + mod = sys.modules[module] + + coverage_init = getattr(mod, "coverage_init", None) + if not coverage_init: + raise PluginError( + f"Plugin module {module!r} didn't define a coverage_init function", + ) + + options = config.get_plugin_options(module) + coverage_init(self, options) + + self.current_module = None + + def load_from_callables( + self, + plugin_inits: Iterable[TCoverageInit], + ) -> None: + """Load plugins from callables provided.""" + for fn in plugin_inits: + fn(self) + + def add_file_tracer(self, plugin: CoveragePlugin) -> None: + """Add a file tracer plugin. + + `plugin` is an instance of a third-party plugin class. It must + implement the :meth:`CoveragePlugin.file_tracer` method. + + """ + self._add_plugin(plugin, self.file_tracers) + + def add_configurer(self, plugin: CoveragePlugin) -> None: + """Add a configuring plugin. + + `plugin` is an instance of a third-party plugin class. It must + implement the :meth:`CoveragePlugin.configure` method. + + """ + self._add_plugin(plugin, self.configurers) + + def add_dynamic_context(self, plugin: CoveragePlugin) -> None: + """Add a dynamic context plugin. + + `plugin` is an instance of a third-party plugin class. It must + implement the :meth:`CoveragePlugin.dynamic_context` method. + + """ + self._add_plugin(plugin, self.context_switchers) + + def add_noop(self, plugin: CoveragePlugin) -> None: + """Add a plugin that does nothing. + + This is only useful for testing the plugin support. + + """ + self._add_plugin(plugin, None) + + def _add_plugin( + self, + plugin: CoveragePlugin, + specialized: list[CoveragePlugin] | None, + ) -> None: + """Add a plugin object. + + `plugin` is a :class:`CoveragePlugin` instance to add. `specialized` + is a list to append the plugin to. + + """ + plugin_name = f"{self.current_module}.{plugin.__class__.__name__}" + if self.debug and self.debug.should("plugin"): + self.debug.write(f"Loaded plugin {self.current_module!r}: {plugin!r}") + labelled = LabelledDebug(f"plugin {self.current_module!r}", self.debug) + plugin = DebugPluginWrapper(plugin, labelled) + + plugin._coverage_plugin_name = plugin_name + plugin._coverage_enabled = True + self.order.append(plugin) + self.names[plugin_name] = plugin + if specialized is not None: + specialized.append(plugin) + + def __bool__(self) -> bool: + return bool(self.order) + + def __iter__(self) -> Iterator[CoveragePlugin]: + return iter(self.order) + + def get(self, plugin_name: str) -> CoveragePlugin: + """Return a plugin by name.""" + return self.names[plugin_name] + + +TCoverageInit = Callable[[Plugins], None] + + +class LabelledDebug: + """A Debug writer, but with labels for prepending to the messages.""" + + def __init__(self, label: str, debug: TDebugCtl, prev_labels: Iterable[str] = ()): + self.labels = list(prev_labels) + [label] + self.debug = debug + + def add_label(self, label: str) -> LabelledDebug: + """Add a label to the writer, and return a new `LabelledDebug`.""" + return LabelledDebug(label, self.debug, self.labels) + + def message_prefix(self) -> str: + """The prefix to use on messages, combining the labels.""" + prefixes = self.labels + [""] + return ":\n".join(" " * i + label for i, label in enumerate(prefixes)) + + def write(self, message: str) -> None: + """Write `message`, but with the labels prepended.""" + self.debug.write(f"{self.message_prefix()}{message}") + + +class DebugPluginWrapper(CoveragePlugin): + """Wrap a plugin, and use debug to report on what it's doing.""" + + def __init__(self, plugin: CoveragePlugin, debug: LabelledDebug) -> None: + super().__init__() + self.plugin = plugin + self.debug = debug + + def file_tracer(self, filename: str) -> FileTracer | None: + tracer = self.plugin.file_tracer(filename) + self.debug.write(f"file_tracer({filename!r}) --> {tracer!r}") + if tracer: + debug = self.debug.add_label(f"file {filename!r}") + tracer = DebugFileTracerWrapper(tracer, debug) + return tracer + + def file_reporter(self, filename: str) -> FileReporter | str: + reporter = self.plugin.file_reporter(filename) + assert isinstance(reporter, FileReporter) + self.debug.write(f"file_reporter({filename!r}) --> {reporter!r}") + if reporter: + debug = self.debug.add_label(f"file {filename!r}") + reporter = DebugFileReporterWrapper(filename, reporter, debug) + return reporter + + def dynamic_context(self, frame: FrameType) -> str | None: + context = self.plugin.dynamic_context(frame) + self.debug.write(f"dynamic_context({frame!r}) --> {context!r}") + return context + + def find_executable_files(self, src_dir: str) -> Iterable[str]: + executable_files = self.plugin.find_executable_files(src_dir) + self.debug.write(f"find_executable_files({src_dir!r}) --> {executable_files!r}") + return executable_files + + def configure(self, config: TConfigurable) -> None: + self.debug.write(f"configure({config!r})") + self.plugin.configure(config) + + def sys_info(self) -> Iterable[tuple[str, Any]]: + return self.plugin.sys_info() + + +class DebugFileTracerWrapper(FileTracer): + """A debugging `FileTracer`.""" + + def __init__(self, tracer: FileTracer, debug: LabelledDebug) -> None: + self.tracer = tracer + self.debug = debug + + def _show_frame(self, frame: FrameType) -> str: + """A short string identifying a frame, for debug messages.""" + filename = os.path.basename(frame.f_code.co_filename) + return f"{filename}@{frame.f_lineno}" + + def source_filename(self) -> str: + sfilename = self.tracer.source_filename() + self.debug.write(f"source_filename() --> {sfilename!r}") + return sfilename + + def has_dynamic_source_filename(self) -> bool: + has = self.tracer.has_dynamic_source_filename() + self.debug.write(f"has_dynamic_source_filename() --> {has!r}") + return has + + def dynamic_source_filename(self, filename: str, frame: FrameType) -> str | None: + dyn = self.tracer.dynamic_source_filename(filename, frame) + self.debug.write( + "dynamic_source_filename({!r}, {}) --> {!r}".format( + filename, + self._show_frame(frame), + dyn, + ) + ) + return dyn + + def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]: + pair = self.tracer.line_number_range(frame) + self.debug.write(f"line_number_range({self._show_frame(frame)}) --> {pair!r}") + return pair + + +class DebugFileReporterWrapper(FileReporter): + """A debugging `FileReporter`.""" + + def __init__(self, filename: str, reporter: FileReporter, debug: LabelledDebug) -> None: + super().__init__(filename) + self.reporter = reporter + self.debug = debug + + def relative_filename(self) -> str: + ret = self.reporter.relative_filename() + self.debug.write(f"relative_filename() --> {ret!r}") + return ret + + def lines(self) -> set[TLineNo]: + ret = self.reporter.lines() + self.debug.write(f"lines() --> {ret!r}") + return ret + + def excluded_lines(self) -> set[TLineNo]: + ret = self.reporter.excluded_lines() + self.debug.write(f"excluded_lines() --> {ret!r}") + return ret + + def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]: + ret = self.reporter.translate_lines(lines) + self.debug.write(f"translate_lines({lines!r}) --> {ret!r}") + return ret + + def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]: + ret = self.reporter.translate_arcs(arcs) + self.debug.write(f"translate_arcs({arcs!r}) --> {ret!r}") + return ret + + def no_branch_lines(self) -> set[TLineNo]: + ret = self.reporter.no_branch_lines() + self.debug.write(f"no_branch_lines() --> {ret!r}") + return ret + + def exit_counts(self) -> dict[TLineNo, int]: + ret = self.reporter.exit_counts() + self.debug.write(f"exit_counts() --> {ret!r}") + return ret + + def arcs(self) -> set[TArc]: + ret = self.reporter.arcs() + self.debug.write(f"arcs() --> {ret!r}") + return ret + + def source(self) -> str: + ret = self.reporter.source() + self.debug.write(f"source() --> {len(ret)} chars") + return ret + + def source_token_lines(self) -> TSourceTokenLines: + ret = list(self.reporter.source_token_lines()) + self.debug.write(f"source_token_lines() --> {len(ret)} tokens") + return ret diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/py.typed b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/py.typed new file mode 100644 index 0000000..bacd23a --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561 to indicate that this package has type hints. diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/python.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/python.py new file mode 100644 index 0000000..52002da --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/python.py @@ -0,0 +1,269 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Python source expertise for coverage.py""" + +from __future__ import annotations + +import os.path +import types +import zipimport +from collections.abc import Iterable +from typing import TYPE_CHECKING + +from coverage import env +from coverage.exceptions import CoverageException, NoSource +from coverage.files import canonical_filename, relative_filename, zip_location +from coverage.misc import isolate_module, join_regex +from coverage.parser import PythonParser +from coverage.phystokens import source_encoding, source_token_lines +from coverage.plugin import CodeRegion, FileReporter +from coverage.regions import code_regions +from coverage.types import TArc, TLineNo, TMorf, TSourceTokenLines + +if TYPE_CHECKING: + from coverage import Coverage + +os = isolate_module(os) + + +def read_python_source(filename: str) -> bytes: + """Read the Python source text from `filename`. + + Returns bytes. + + """ + with open(filename, "rb") as f: + source = f.read() + + return source.replace(b"\r\n", b"\n").replace(b"\r", b"\n") + + +def get_python_source(filename: str) -> str: + """Return the source code, as unicode.""" + base, ext = os.path.splitext(filename) + if ext == ".py" and env.WINDOWS: + exts = [".py", ".pyw"] + else: + exts = [ext] + + source_bytes: bytes | None + for ext in exts: + try_filename = base + ext + if os.path.exists(try_filename): + # A regular text file: open it. + source_bytes = read_python_source(try_filename) + break + + # Maybe it's in a zip file? + source_bytes = get_zip_bytes(try_filename) + if source_bytes is not None: + break + else: + # Couldn't find source. + raise NoSource(f"No source for code: '{filename}'.", slug="no-source") + + # Replace \f because of http://bugs.python.org/issue19035 + source_bytes = source_bytes.replace(b"\f", b" ") + source = source_bytes.decode(source_encoding(source_bytes), "replace") + + # Python code should always end with a line with a newline. + if source and source[-1] != "\n": + source += "\n" + + return source + + +def get_zip_bytes(filename: str) -> bytes | None: + """Get data from `filename` if it is a zip file path. + + Returns the bytestring data read from the zip file, or None if no zip file + could be found or `filename` isn't in it. The data returned will be + an empty string if the file is empty. + + """ + zipfile_inner = zip_location(filename) + if zipfile_inner is not None: + zipfile, inner = zipfile_inner + try: + zi = zipimport.zipimporter(zipfile) + except zipimport.ZipImportError: + return None + try: + data = zi.get_data(inner) + except OSError: + return None + return data + return None + + +def source_for_file(filename: str) -> str: + """Return the source filename for `filename`. + + Given a file name being traced, return the best guess as to the source + file to attribute it to. + + """ + if filename.endswith(".py"): + # .py files are themselves source files. + return filename + + elif filename.endswith((".pyc", ".pyo")): + # Bytecode files probably have source files near them. + py_filename = filename[:-1] + if os.path.exists(py_filename): + # Found a .py file, use that. + return py_filename + if env.WINDOWS: + # On Windows, it could be a .pyw file. + pyw_filename = py_filename + "w" + if os.path.exists(pyw_filename): + return pyw_filename + # Didn't find source, but it's probably the .py file we want. + return py_filename + + # No idea, just use the file name as-is. + return filename + + +def source_for_morf(morf: TMorf) -> str: + """Get the source filename for the module-or-file `morf`.""" + if hasattr(morf, "__file__") and morf.__file__: + filename = morf.__file__ + elif isinstance(morf, types.ModuleType): + # A module should have had .__file__, otherwise we can't use it. + # This could be a PEP-420 namespace package. + raise CoverageException(f"Module {morf} has no file") + else: + filename = morf + + filename = source_for_file(filename) + return filename + + +class PythonFileReporter(FileReporter): + """Report support for a Python file.""" + + def __init__(self, morf: TMorf, coverage: Coverage | None = None) -> None: + self.coverage = coverage + + filename = source_for_morf(morf) + + fname = filename + canonicalize = True + if self.coverage is not None: + if self.coverage.config.relative_files: + canonicalize = False + if canonicalize: + fname = canonical_filename(filename) + super().__init__(fname) + + if hasattr(morf, "__name__"): + name = morf.__name__.replace(".", os.sep) + if os.path.basename(filename).startswith("__init__."): + name += os.sep + "__init__" + name += ".py" + else: + name = relative_filename(filename) + self.relname = name + + self._source: str | None = None + self._parser: PythonParser | None = None + self._excluded = None + + def __repr__(self) -> str: + return f"" + + def relative_filename(self) -> str: + return self.relname + + @property + def parser(self) -> PythonParser: + """Lazily create a :class:`PythonParser`.""" + assert self.coverage is not None + if self._parser is None: + self._parser = PythonParser( + filename=self.filename, + exclude=self.coverage._exclude_regex("exclude"), + ) + self._parser.parse_source() + return self._parser + + def lines(self) -> set[TLineNo]: + """Return the line numbers of statements in the file.""" + return self.parser.statements + + def multiline_map(self) -> dict[TLineNo, TLineNo]: + """A map of line numbers to first-line in a multi-line statement.""" + return self.parser.multiline_map + + def excluded_lines(self) -> set[TLineNo]: + """Return the line numbers of statements in the file.""" + return self.parser.excluded + + def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]: + return self.parser.translate_lines(lines) + + def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]: + return self.parser.translate_arcs(arcs) + + def no_branch_lines(self) -> set[TLineNo]: + assert self.coverage is not None + no_branch = self.parser.lines_matching( + join_regex(self.coverage.config.partial_list + self.coverage.config.partial_always_list) + ) + return no_branch + + def arcs(self) -> set[TArc]: + return self.parser.arcs() + + def exit_counts(self) -> dict[TLineNo, int]: + return self.parser.exit_counts() + + def missing_arc_description( + self, + start: TLineNo, + end: TLineNo, + executed_arcs: Iterable[TArc] | None = None, + ) -> str: + return self.parser.missing_arc_description(start, end) + + def arc_description(self, start: TLineNo, end: TLineNo) -> str: + return self.parser.arc_description(start, end) + + def source(self) -> str: + if self._source is None: + self._source = get_python_source(self.filename) + return self._source + + def should_be_python(self) -> bool: + """Does it seem like this file should contain Python? + + This is used to decide if a file reported as part of the execution of + a program was really likely to have contained Python in the first + place. + + """ + # Get the file extension. + _, ext = os.path.splitext(self.filename) + + # Anything named *.py* should be Python. + if ext.startswith(".py"): + return True + # A file with no extension should be Python. + if not ext: + return True + # Everything else is probably not Python. + return False + + def source_token_lines(self) -> TSourceTokenLines: + return source_token_lines(self.source()) + + def code_regions(self) -> Iterable[CodeRegion]: + return code_regions(self.source()) + + def code_region_kinds(self) -> Iterable[tuple[str, str]]: + return [ + ("function", "functions"), + ("class", "classes"), + ] diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/pytracer.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/pytracer.py new file mode 100644 index 0000000..977d8e0 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/pytracer.py @@ -0,0 +1,369 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Raw data collector for coverage.py.""" + +from __future__ import annotations + +import atexit +import dis +import itertools +import sys +import threading +from types import FrameType, ModuleType +from typing import Any, Callable, cast + +from coverage import env +from coverage.types import ( + TArc, + TFileDisposition, + TLineNo, + Tracer, + TShouldStartContextFn, + TShouldTraceFn, + TTraceData, + TTraceFileData, + TTraceFn, + TWarnFn, +) + +# I don't understand why, but if we use `cast(set[TLineNo], ...)` inside +# the _trace() function, we get some strange behavior on PyPy 3.10. +# Assigning these names here and using them below fixes the problem. +# See https://github.com/coveragepy/coveragepy/issues/1902 +set_TLineNo = set[TLineNo] +set_TArc = set[TArc] + + +# We need the YIELD_VALUE opcode below, in a comparison-friendly form. +# PYVERSIONS: RESUME is new in Python3.11 +RESUME = dis.opmap.get("RESUME") +RETURN_VALUE = dis.opmap["RETURN_VALUE"] +if RESUME is None: + YIELD_VALUE = dis.opmap["YIELD_VALUE"] + YIELD_FROM = dis.opmap["YIELD_FROM"] + YIELD_FROM_OFFSET = 0 if env.PYPY else 2 +else: + YIELD_VALUE = YIELD_FROM = YIELD_FROM_OFFSET = -1 + +# When running meta-coverage, this file can try to trace itself, which confuses +# everything. Don't trace ourselves. + +THIS_FILE = __file__.rstrip("co") + + +class PyTracer(Tracer): + """Python implementation of the raw data tracer.""" + + # Because of poor implementations of trace-function-manipulating tools, + # the Python trace function must be kept very simple. In particular, there + # must be only one function ever set as the trace function, both through + # sys.settrace, and as the return value from the trace function. Put + # another way, the trace function must always return itself. It cannot + # swap in other functions, or return None to avoid tracing a particular + # frame. + # + # The trace manipulator that introduced this restriction is DecoratorTools, + # which sets a trace function, and then later restores the pre-existing one + # by calling sys.settrace with a function it found in the current frame. + # + # Systems that use DecoratorTools (or similar trace manipulations) must use + # PyTracer to get accurate results. The command-line --timid argument is + # used to force the use of this tracer. + + tracer_ids = itertools.count() + + def __init__(self) -> None: + # Which tracer are we? + self.id = next(self.tracer_ids) + + # Attributes set from the collector: + self.data: TTraceData + self.trace_arcs = False + self.should_trace: TShouldTraceFn + self.should_trace_cache: dict[str, TFileDisposition | None] + self.should_start_context: TShouldStartContextFn | None = None + self.switch_context: Callable[[str | None], None] | None = None + self.lock_data: Callable[[], None] + self.unlock_data: Callable[[], None] + self.warn: TWarnFn + + # The threading module to use, if any. + self.threading: ModuleType | None = None + + self.cur_file_data: TTraceFileData | None = None + self.last_line: TLineNo = 0 + self.cur_file_name: str | None = None + self.context: str | None = None + self.started_context = False + + # The data_stack parallels the Python call stack. Each entry is + # information about an active frame, a four-element tuple: + # [0] The TTraceData for this frame's file. Could be None if we + # aren't tracing this frame. + # [1] The current file name for the frame. None if we aren't tracing + # this frame. + # [2] The last line number executed in this frame. + # [3] Boolean: did this frame start a new context? + self.data_stack: list[tuple[TTraceFileData | None, str | None, TLineNo, bool]] = [] + self.thread: threading.Thread | None = None + self.stopped = False + self._activity = False + + self.in_atexit = False + # On exit, self.in_atexit = True + atexit.register(setattr, self, "in_atexit", True) + + # Cache a bound method on the instance, so that we don't have to + # re-create a bound method object all the time. + self._cached_bound_method_trace: TTraceFn = self._trace + + def __repr__(self) -> str: + points = sum(len(v) for v in self.data.values()) + files = len(self.data) + return f"" + + def log(self, marker: str, *args: Any) -> None: + """For hard-core logging of what this tracer is doing.""" + with open("/tmp/debug_trace.txt", "a", encoding="utf-8") as f: + f.write(f"{marker} {self.id}[{len(self.data_stack)}]") + if 0: # if you want thread ids.. + f.write( # type: ignore[unreachable] + ".{:x}.{:x}".format( + self.thread.ident, + self.threading.current_thread().ident, + ) + ) + f.write(" {}".format(" ".join(map(str, args)))) + if 0: # if you want callers.. + f.write(" | ") # type: ignore[unreachable] + stack = " / ".join( + (fname or "???").rpartition("/")[-1] for _, fname, _, _ in self.data_stack + ) + f.write(stack) + f.write("\n") + + def _trace( + self, + frame: FrameType, + event: str, + arg: Any, # pylint: disable=unused-argument + lineno: TLineNo | None = None, # pylint: disable=unused-argument + ) -> TTraceFn | None: + """The trace function passed to sys.settrace.""" + + if THIS_FILE in frame.f_code.co_filename: + return None + + # f = frame; code = f.f_code + # self.log(":", f"{code.co_filename} {f.f_lineno} {code.co_name}()", event) + + if self.stopped and sys.gettrace() == self._cached_bound_method_trace: # pylint: disable=comparison-with-callable + # The PyTrace.stop() method has been called, possibly by another + # thread, let's deactivate ourselves now. + if 0: + f = frame # type: ignore[unreachable] + self.log("---\nX", f.f_code.co_filename, f.f_lineno) + while f: + self.log(">", f.f_code.co_filename, f.f_lineno, f.f_code.co_name, f.f_trace) + f = f.f_back + sys.settrace(None) + try: + self.cur_file_data, self.cur_file_name, self.last_line, self.started_context = ( + self.data_stack.pop() + ) + except IndexError: + self.log( + "Empty stack!", + frame.f_code.co_filename, + frame.f_lineno, + frame.f_code.co_name, + ) + return None + + # if event != "call" and frame.f_code.co_filename != self.cur_file_name: + # self.log("---\n*", frame.f_code.co_filename, self.cur_file_name, frame.f_lineno) + + if event == "call": + # Should we start a new context? + if self.should_start_context and self.context is None: + context_maybe = self.should_start_context(frame) # pylint: disable=not-callable + if context_maybe is not None: + self.context = context_maybe + started_context = True + assert self.switch_context is not None + self.switch_context(self.context) # pylint: disable=not-callable + else: + started_context = False + else: + started_context = False + self.started_context = started_context + + # Entering a new frame. Decide if we should trace in this file. + self._activity = True + self.data_stack.append( + ( + self.cur_file_data, + self.cur_file_name, + self.last_line, + started_context, + ), + ) + + # Improve tracing performance: when calling a function, both caller + # and callee are often within the same file. if that's the case, we + # don't have to re-check whether to trace the corresponding + # function (which is a little bit expensive since it involves + # dictionary lookups). This optimization is only correct if we + # didn't start a context. + filename = frame.f_code.co_filename + if filename != self.cur_file_name or started_context: + self.cur_file_name = filename + disp = self.should_trace_cache.get(filename) + if disp is None: + disp = self.should_trace(filename, frame) + self.should_trace_cache[filename] = disp + + self.cur_file_data = None + if disp.trace: + tracename = disp.source_filename + assert tracename is not None + self.lock_data() + try: + if tracename not in self.data: + self.data[tracename] = set() + finally: + self.unlock_data() + self.cur_file_data = self.data[tracename] + else: + frame.f_trace_lines = False + elif not self.cur_file_data: + frame.f_trace_lines = False + + # The call event is really a "start frame" event, and happens for + # function calls and re-entering generators. The f_lasti field is + # -1 for calls, and a real offset for generators. Use <0 as the + # line number for calls, and the real line number for generators. + if RESUME is not None: + # The current opcode is guaranteed to be RESUME. The argument + # determines what kind of resume it is. + oparg = frame.f_code.co_code[frame.f_lasti + 1] + real_call = (oparg == 0) # fmt: skip + else: + real_call = (getattr(frame, "f_lasti", -1) < 0) # fmt: skip + if real_call: + self.last_line = -frame.f_code.co_firstlineno + else: + self.last_line = frame.f_lineno + + elif event == "line": + # Record an executed line. + if self.cur_file_data is not None: + flineno: TLineNo = frame.f_lineno + + if self.trace_arcs: + cast(set_TArc, self.cur_file_data).add((self.last_line, flineno)) + else: + cast(set_TLineNo, self.cur_file_data).add(flineno) + self.last_line = flineno + + elif event == "return": + if self.trace_arcs and self.cur_file_data: + # Record an arc leaving the function, but beware that a + # "return" event might just mean yielding from a generator. + code = frame.f_code.co_code + lasti = frame.f_lasti + if RESUME is not None: + if len(code) == lasti + 2: + # A return from the end of a code object is a real return. + real_return = True + else: + # It is a real return if we aren't going to resume next. + if env.PYBEHAVIOR.lasti_is_yield: + lasti += 2 + real_return = code[lasti] != RESUME + else: + if code[lasti] == RETURN_VALUE: + real_return = True + elif code[lasti] == YIELD_VALUE: + real_return = False + elif len(code) <= lasti + YIELD_FROM_OFFSET: + real_return = True + elif code[lasti + YIELD_FROM_OFFSET] == YIELD_FROM: + real_return = False + else: + real_return = True + if real_return: + first = frame.f_code.co_firstlineno + cast(set_TArc, self.cur_file_data).add((self.last_line, -first)) + + # Leaving this function, pop the filename stack. + self.cur_file_data, self.cur_file_name, self.last_line, self.started_context = ( + self.data_stack.pop() + ) + # Leaving a context? + if self.started_context: + assert self.switch_context is not None + self.context = None + self.switch_context(None) # pylint: disable=not-callable + + return self._cached_bound_method_trace + + def start(self) -> TTraceFn: + """Start this Tracer. + + Return a Python function suitable for use with sys.settrace(). + + """ + self.stopped = False + if self.threading: + if self.thread is None: + self.thread = self.threading.current_thread() + + sys.settrace(self._cached_bound_method_trace) + return self._cached_bound_method_trace + + def stop(self) -> None: + """Stop this Tracer.""" + # Get the active tracer callback before setting the stop flag to be + # able to detect if the tracer was changed prior to stopping it. + tf = sys.gettrace() + + # Set the stop flag. The actual call to sys.settrace(None) will happen + # in the self._trace callback itself to make sure to call it from the + # right thread. + self.stopped = True + + if self.threading: + assert self.thread is not None + if self.thread.ident != self.threading.current_thread().ident: + # Called on a different thread than started us: we can't unhook + # ourselves, but we've set the flag that we should stop, so we + # won't do any more tracing. + # self.log("~", "stopping on different threads") + return + + # PyPy clears the trace function before running atexit functions, + # so don't warn if we are in atexit on PyPy and the trace function + # has changed to None. Metacoverage also messes this up, so don't + # warn if we are measuring ourselves. + suppress_warning = (env.PYPY and self.in_atexit and tf is None) or env.METACOV + if self.warn and not suppress_warning: + if tf != self._cached_bound_method_trace: # pylint: disable=comparison-with-callable + self.warn( + "Trace function changed, data is likely wrong: " + + f"{tf!r} != {self._cached_bound_method_trace!r}", + slug="trace-changed", + ) + + def activity(self) -> bool: + """Has there been any activity?""" + return self._activity + + def reset_activity(self) -> None: + """Reset the activity() flag.""" + self._activity = False + + def get_stats(self) -> dict[str, int] | None: + """Return a dictionary of statistics, or None.""" + return None diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/regions.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/regions.py new file mode 100644 index 0000000..891ae84 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/regions.py @@ -0,0 +1,127 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Find functions and classes in Python code.""" + +from __future__ import annotations + +import ast +import dataclasses +from typing import cast + +from coverage.plugin import CodeRegion + + +@dataclasses.dataclass +class Context: + """The nested named context of a function or class.""" + + name: str + kind: str + lines: set[int] + + +class RegionFinder: + """An ast visitor that will find and track regions of code. + + Functions and classes are tracked by name. Results are in the .regions + attribute. + + """ + + def __init__(self) -> None: + self.regions: list[CodeRegion] = [] + self.context: list[Context] = [] + + def parse_source(self, source: str) -> None: + """Parse `source` and walk the ast to populate the .regions attribute.""" + self.handle_node(ast.parse(source)) + + def fq_node_name(self) -> str: + """Get the current fully qualified name we're processing.""" + return ".".join(c.name for c in self.context) + + def handle_node(self, node: ast.AST) -> None: + """Recursively handle any node.""" + if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)): + self.handle_FunctionDef(node) + elif isinstance(node, ast.ClassDef): + self.handle_ClassDef(node) + else: + self.handle_node_body(node) + + def handle_node_body(self, node: ast.AST) -> None: + """Recursively handle the nodes in this node's body, if any.""" + for body_node in getattr(node, "body", ()): + self.handle_node(body_node) + + def handle_FunctionDef(self, node: ast.FunctionDef | ast.AsyncFunctionDef) -> None: + """Called for `def` or `async def`.""" + lines = set(range(node.body[0].lineno, cast(int, node.body[-1].end_lineno) + 1)) + if self.context and self.context[-1].kind == "class": + # Function bodies are part of their enclosing class. + self.context[-1].lines |= lines + # Function bodies should be excluded from the nearest enclosing function. + for ancestor in reversed(self.context): + if ancestor.kind == "function": + ancestor.lines -= lines + break + self.context.append(Context(node.name, "function", lines)) + self.regions.append( + CodeRegion( + kind="function", + name=self.fq_node_name(), + start=node.lineno, + lines=lines, + ) + ) + self.handle_node_body(node) + self.context.pop() + + def handle_ClassDef(self, node: ast.ClassDef) -> None: + """Called for `class`.""" + # The lines for a class are the lines in the methods of the class. + # We start empty, and count on visit_FunctionDef to add the lines it + # finds. + lines: set[int] = set() + self.context.append(Context(node.name, "class", lines)) + self.regions.append( + CodeRegion( + kind="class", + name=self.fq_node_name(), + start=node.lineno, + lines=lines, + ) + ) + self.handle_node_body(node) + self.context.pop() + # Class bodies should be excluded from the enclosing classes. + for ancestor in reversed(self.context): + if ancestor.kind == "class": + ancestor.lines -= lines + + +def code_regions(source: str) -> list[CodeRegion]: + """Find function and class regions in source code. + + Analyzes the code in `source`, and returns a list of :class:`CodeRegion` + objects describing functions and classes as regions of the code:: + + [ + CodeRegion(kind="function", name="func1", start=8, lines={10, 11, 12}), + CodeRegion(kind="function", name="MyClass.method", start=30, lines={34, 35, 36}), + CodeRegion(kind="class", name="MyClass", start=25, lines={34, 35, 36}), + ] + + The line numbers will include comments and blank lines. Later processing + will need to ignore those lines as needed. + + Nested functions and classes are excluded from their enclosing region. No + line should be reported as being part of more than one function, or more + than one class. Lines in methods are reported as being in a function and + in a class. + + """ + rf = RegionFinder() + rf.parse_source(source) + return rf.regions diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/report.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/report.py new file mode 100644 index 0000000..30fa406 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/report.py @@ -0,0 +1,298 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Summary reporting""" + +from __future__ import annotations + +import sys +from collections.abc import Iterable +from typing import IO, TYPE_CHECKING, Any + +from coverage.exceptions import ConfigError, NoDataError +from coverage.misc import human_sorted_items, plural +from coverage.plugin import FileReporter +from coverage.report_core import get_analysis_to_report +from coverage.results import Analysis, Numbers +from coverage.types import TMorf + +if TYPE_CHECKING: + from coverage import Coverage + + +class SummaryReporter: + """A reporter for writing the summary report.""" + + def __init__(self, coverage: Coverage) -> None: + self.coverage = coverage + self.config = self.coverage.config + self.branches = coverage.get_data().has_arcs() + self.outfile: IO[str] | None = None + self.output_format = self.config.format or "text" + if self.output_format not in {"text", "markdown", "total"}: + raise ConfigError(f"Unknown report format choice: {self.output_format!r}") + self.fr_analyses: list[tuple[FileReporter, Analysis]] = [] + self.skipped_count = 0 + self.empty_count = 0 + self.total = Numbers(precision=self.config.precision) + + def write(self, line: str) -> None: + """Write a line to the output, adding a newline.""" + assert self.outfile is not None + self.outfile.write(line.rstrip()) + self.outfile.write("\n") + + def write_items(self, items: Iterable[str]) -> None: + """Write a list of strings, joined together.""" + self.write("".join(items)) + + def report_text( + self, + header: list[str], + lines_values: list[list[Any]], + total_line: list[Any], + end_lines: list[str], + ) -> None: + """Internal method that prints report data in text format. + + `header` is a list with captions. + `lines_values` is list of lists of sortable values. + `total_line` is a list with values of the total line. + `end_lines` is a list of ending lines with information about skipped files. + + """ + # Prepare the formatting strings, header, and column sorting. + max_name = max([len(line[0]) for line in lines_values] + [5]) + 1 + max_n = max(len(total_line[header.index("Cover")]) + 2, len(" Cover")) + 1 + max_n = max([max_n] + [len(line[header.index("Cover")]) + 2 for line in lines_values]) + formats = dict( + Name="{:{name_len}}", + Stmts="{:>7}", + Miss="{:>7}", + Branch="{:>7}", + BrPart="{:>7}", + Cover="{:>{n}}", + Missing="{:>10}", + ) + header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header] + header_str = "".join(header_items) + rule = "-" * len(header_str) + + # Write the header + self.write(header_str) + self.write(rule) + + # Write the data lines + formats.update( + dict( + Cover="{:>{n}}%", + Missing=" {:9}", + ) + ) + for values in lines_values: + self.write_items( + ( + formats[item].format(str(value), name_len=max_name, n=max_n - 1) + for item, value in zip(header, values) + ) + ) + + # Write a TOTAL line + if lines_values: + self.write(rule) + + self.write_items( + ( + formats[item].format(str(value), name_len=max_name, n=max_n - 1) + for item, value in zip(header, total_line) + ) + ) + + for end_line in end_lines: + self.write(end_line) + + def report_markdown( + self, + header: list[str], + lines_values: list[list[Any]], + total_line: list[Any], + end_lines: list[str], + ) -> None: + """Internal method that prints report data in markdown format. + + `header` is a list with captions. + `lines_values` is a sorted list of lists containing coverage information. + `total_line` is a list with values of the total line. + `end_lines` is a list of ending lines with information about skipped files. + + """ + # Prepare the formatting strings, header, and column sorting. + max_name = max((len(line[0].replace("_", "\\_")) for line in lines_values), default=0) + max_name = max(max_name, len("**TOTAL**")) + 1 + formats = dict( + Name="| {:{name_len}}|", + Stmts="{:>9} |", + Miss="{:>9} |", + Branch="{:>9} |", + BrPart="{:>9} |", + Cover="{:>{n}} |", + Missing="{:>10} |", + ) + max_n = max(len(total_line[header.index("Cover")]) + 6, len(" Cover ")) + header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header] + header_str = "".join(header_items) + rule_str = "|" + " ".join( + ["- |".rjust(len(header_items[0]) - 1, "-")] + + ["-: |".rjust(len(item) - 1, "-") for item in header_items[1:]], + ) + + # Write the header + self.write(header_str) + self.write(rule_str) + + # Write the data lines + for values in lines_values: + formats.update( + dict( + Cover="{:>{n}}% |", + ) + ) + self.write_items( + ( + formats[item].format( + str(value).replace("_", "\\_"), name_len=max_name, n=max_n - 1 + ) + for item, value in zip(header, values) + ) + ) + + # Write the TOTAL line + formats.update( + dict( + Name="|{:>{name_len}} |", + Cover="{:>{n}} |", + ), + ) + total_line_items: list[str] = [] + for item, value in zip(header, total_line): + if value == "": + insert = value + elif item == "Cover": + insert = f" **{value}%**" + else: + insert = f" **{value}**" + total_line_items += formats[item].format(insert, name_len=max_name, n=max_n) + self.write_items(total_line_items) + + for end_line in end_lines: + self.write(end_line) + + def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str] | None = None) -> float: + """Writes a report summarizing coverage statistics per module. + + `outfile` is a text-mode file object to write the summary to. + + """ + self.outfile = outfile or sys.stdout + + self.coverage.get_data().set_query_contexts(self.config.report_contexts) + for fr, analysis in get_analysis_to_report(self.coverage, morfs): + self.report_one_file(fr, analysis) + + if not self.total.n_files and not self.skipped_count: + raise NoDataError("No data to report.") + + if self.output_format == "total": + self.write(self.total.pc_covered_str) + else: + self.tabular_report() + + return self.total.pc_covered + + def tabular_report(self) -> None: + """Writes tabular report formats.""" + # Prepare the header line and column sorting. + header = ["Name", "Stmts", "Miss"] + if self.branches: + header += ["Branch", "BrPart"] + header += ["Cover"] + if self.config.show_missing: + header += ["Missing"] + + column_order = dict(name=0, stmts=1, miss=2, cover=-1) + if self.branches: + column_order.update(dict(branch=3, brpart=4)) + + # `lines_values` is list of lists of sortable values. + lines_values = [] + + for fr, analysis in self.fr_analyses: + nums = analysis.numbers + args = [fr.relative_filename(), nums.n_statements, nums.n_missing] + if self.branches: + args += [nums.n_branches, nums.n_partial_branches] + args += [nums.pc_covered_str] + if self.config.show_missing: + args += [analysis.missing_formatted(branches=True)] + args += [nums.pc_covered] + lines_values.append(args) + + # Line sorting. + sort_option = (self.config.sort or "name").lower() + reverse = False + if sort_option[0] == "-": + reverse = True + sort_option = sort_option[1:] + elif sort_option[0] == "+": + sort_option = sort_option[1:] + sort_idx = column_order.get(sort_option) + if sort_idx is None: + raise ConfigError(f"Invalid sorting option: {self.config.sort!r}") + if sort_option == "name": + lines_values = human_sorted_items(lines_values, reverse=reverse) + else: + lines_values.sort( + key=lambda line: (line[sort_idx], line[0]), + reverse=reverse, + ) + + # Calculate total if we had at least one file. + total_line = ["TOTAL", self.total.n_statements, self.total.n_missing] + if self.branches: + total_line += [self.total.n_branches, self.total.n_partial_branches] + total_line += [self.total.pc_covered_str] + if self.config.show_missing: + total_line += [""] + + # Create other final lines. + end_lines = [] + if self.config.skip_covered and self.skipped_count: + files = plural(self.skipped_count, "file") + end_lines.append( + f"\n{self.skipped_count} {files} skipped due to complete coverage.", + ) + if self.config.skip_empty and self.empty_count: + files = plural(self.empty_count, "file") + end_lines.append(f"\n{self.empty_count} empty {files} skipped.") + + if self.output_format == "markdown": + formatter = self.report_markdown + else: + formatter = self.report_text + formatter(header, lines_values, total_line, end_lines) + + def report_one_file(self, fr: FileReporter, analysis: Analysis) -> None: + """Report on just one file, the callback from report().""" + nums = analysis.numbers + self.total += nums + + no_missing_lines = (nums.n_missing == 0) # fmt: skip + no_missing_branches = (nums.n_partial_branches == 0) # fmt: skip + if self.config.skip_covered and no_missing_lines and no_missing_branches: + # Don't report on 100% files. + self.skipped_count += 1 + elif self.config.skip_empty and nums.n_statements == 0: + # Don't report on empty files. + self.empty_count += 1 + else: + self.fr_analyses.append((fr, analysis)) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/report_core.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/report_core.py new file mode 100644 index 0000000..6a672a1 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/report_core.py @@ -0,0 +1,117 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Reporter foundation for coverage.py.""" + +from __future__ import annotations + +import sys +from collections.abc import Iterable +from typing import IO, TYPE_CHECKING, Callable, Protocol + +from coverage.exceptions import NoDataError, NotPython +from coverage.files import GlobMatcher, prep_patterns +from coverage.misc import ensure_dir_for_file, file_be_gone +from coverage.plugin import FileReporter +from coverage.results import Analysis +from coverage.types import TMorf + +if TYPE_CHECKING: + from coverage import Coverage + + +class Reporter(Protocol): + """What we expect of reporters.""" + + report_type: str + + def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float: + """Generate a report of `morfs`, written to `outfile`.""" + + +def render_report( + output_path: str, + reporter: Reporter, + morfs: Iterable[TMorf] | None, + msgfn: Callable[[str], None], +) -> float: + """Run a one-file report generator, managing the output file. + + This function ensures the output file is ready to be written to. Then writes + the report to it. Then closes the file and cleans up. + + """ + file_to_close = None + delete_file = False + + if output_path == "-": + outfile = sys.stdout + else: + # Ensure that the output directory is created; done here because this + # report pre-opens the output file. HtmlReporter does this on its own + # because its task is more complex, being multiple files. + ensure_dir_for_file(output_path) + outfile = open(output_path, "w", encoding="utf-8") + file_to_close = outfile + delete_file = True + + try: + ret = reporter.report(morfs, outfile=outfile) + if file_to_close is not None: + msgfn(f"Wrote {reporter.report_type} to {output_path}") + delete_file = False + return ret + finally: + if file_to_close is not None: + file_to_close.close() + if delete_file: + file_be_gone(output_path) # pragma: part covered (doesn't return) + + +def get_analysis_to_report( + coverage: Coverage, + morfs: Iterable[TMorf] | None, +) -> Iterable[tuple[FileReporter, Analysis]]: + """Get the files to report on. + + For each morf in `morfs`, if it should be reported on (based on the omit + and include configuration options), yield a pair, the `FileReporter` and + `Analysis` for the morf. + + """ + fr_morfs = coverage._get_file_reporters(morfs) + config = coverage.config + + if config.report_include: + matcher = GlobMatcher(prep_patterns(config.report_include), "report_include") + fr_morfs = [(fr, morf) for (fr, morf) in fr_morfs if matcher.match(fr.filename)] + + if config.report_omit: + matcher = GlobMatcher(prep_patterns(config.report_omit), "report_omit") + fr_morfs = [(fr, morf) for (fr, morf) in fr_morfs if not matcher.match(fr.filename)] + + if not fr_morfs: + raise NoDataError("No data to report.") + + for fr, morf in sorted(fr_morfs): + try: + analysis = coverage._analyze(morf) + except NotPython: + # Only report errors for .py files, and only if we didn't + # explicitly suppress those errors. + # NotPython is only raised by PythonFileReporter, which has a + # should_be_python() method. + if fr.should_be_python(): # type: ignore[attr-defined] + if config.ignore_errors: + msg = f"Couldn't parse Python file '{fr.filename}'" + coverage._warn(msg, slug="couldnt-parse") + else: + raise + except Exception as exc: + if config.ignore_errors: + msg = f"Couldn't parse '{fr.filename}': {exc}".rstrip() + coverage._warn(msg, slug="couldnt-parse") + else: + raise + else: + yield (fr, analysis) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/results.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/results.py new file mode 100644 index 0000000..57f9006 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/results.py @@ -0,0 +1,471 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Results of coverage measurement.""" + +from __future__ import annotations + +import collections +import dataclasses +from collections.abc import Iterable +from typing import TYPE_CHECKING + +from coverage.exceptions import ConfigError +from coverage.misc import nice_pair +from coverage.types import TArc, TLineNo + +if TYPE_CHECKING: + from coverage.data import CoverageData + from coverage.plugin import FileReporter + + +def analysis_from_file_reporter( + data: CoverageData, + precision: int, + file_reporter: FileReporter, + filename: str, +) -> Analysis: + """Create an Analysis from a FileReporter.""" + has_arcs = data.has_arcs() + statements = file_reporter.lines() + excluded = file_reporter.excluded_lines() + executed = file_reporter.translate_lines(data.lines(filename) or []) + + if has_arcs: + arc_possibilities_set = file_reporter.arcs() + arcs: Iterable[TArc] = data.arcs(filename) or [] + arcs = file_reporter.translate_arcs(arcs) + + # Reduce the set of arcs to the ones that could be branches. + dests = collections.defaultdict(set) + for fromno, tono in arc_possibilities_set: + dests[fromno].add(tono) + single_dests = { + fromno: list(tonos)[0] for fromno, tonos in dests.items() if len(tonos) == 1 + } + new_arcs = set() + for fromno, tono in arcs: + if fromno != tono: + new_arcs.add((fromno, tono)) + else: + if fromno in single_dests: + new_arcs.add((fromno, single_dests[fromno])) + + arcs_executed_set = file_reporter.translate_arcs(new_arcs) + exit_counts = file_reporter.exit_counts() + no_branch = file_reporter.no_branch_lines() + else: + arc_possibilities_set = set() + arcs_executed_set = set() + exit_counts = {} + no_branch = set() + + return Analysis( + precision=precision, + filename=filename, + has_arcs=has_arcs, + statements=statements, + excluded=excluded, + executed=executed, + arc_possibilities_set=arc_possibilities_set, + arcs_executed_set=arcs_executed_set, + exit_counts=exit_counts, + no_branch=no_branch, + ) + + +@dataclasses.dataclass +class Analysis: + """The results of analyzing a FileReporter.""" + + precision: int + filename: str + has_arcs: bool + statements: set[TLineNo] + excluded: set[TLineNo] + executed: set[TLineNo] + arc_possibilities_set: set[TArc] + arcs_executed_set: set[TArc] + exit_counts: dict[TLineNo, int] + no_branch: set[TLineNo] + + def __post_init__(self) -> None: + self.arc_possibilities = sorted(self.arc_possibilities_set) + self.arcs_executed = sorted(self.arcs_executed_set) + self.missing = self.statements - self.executed + + if self.has_arcs: + n_branches = self._total_branches() + mba = self.missing_branch_arcs() + n_partial_branches = sum(len(v) for k, v in mba.items() if k not in self.missing) + n_missing_branches = sum(len(v) for k, v in mba.items()) + else: + n_branches = n_partial_branches = n_missing_branches = 0 + + self.numbers = Numbers( + precision=self.precision, + n_files=1, + n_statements=len(self.statements), + n_excluded=len(self.excluded), + n_missing=len(self.missing), + n_branches=n_branches, + n_partial_branches=n_partial_branches, + n_missing_branches=n_missing_branches, + ) + + def missing_formatted(self, branches: bool = False) -> str: + """The missing line numbers, formatted nicely. + + Returns a string like "1-2, 5-11, 13-14". + + If `branches` is true, includes the missing branch arcs also. + + """ + if branches and self.has_arcs: + arcs = self.missing_branch_arcs().items() + else: + arcs = None + + return format_lines(self.statements, self.missing, arcs=arcs) + + def arcs_missing(self) -> list[TArc]: + """Returns a sorted list of the un-executed arcs in the code.""" + missing = ( + p + for p in self.arc_possibilities + if p not in self.arcs_executed_set + and p[0] not in self.no_branch + and p[1] not in self.excluded + ) + return sorted(missing) + + def _branch_lines(self) -> list[TLineNo]: + """Returns a list of line numbers that have more than one exit.""" + return [l1 for l1, count in self.exit_counts.items() if count > 1] + + def _total_branches(self) -> int: + """How many total branches are there?""" + return sum(count for count in self.exit_counts.values() if count > 1) + + def missing_branch_arcs(self) -> dict[TLineNo, list[TLineNo]]: + """Return arcs that weren't executed from branch lines. + + Returns {l1:[l2a,l2b,...], ...} + + """ + missing = self.arcs_missing() + branch_lines = set(self._branch_lines()) + mba = collections.defaultdict(list) + for l1, l2 in missing: + assert l1 != l2, f"In {self.filename}, didn't expect {l1} == {l2}" + if l1 in branch_lines: + mba[l1].append(l2) + return mba + + def executed_branch_arcs(self) -> dict[TLineNo, list[TLineNo]]: + """Return arcs that were executed from branch lines. + + Only include ones that we considered possible. + + Returns {l1:[l2a,l2b,...], ...} + + """ + branch_lines = set(self._branch_lines()) + eba = collections.defaultdict(list) + for l1, l2 in self.arcs_executed: + assert l1 != l2, f"Oops: Didn't think this could happen: {l1 = }, {l2 = }" + if (l1, l2) not in self.arc_possibilities_set: + continue + if l1 in branch_lines: + eba[l1].append(l2) + return eba + + def branch_stats(self) -> dict[TLineNo, tuple[int, int]]: + """Get stats about branches. + + Returns a dict mapping line numbers to a tuple: + (total_exits, taken_exits). + + """ + + missing_arcs = self.missing_branch_arcs() + stats = {} + for lnum in self._branch_lines(): + exits = self.exit_counts[lnum] + missing = len(missing_arcs[lnum]) + stats[lnum] = (exits, exits - missing) + return stats + + +TRegionLines = frozenset[TLineNo] + + +class AnalysisNarrower: + """ + For reducing an `Analysis` to a subset of its lines. + + Originally this was a simpler method on Analysis, but that led to quadratic + behavior. This class does the bulk of the work up-front to provide the + same results in linear time. + + Create an AnalysisNarrower from an Analysis, bulk-add region lines to it + with `add_regions`, then individually request new narrowed Analysis objects + for each region with `narrow`. Doing most of the work in limited calls to + `add_regions` lets us avoid poor performance. + """ + + # In this class, regions are represented by a frozenset of their lines. + + def __init__(self, analysis: Analysis) -> None: + self.analysis = analysis + self.region2arc_possibilities: dict[TRegionLines, set[TArc]] = collections.defaultdict(set) + self.region2arc_executed: dict[TRegionLines, set[TArc]] = collections.defaultdict(set) + self.region2exit_counts: dict[TRegionLines, dict[TLineNo, int]] = collections.defaultdict( + dict + ) + + def add_regions(self, liness: Iterable[set[TLineNo]]) -> None: + """ + Pre-process a number of sets of line numbers. Later calls to `narrow` + with one of these sets will provide a narrowed Analysis. + """ + if self.analysis.has_arcs: + line2region: dict[TLineNo, TRegionLines] = {} + + for lines in liness: + fzlines = frozenset(lines) + for line in lines: + line2region[line] = fzlines + + def collect_arcs( + arc_set: set[TArc], + region2arcs: dict[TRegionLines, set[TArc]], + ) -> None: + for a, b in arc_set: + if r := line2region.get(a): + region2arcs[r].add((a, b)) + if r := line2region.get(b): + region2arcs[r].add((a, b)) + + collect_arcs(self.analysis.arc_possibilities_set, self.region2arc_possibilities) + collect_arcs(self.analysis.arcs_executed_set, self.region2arc_executed) + + for lno, num in self.analysis.exit_counts.items(): + if r := line2region.get(lno): + self.region2exit_counts[r][lno] = num + + def narrow(self, lines: set[TLineNo]) -> Analysis: + """Create a narrowed Analysis. + + The current analysis is copied to make a new one that only considers + the lines in `lines`. + """ + + # Technically, the set intersections in this method are still O(N**2) + # since this method is called N times, but they're very fast and moving + # them to `add_regions` won't avoid the quadratic time. + + statements = self.analysis.statements & lines + excluded = self.analysis.excluded & lines + executed = self.analysis.executed & lines + + if self.analysis.has_arcs: + fzlines = frozenset(lines) + arc_possibilities_set = self.region2arc_possibilities[fzlines] + arcs_executed_set = self.region2arc_executed[fzlines] + exit_counts = self.region2exit_counts[fzlines] + no_branch = self.analysis.no_branch & lines + else: + arc_possibilities_set = set() + arcs_executed_set = set() + exit_counts = {} + no_branch = set() + + return Analysis( + precision=self.analysis.precision, + filename=self.analysis.filename, + has_arcs=self.analysis.has_arcs, + statements=statements, + excluded=excluded, + executed=executed, + arc_possibilities_set=arc_possibilities_set, + arcs_executed_set=arcs_executed_set, + exit_counts=exit_counts, + no_branch=no_branch, + ) + + +@dataclasses.dataclass +class Numbers: + """The numerical results of measuring coverage. + + This holds the basic statistics from `Analysis`, and is used to roll + up statistics across files. + + """ + + precision: int = 0 + n_files: int = 0 + n_statements: int = 0 + n_excluded: int = 0 + n_missing: int = 0 + n_branches: int = 0 + n_partial_branches: int = 0 + n_missing_branches: int = 0 + + @property + def n_executed(self) -> int: + """Returns the number of executed statements.""" + return self.n_statements - self.n_missing + + @property + def n_executed_branches(self) -> int: + """Returns the number of executed branches.""" + return self.n_branches - self.n_missing_branches + + @property + def pc_covered(self) -> float: + """Returns a single percentage value for coverage.""" + if self.n_statements > 0: + numerator, denominator = self.ratio_covered + pc_cov = (100.0 * numerator) / denominator + else: + pc_cov = 100.0 + return pc_cov + + @property + def pc_covered_str(self) -> str: + """Returns the percent covered, as a string, without a percent sign. + + Note that "0" is only returned when the value is truly zero, and "100" + is only returned when the value is truly 100. Rounding can never + result in either "0" or "100". + + """ + return display_covered(self.pc_covered, self.precision) + + @property + def ratio_covered(self) -> tuple[int, int]: + """Return a numerator and denominator for the coverage ratio.""" + numerator = self.n_executed + self.n_executed_branches + denominator = self.n_statements + self.n_branches + return numerator, denominator + + def __add__(self, other: Numbers) -> Numbers: + return Numbers( + self.precision, + self.n_files + other.n_files, + self.n_statements + other.n_statements, + self.n_excluded + other.n_excluded, + self.n_missing + other.n_missing, + self.n_branches + other.n_branches, + self.n_partial_branches + other.n_partial_branches, + self.n_missing_branches + other.n_missing_branches, + ) + + def __radd__(self, other: int) -> Numbers: + # Implementing 0+Numbers allows us to sum() a list of Numbers. + assert other == 0 # we only ever call it this way. + return self + + +def display_covered(pc: float, precision: int) -> str: + """Return a displayable total percentage, as a string. + + Note that "0" is only returned when the value is truly zero, and "100" + is only returned when the value is truly 100. Rounding can never + result in either "0" or "100". + + """ + near0 = 1.0 / 10**precision + if 0 < pc < near0: + pc = near0 + elif (100.0 - near0) < pc < 100: + pc = 100.0 - near0 + else: + pc = round(pc, precision) + return f"{pc:.{precision}f}" + + +def _line_ranges( + statements: Iterable[TLineNo], + lines: Iterable[TLineNo], +) -> list[tuple[TLineNo, TLineNo]]: + """Produce a list of ranges for `format_lines`.""" + statements = sorted(statements) + lines = sorted(lines) + + pairs = [] + start: TLineNo | None = None + lidx = 0 + for stmt in statements: + if lidx >= len(lines): + break + if stmt == lines[lidx]: + lidx += 1 + if not start: + start = stmt + end = stmt + elif start: + pairs.append((start, end)) + start = None + if start: + pairs.append((start, end)) + return pairs + + +def format_lines( + statements: Iterable[TLineNo], + lines: Iterable[TLineNo], + arcs: Iterable[tuple[TLineNo, list[TLineNo]]] | None = None, +) -> str: + """Nicely format a list of line numbers. + + Format a list of line numbers for printing by coalescing groups of lines as + long as the lines represent consecutive statements. This will coalesce + even if there are gaps between statements. + + For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and + `lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14". + + Both `lines` and `statements` can be any iterable. All of the elements of + `lines` must be in `statements`, and all of the values must be positive + integers. + + If `arcs` is provided, they are (start,[end,end,end]) pairs that will be + included in the output as long as start isn't in `lines`. + + """ + line_items = [(pair[0], nice_pair(pair)) for pair in _line_ranges(statements, lines)] + if arcs is not None: + line_exits = sorted(arcs) + for line, exits in line_exits: + for ex in sorted(exits): + if line not in lines and ex not in lines: + dest = ex if ex > 0 else "exit" + line_items.append((line, f"{line}->{dest}")) + + ret = ", ".join(t[-1] for t in sorted(line_items)) + return ret + + +def should_fail_under(total: float, fail_under: float, precision: int) -> bool: + """Determine if a total should fail due to fail-under. + + `total` is a float, the coverage measurement total. `fail_under` is the + fail_under setting to compare with. `precision` is the number of digits + to consider after the decimal point. + + Returns True if the total should fail. + + """ + # We can never achieve higher than 100% coverage, or less than zero. + if not (0 <= fail_under <= 100.0): + msg = f"fail_under={fail_under} is invalid. Must be between 0 and 100." + raise ConfigError(msg) + + # Special case for fail_under=100, it must really be 100. + if fail_under == 100.0 and total != 100.0: + return True + + return round(total, precision) < fail_under diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/sqldata.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/sqldata.py new file mode 100644 index 0000000..7741e47 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/sqldata.py @@ -0,0 +1,1153 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""SQLite coverage data.""" + +from __future__ import annotations + +import collections +import datetime +import functools +import glob +import itertools +import os +import random +import socket +import sqlite3 +import string +import sys +import textwrap +import threading +import uuid +import zlib +from collections.abc import Collection, Mapping, Sequence +from typing import Any, Callable, cast + +from coverage.debug import NoDebugging, auto_repr, file_summary +from coverage.exceptions import CoverageException, DataError +from coverage.misc import file_be_gone, isolate_module +from coverage.numbits import numbits_to_nums, numbits_union, nums_to_numbits +from coverage.sqlitedb import SqliteDb +from coverage.types import AnyCallable, FilePath, TArc, TDebugCtl, TLineNo, TWarnFn +from coverage.version import __version__ + +os = isolate_module(os) + +# If you change the schema: increment the SCHEMA_VERSION and update the +# docs in docs/dbschema.rst by running "make cogdoc". + +SCHEMA_VERSION = 7 + +# Schema versions: +# 1: Released in 5.0a2 +# 2: Added contexts in 5.0a3. +# 3: Replaced line table with line_map table. +# 4: Changed line_map.bitmap to line_map.numbits. +# 5: Added foreign key declarations. +# 6: Key-value in meta. +# 7: line_map -> line_bits + +SCHEMA = """\ +CREATE TABLE coverage_schema ( + -- One row, to record the version of the schema in this db. + version integer +); + +CREATE TABLE meta ( + -- Key-value pairs, to record metadata about the data + key text, + value text, + unique (key) + -- Possible keys: + -- 'has_arcs' boolean -- Is this data recording branches? + -- 'sys_argv' text -- The coverage command line that recorded the data. + -- 'version' text -- The version of coverage.py that made the file. + -- 'when' text -- Datetime when the file was created. +); + +CREATE TABLE file ( + -- A row per file measured. + id integer primary key, + path text, + unique (path) +); + +CREATE TABLE context ( + -- A row per context measured. + id integer primary key, + context text, + unique (context) +); + +CREATE TABLE line_bits ( + -- If recording lines, a row per context per file executed. + -- All of the line numbers for that file/context are in one numbits. + file_id integer, -- foreign key to `file`. + context_id integer, -- foreign key to `context`. + numbits blob, -- see the numbits functions in coverage.numbits + foreign key (file_id) references file (id), + foreign key (context_id) references context (id), + unique (file_id, context_id) +); + +CREATE TABLE arc ( + -- If recording branches, a row per context per from/to line transition executed. + file_id integer, -- foreign key to `file`. + context_id integer, -- foreign key to `context`. + fromno integer, -- line number jumped from. + tono integer, -- line number jumped to. + foreign key (file_id) references file (id), + foreign key (context_id) references context (id), + unique (file_id, context_id, fromno, tono) +); + +CREATE TABLE tracer ( + -- A row per file indicating the tracer used for that file. + file_id integer primary key, + tracer text, + foreign key (file_id) references file (id) +); +""" + + +def _locked(method: AnyCallable) -> AnyCallable: + """A decorator for methods that should hold self._lock.""" + + @functools.wraps(method) + def _wrapped(self: CoverageData, *args: Any, **kwargs: Any) -> Any: + if self._debug.should("lock"): + self._debug.write(f"Locking {self._lock!r} for {method.__name__}") + with self._lock: + if self._debug.should("lock"): + self._debug.write(f"Locked {self._lock!r} for {method.__name__}") + return method(self, *args, **kwargs) + + return _wrapped + + +class NumbitsUnionAgg: + """SQLite aggregate function for computing union of numbits.""" + + def __init__(self) -> None: + self.result = b"" + + def step(self, value: bytes) -> None: + """Process one value in the aggregation.""" + self.result = numbits_union(self.result, value) + + def finalize(self) -> bytes: + """Return the final aggregated result.""" + return self.result + + +class CoverageData: + """Manages collected coverage data, including file storage. + + This class is the public supported API to the data that coverage.py + collects during program execution. It includes information about what code + was executed. It does not include information from the analysis phase, to + determine what lines could have been executed, or what lines were not + executed. + + .. note:: + + The data file is currently a SQLite database file, with a + :ref:`documented schema `. The schema is subject to change + though, so be careful about querying it directly. Use this API if you + can to isolate yourself from changes. + + There are a number of kinds of data that can be collected: + + * **lines**: the line numbers of source lines that were executed. + These are always available. + + * **arcs**: pairs of source and destination line numbers for transitions + between source lines. These are only available if branch coverage was + used. + + * **file tracer names**: the module names of the file tracer plugins that + handled each file in the data. + + Lines, arcs, and file tracer names are stored for each source file. File + names in this API are case-sensitive, even on platforms with + case-insensitive file systems. + + A data file either stores lines, or arcs, but not both. + + A data file is associated with the data when the :class:`CoverageData` + is created, using the parameters `basename`, `suffix`, and `no_disk`. The + base name can be queried with :meth:`base_filename`, and the actual file + name being used is available from :meth:`data_filename`. + + To read an existing coverage.py data file, use :meth:`read`. You can then + access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`, + or :meth:`file_tracer`. + + The :meth:`has_arcs` method indicates whether arc data is available. You + can get a set of the files in the data with :meth:`measured_files`. As + with most Python containers, you can determine if there is any data at all + by using this object as a boolean value. + + The contexts for each line in a file can be read with + :meth:`contexts_by_lineno`. + + To limit querying to certain contexts, use :meth:`set_query_context` or + :meth:`set_query_contexts`. These will narrow the focus of subsequent + :meth:`lines`, :meth:`arcs`, and :meth:`contexts_by_lineno` calls. The set + of all measured context names can be retrieved with + :meth:`measured_contexts`. + + Most data files will be created by coverage.py itself, but you can use + methods here to create data files if you like. The :meth:`add_lines`, + :meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways + that are convenient for coverage.py. + + To record data for contexts, use :meth:`set_context` to set a context to + be used for subsequent :meth:`add_lines` and :meth:`add_arcs` calls. + + To add a source file without any measured data, use :meth:`touch_file`, + or :meth:`touch_files` for a list of such files. + + Write the data to its file with :meth:`write`. + + You can clear the data in memory with :meth:`erase`. Data for specific + files can be removed from the database with :meth:`purge_files`. + + Two data collections can be combined by using :meth:`update` on one + :class:`CoverageData`, passing it the other. + + Data in a :class:`CoverageData` can be serialized and deserialized with + :meth:`dumps` and :meth:`loads`. + + The methods used during the coverage.py collection phase + (:meth:`add_lines`, :meth:`add_arcs`, :meth:`set_context`, and + :meth:`add_file_tracers`) are thread-safe. Other methods may not be. + + """ + + def __init__( + self, + basename: FilePath | None = None, + suffix: str | bool | None = None, + no_disk: bool = False, + warn: TWarnFn | None = None, + debug: TDebugCtl | None = None, + ) -> None: + """Create a :class:`CoverageData` object to hold coverage-measured data. + + Arguments: + basename (str): the base name of the data file, defaulting to + ".coverage". This can be a path to a file in another directory. + suffix (str or bool): has the same meaning as the `data_suffix` + argument to :class:`coverage.Coverage`. + no_disk (bool): if True, keep all data in memory, and don't + write any disk file. + warn: a warning callback function, accepting a warning message + argument. + debug: a `DebugControl` object (optional) + + """ + self._no_disk = no_disk + self._basename = os.path.abspath(basename or ".coverage") + self._suffix = suffix + self._warn = warn + self._debug = debug or NoDebugging() + + self._choose_filename() + # Maps filenames to row ids. + self._file_map: dict[str, int] = {} + # Maps thread ids to SqliteDb objects. + self._dbs: dict[int, SqliteDb] = {} + self._pid = os.getpid() + # Synchronize the operations used during collection. + self._lock = threading.RLock() + + # Are we in sync with the data file? + self._have_used = False + + self._has_lines = False + self._has_arcs = False + + self._current_context: str | None = None + self._current_context_id: int | None = None + self._query_context_ids: list[int] | None = None + + __repr__ = auto_repr + + def _debug_dataio(self, msg: str, filename: str) -> None: + """A helper for debug messages which are all similar.""" + if self._debug.should("dataio"): + self._debug.write(f"{msg} {filename!r} ({file_summary(filename)})") + + def _choose_filename(self) -> None: + """Set self._filename based on inited attributes.""" + if self._no_disk: + self._filename = f"file:coverage-{uuid.uuid4()}?mode=memory&cache=shared" + else: + self._filename = self._basename + suffix = filename_suffix(self._suffix) + if suffix: + self._filename += f".{suffix}" + + def _reset(self) -> None: + """Reset our attributes.""" + if not self._no_disk: + self.close() + self._file_map = {} + self._have_used = False + self._current_context_id = None + + def close(self, force: bool = False) -> None: + """Really close all the database objects.""" + if self._debug.should("dataio"): + self._debug.write(f"Closing dbs, force={force}: {self._dbs}") + for db in self._dbs.values(): + db.close(force=force) + self._dbs = {} + + def _open_db(self) -> None: + """Open an existing db file, and read its metadata.""" + self._debug_dataio("Opening data file", self._filename) + self._dbs[threading.get_ident()] = SqliteDb(self._filename, self._debug, self._no_disk) + self._read_db() + + def _read_db(self) -> None: + """Read the metadata from a database so that we are ready to use it.""" + with self._dbs[threading.get_ident()] as db: + try: + row = db.execute_one("select version from coverage_schema") + assert row is not None + except Exception as exc: + if "no such table: coverage_schema" in str(exc): + self._init_db(db) + else: + raise DataError( + "Data file {!r} doesn't seem to be a coverage data file: {}".format( + self._filename, + exc, + ), + ) from exc + else: + schema_version = row[0] + if schema_version != SCHEMA_VERSION: + raise DataError( + "Couldn't use data file {!r}: wrong schema: {} instead of {}".format( + self._filename, + schema_version, + SCHEMA_VERSION, + ), + ) + + row = db.execute_one("select value from meta where key = 'has_arcs'") + if row is not None: + self._has_arcs = bool(int(row[0])) + self._has_lines = not self._has_arcs + + with db.execute("select id, path from file") as cur: + for file_id, path in cur: + self._file_map[path] = file_id + + def _init_db(self, db: SqliteDb) -> None: + """Write the initial contents of the database.""" + self._debug_dataio("Initing data file", self._filename) + db.executescript(SCHEMA) + db.execute_void("INSERT INTO coverage_schema (version) VALUES (?)", (SCHEMA_VERSION,)) + + # When writing metadata, avoid information that will needlessly change + # the hash of the data file, unless we're debugging processes. + meta_data = [ + ("version", __version__), + ] + if self._debug.should("process"): + meta_data.extend( + [ + ("sys_argv", str(getattr(sys, "argv", None))), + ("when", datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")), + ] + ) + db.executemany_void("INSERT OR IGNORE INTO meta (key, value) VALUES (?, ?)", meta_data) + + def _connect(self) -> SqliteDb: + """Get the SqliteDb object to use.""" + if threading.get_ident() not in self._dbs: + self._open_db() + return self._dbs[threading.get_ident()] + + def __bool__(self) -> bool: + if threading.get_ident() not in self._dbs and not os.path.exists(self._filename): + return False + try: + with self._connect() as con: + with con.execute("SELECT * FROM file LIMIT 1") as cur: + return bool(list(cur)) + except CoverageException: + return False + + def dumps(self) -> bytes: + """Serialize the current data to a byte string. + + The format of the serialized data is not documented. It is only + suitable for use with :meth:`loads` in the same version of + coverage.py. + + Note that this serialization is not what gets stored in coverage data + files. This method is meant to produce bytes that can be transmitted + elsewhere and then deserialized with :meth:`loads`. + + Returns: + A byte string of serialized data. + + .. versionadded:: 5.0 + + """ + self._debug_dataio("Dumping data from data file", self._filename) + with self._connect() as con: + script = con.dump() + return b"z" + zlib.compress(script.encode("utf-8")) + + def loads(self, data: bytes) -> None: + """Deserialize data from :meth:`dumps`. + + Use with a newly-created empty :class:`CoverageData` object. It's + undefined what happens if the object already has data in it. + + Note that this is not for reading data from a coverage data file. It + is only for use on data you produced with :meth:`dumps`. + + Arguments: + data: A byte string of serialized data produced by :meth:`dumps`. + + .. versionadded:: 5.0 + + """ + self._debug_dataio("Loading data into data file", self._filename) + if data[:1] != b"z": + raise DataError( + f"Unrecognized serialization: {data[:40]!r} (head of {len(data)} bytes)", + ) + script = zlib.decompress(data[1:]).decode("utf-8") + self._dbs[threading.get_ident()] = db = SqliteDb(self._filename, self._debug, self._no_disk) + with db: + db.executescript(script) + self._read_db() + self._have_used = True + + def _file_id(self, filename: str, add: bool = False) -> int | None: + """Get the file id for `filename`. + + If filename is not in the database yet, add it if `add` is True. + If `add` is not True, return None. + """ + if filename not in self._file_map: + if add: + with self._connect() as con: + self._file_map[filename] = con.execute_for_rowid( + "INSERT OR REPLACE INTO file (path) VALUES (?)", + (filename,), + ) + return self._file_map.get(filename) + + def _context_id(self, context: str) -> int | None: + """Get the id for a context.""" + assert context is not None + self._start_using() + with self._connect() as con: + row = con.execute_one("SELECT id FROM context WHERE context = ?", (context,)) + if row is not None: + return cast(int, row[0]) + else: + return None + + @_locked + def set_context(self, context: str | None) -> None: + """Set the current context for future :meth:`add_lines` etc. + + `context` is a str, the name of the context to use for the next data + additions. The context persists until the next :meth:`set_context`. + + .. versionadded:: 5.0 + + """ + if self._debug.should("dataop"): + self._debug.write(f"Setting coverage context: {context!r}") + self._current_context = context + self._current_context_id = None + + def _set_context_id(self) -> None: + """Use the _current_context to set _current_context_id.""" + context = self._current_context or "" + context_id = self._context_id(context) + if context_id is not None: + self._current_context_id = context_id + else: + with self._connect() as con: + self._current_context_id = con.execute_for_rowid( + "INSERT INTO context (context) VALUES (?)", + (context,), + ) + + def base_filename(self) -> str: + """The base filename for storing data. + + .. versionadded:: 5.0 + + """ + return self._basename + + def data_filename(self) -> str: + """Where is the data stored? + + .. versionadded:: 5.0 + + """ + return self._filename + + @_locked + def add_lines(self, line_data: Mapping[str, Collection[TLineNo]]) -> None: + """Add measured line data. + + `line_data` is a dictionary mapping file names to iterables of ints:: + + { filename: { line1, line2, ... }, ...} + + """ + if self._debug.should("dataop"): + self._debug.write( + "Adding lines: %d files, %d lines total" + % ( + len(line_data), + sum(len(lines) for lines in line_data.values()), + ) + ) + if self._debug.should("dataop2"): + for filename, linenos in sorted(line_data.items()): + self._debug.write(f" {filename}: {linenos}") + self._start_using() + self._choose_lines_or_arcs(lines=True) + if not line_data: + return + with self._connect() as con: + self._set_context_id() + for filename, linenos in line_data.items(): + line_bits = nums_to_numbits(linenos) + file_id = self._file_id(filename, add=True) + query = "SELECT numbits FROM line_bits WHERE file_id = ? AND context_id = ?" + with con.execute(query, (file_id, self._current_context_id)) as cur: + existing = list(cur) + if existing: + line_bits = numbits_union(line_bits, existing[0][0]) + + con.execute_void( + """ + INSERT OR REPLACE INTO line_bits + (file_id, context_id, numbits) VALUES (?, ?, ?) + """, + (file_id, self._current_context_id, line_bits), + ) + + @_locked + def add_arcs(self, arc_data: Mapping[str, Collection[TArc]]) -> None: + """Add measured arc data. + + `arc_data` is a dictionary mapping file names to iterables of pairs of + ints:: + + { filename: { (l1,l2), (l1,l2), ... }, ...} + + """ + if self._debug.should("dataop"): + self._debug.write( + "Adding arcs: %d files, %d arcs total" + % ( + len(arc_data), + sum(len(arcs) for arcs in arc_data.values()), + ) + ) + if self._debug.should("dataop2"): + for filename, arcs in sorted(arc_data.items()): + self._debug.write(f" {filename}: {arcs}") + self._start_using() + self._choose_lines_or_arcs(arcs=True) + if not arc_data: + return + with self._connect() as con: + self._set_context_id() + for filename, arcs in arc_data.items(): + if not arcs: + continue + file_id = self._file_id(filename, add=True) + data = [(file_id, self._current_context_id, fromno, tono) for fromno, tono in arcs] + con.executemany_void( + """ + INSERT OR IGNORE INTO arc + (file_id, context_id, fromno, tono) VALUES (?, ?, ?, ?) + """, + data, + ) + + def _choose_lines_or_arcs(self, lines: bool = False, arcs: bool = False) -> None: + """Force the data file to choose between lines and arcs.""" + assert lines or arcs + assert not (lines and arcs) + if lines and self._has_arcs: + if self._debug.should("dataop"): + self._debug.write("Error: Can't add line measurements to existing branch data") + raise DataError("Can't add line measurements to existing branch data") + if arcs and self._has_lines: + if self._debug.should("dataop"): + self._debug.write("Error: Can't add branch measurements to existing line data") + raise DataError("Can't add branch measurements to existing line data") + if not self._has_arcs and not self._has_lines: + self._has_lines = lines + self._has_arcs = arcs + with self._connect() as con: + con.execute_void( + "INSERT OR IGNORE INTO meta (key, value) VALUES (?, ?)", + ("has_arcs", str(int(arcs))), + ) + + @_locked + def add_file_tracers(self, file_tracers: Mapping[str, str]) -> None: + """Add per-file plugin information. + + `file_tracers` is { filename: plugin_name, ... } + + """ + if self._debug.should("dataop"): + self._debug.write(f"Adding file tracers: {len(file_tracers)} files") + if not file_tracers: + return + self._start_using() + with self._connect() as con: + for filename, plugin_name in file_tracers.items(): + file_id = self._file_id(filename, add=True) + existing_plugin = self.file_tracer(filename) + if existing_plugin: + if existing_plugin != plugin_name: + raise DataError( + "Conflicting file tracer name for '{}': {!r} vs {!r}".format( + filename, + existing_plugin, + plugin_name, + ), + ) + elif plugin_name: + con.execute_void( + "INSERT INTO TRACER (file_id, tracer) VALUES (?, ?)", + (file_id, plugin_name), + ) + + def touch_file(self, filename: str, plugin_name: str = "") -> None: + """Ensure that `filename` appears in the data, empty if needed. + + `plugin_name` is the name of the plugin responsible for this file. + It is used to associate the right filereporter, etc. + """ + self.touch_files([filename], plugin_name) + + def touch_files(self, filenames: Collection[str], plugin_name: str | None = None) -> None: + """Ensure that `filenames` appear in the data, empty if needed. + + `plugin_name` is the name of the plugin responsible for these files. + It is used to associate the right filereporter, etc. + """ + if self._debug.should("dataop"): + self._debug.write(f"Touching {filenames!r}") + self._start_using() + with self._connect(): # Use this to get one transaction. + if not self._has_arcs and not self._has_lines: + raise DataError("Can't touch files in an empty CoverageData") + + for filename in filenames: + self._file_id(filename, add=True) + if plugin_name: + # Set the tracer for this file + self.add_file_tracers({filename: plugin_name}) + + def purge_files(self, filenames: Collection[str]) -> None: + """Purge any existing coverage data for the given `filenames`. + + .. versionadded:: 7.2 + + """ + if self._debug.should("dataop"): + self._debug.write(f"Purging data for {filenames!r}") + self._start_using() + with self._connect() as con: + if self._has_lines: + sql = "DELETE FROM line_bits WHERE file_id=?" + elif self._has_arcs: + sql = "DELETE FROM arc WHERE file_id=?" + else: + raise DataError("Can't purge files in an empty CoverageData") + + for filename in filenames: + file_id = self._file_id(filename, add=False) + if file_id is None: + continue + con.execute_void(sql, (file_id,)) + + def update( + self, + other_data: CoverageData, + map_path: Callable[[str], str] | None = None, + ) -> None: + """Update this data with data from another :class:`CoverageData`. + + If `map_path` is provided, it's a function that re-map paths to match + the local machine's. Note: `map_path` is None only when called + directly from the test suite. + + """ + if self._debug.should("dataop"): + self._debug.write( + "Updating with data from {!r}".format( + getattr(other_data, "_filename", "???"), + ) + ) + if self._has_lines and other_data._has_arcs: + raise DataError( + "Can't combine branch coverage data with statement data", slug="cant-combine" + ) + if self._has_arcs and other_data._has_lines: + raise DataError( + "Can't combine statement coverage data with branch data", slug="cant-combine" + ) + + map_path = map_path or (lambda p: p) + + # Force the database we're writing to to exist before we start nesting contexts. + self._start_using() + other_data.read() + + # Ensure other_data has a properly initialized database + with other_data._connect(): + pass + + with self._connect() as con: + assert con.con is not None + con.con.isolation_level = "IMMEDIATE" + + # Register functions for SQLite + con.con.create_function("numbits_union", 2, numbits_union) + con.con.create_function("map_path", 1, map_path) + con.con.create_aggregate( + "numbits_union_agg", + 1, + NumbitsUnionAgg, # type: ignore[arg-type] + ) + + # Attach the other database + con.execute_void("ATTACH DATABASE ? AS other_db", (other_data.data_filename(),)) + + # Create temporary table with mapped file paths to avoid repeated map_path() calls + con.execute_void(""" + CREATE TEMP TABLE other_file_mapped AS + SELECT + other_file.id as other_file_id, + map_path(other_file.path) as mapped_path + FROM other_db.file AS other_file + """) + + # Check for tracer conflicts before proceeding + with con.execute(""" + SELECT other_file_mapped.mapped_path, + COALESCE(main.tracer.tracer, ''), + COALESCE(other_db.tracer.tracer, '') + FROM main.file + LEFT JOIN main.tracer ON main.file.id = main.tracer.file_id + INNER JOIN other_file_mapped ON main.file.path = other_file_mapped.mapped_path + LEFT JOIN other_db.tracer ON other_file_mapped.other_file_id = other_db.tracer.file_id + WHERE COALESCE(main.tracer.tracer, '') != COALESCE(other_db.tracer.tracer, '') + """) as cur: + conflicts = list(cur) + if conflicts: + path, this_tracer, other_tracer = conflicts[0] + raise DataError( + "Conflicting file tracer name for '{}': {!r} vs {!r}".format( + path, + this_tracer, + other_tracer, + ), + ) + + # Insert missing files from other_db (with map_path applied) + con.execute_void(""" + INSERT OR IGNORE INTO main.file (path) + SELECT DISTINCT mapped_path FROM other_file_mapped + """) + + # Insert missing contexts from other_db + con.execute_void(""" + INSERT OR IGNORE INTO main.context (context) + SELECT context FROM other_db.context + """) + + # Update file_map with any new files + with con.execute("SELECT id, path FROM file") as cur: + self._file_map.update({path: id for id, path in cur}) + + with con.execute(""" + SELECT + EXISTS(SELECT 1 FROM other_db.arc), + EXISTS(SELECT 1 FROM other_db.line_bits) + """) as cur: + has_arcs, has_lines = cur.fetchone() + + # Handle arcs if present in other_db + if has_arcs: + self._choose_lines_or_arcs(arcs=True) + + # Create context mapping table for faster lookups + con.execute_void(""" + CREATE TEMP TABLE context_mapping AS + SELECT + other_context.id as other_id, + main_context.id as main_id + FROM other_db.context AS other_context + INNER JOIN main.context AS main_context ON other_context.context = main_context.context + """) + + con.execute_void(""" + INSERT OR IGNORE INTO main.arc (file_id, context_id, fromno, tono) + SELECT + main_file.id, + context_mapping.main_id, + other_arc.fromno, + other_arc.tono + FROM other_db.arc AS other_arc + INNER JOIN other_file_mapped ON other_arc.file_id = other_file_mapped.other_file_id + INNER JOIN context_mapping ON other_arc.context_id = context_mapping.other_id + INNER JOIN main.file AS main_file ON other_file_mapped.mapped_path = main_file.path + """) + + # Handle line_bits if present in other_db + if has_lines: + self._choose_lines_or_arcs(lines=True) + + # Handle line_bits by aggregating other_db data by mapped target, + # then inserting/updating + con.execute_void(""" + INSERT OR REPLACE INTO main.line_bits (file_id, context_id, numbits) + SELECT + main_file.id, + main_context.id, + numbits_union( + COALESCE(( + SELECT numbits FROM main.line_bits + WHERE file_id = main_file.id AND context_id = main_context.id + ), X''), + aggregated.combined_numbits + ) + FROM ( + SELECT + other_file_mapped.mapped_path, + other_context.context, + numbits_union_agg(other_line_bits.numbits) as combined_numbits + FROM other_db.line_bits AS other_line_bits + INNER JOIN other_file_mapped ON other_line_bits.file_id = other_file_mapped.other_file_id + INNER JOIN other_db.context AS other_context ON other_line_bits.context_id = other_context.id + GROUP BY other_file_mapped.mapped_path, other_context.context + ) AS aggregated + INNER JOIN main.file AS main_file ON aggregated.mapped_path = main_file.path + INNER JOIN main.context AS main_context ON aggregated.context = main_context.context + """) + + # Insert tracers from other_db (avoiding conflicts we already checked) + con.execute_void(""" + INSERT OR IGNORE INTO main.tracer (file_id, tracer) + SELECT + main_file.id, + other_tracer.tracer + FROM other_db.tracer AS other_tracer + INNER JOIN other_file_mapped ON other_tracer.file_id = other_file_mapped.other_file_id + INNER JOIN main.file AS main_file ON other_file_mapped.mapped_path = main_file.path + """) + + if not self._no_disk: + # Update all internal cache data. + self._reset() + self.read() + + def erase(self, parallel: bool = False) -> None: + """Erase the data in this object. + + If `parallel` is true, then also deletes data files created from the + basename by parallel-mode. + + """ + self._reset() + if self._no_disk: + return + self._debug_dataio("Erasing data file", self._filename) + file_be_gone(self._filename) + if parallel: + data_dir, local = os.path.split(self._filename) + local_abs_path = os.path.join(os.path.abspath(data_dir), local) + pattern = glob.escape(local_abs_path) + ".*" + for filename in glob.glob(pattern): + self._debug_dataio("Erasing parallel data file", filename) + file_be_gone(filename) + + def read(self) -> None: + """Start using an existing data file.""" + if os.path.exists(self._filename): + with self._connect(): + self._have_used = True + + def write(self) -> None: + """Ensure the data is written to the data file.""" + self._debug_dataio("Writing (no-op) data file", self._filename) + + def _start_using(self) -> None: + """Call this before using the database at all.""" + if self._pid != os.getpid(): + # Looks like we forked! Have to start a new data file. + self._reset() + self._choose_filename() + self._pid = os.getpid() + if not self._have_used: + self.erase() + self._have_used = True + + def has_arcs(self) -> bool: + """Does the database have arcs (True) or lines (False).""" + return bool(self._has_arcs) + + def measured_files(self) -> set[str]: + """A set of all files that have been measured. + + Note that a file may be mentioned as measured even though no lines or + arcs for that file are present in the data. + + """ + return set(self._file_map) + + def measured_contexts(self) -> set[str]: + """A set of all contexts that have been measured. + + .. versionadded:: 5.0 + + """ + self._start_using() + with self._connect() as con: + with con.execute("SELECT DISTINCT(context) FROM context") as cur: + contexts = {row[0] for row in cur} + return contexts + + def file_tracer(self, filename: str) -> str | None: + """Get the plugin name of the file tracer for a file. + + Returns the name of the plugin that handles this file. If the file was + measured, but didn't use a plugin, then "" is returned. If the file + was not measured, then None is returned. + + """ + self._start_using() + with self._connect() as con: + file_id = self._file_id(filename) + if file_id is None: + return None + row = con.execute_one("SELECT tracer FROM tracer WHERE file_id = ?", (file_id,)) + if row is not None: + return row[0] or "" + return "" # File was measured, but no tracer associated. + + def set_query_context(self, context: str) -> None: + """Set a context for subsequent querying. + + The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno` + calls will be limited to only one context. `context` is a string which + must match a context exactly. If it does not, no exception is raised, + but queries will return no data. + + .. versionadded:: 5.0 + + """ + self._start_using() + with self._connect() as con: + with con.execute("SELECT id FROM context WHERE context = ?", (context,)) as cur: + self._query_context_ids = [row[0] for row in cur.fetchall()] + + def set_query_contexts(self, contexts: Sequence[str] | None) -> None: + """Set a number of contexts for subsequent querying. + + The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno` + calls will be limited to the specified contexts. `contexts` is a list + of Python regular expressions. Contexts will be matched using + :func:`re.search `. Data will be included in query + results if they are part of any of the contexts matched. + + .. versionadded:: 5.0 + + """ + self._start_using() + if contexts: + with self._connect() as con: + context_clause = " or ".join(["context REGEXP ?"] * len(contexts)) + with con.execute("SELECT id FROM context WHERE " + context_clause, contexts) as cur: + self._query_context_ids = [row[0] for row in cur.fetchall()] + else: + self._query_context_ids = None + + def lines(self, filename: str) -> list[TLineNo] | None: + """Get the list of lines executed for a source file. + + If the file was not measured, returns None. A file might be measured, + and have no lines executed, in which case an empty list is returned. + + If the file was executed, returns a list of integers, the line numbers + executed in the file. The list is in no particular order. + + """ + self._start_using() + if self.has_arcs(): + arcs = self.arcs(filename) + if arcs is not None: + all_lines = itertools.chain.from_iterable(arcs) + return list({l for l in all_lines if l > 0}) + + with self._connect() as con: + file_id = self._file_id(filename) + if file_id is None: + return None + else: + query = "SELECT numbits FROM line_bits WHERE file_id = ?" + data = [file_id] + if self._query_context_ids is not None: + ids_array = ", ".join("?" * len(self._query_context_ids)) + query += " AND context_id IN (" + ids_array + ")" + data += self._query_context_ids + with con.execute(query, data) as cur: + bitmaps = list(cur) + nums = set() + for row in bitmaps: + nums.update(numbits_to_nums(row[0])) + return list(nums) + + def arcs(self, filename: str) -> list[TArc] | None: + """Get the list of arcs executed for a file. + + If the file was not measured, returns None. A file might be measured, + and have no arcs executed, in which case an empty list is returned. + + If the file was executed, returns a list of 2-tuples of integers. Each + pair is a starting line number and an ending line number for a + transition from one line to another. The list is in no particular + order. + + Negative numbers have special meaning. If the starting line number is + -N, it represents an entry to the code object that starts at line N. + If the ending ling number is -N, it's an exit from the code object that + starts at line N. + + """ + self._start_using() + with self._connect() as con: + file_id = self._file_id(filename) + if file_id is None: + return None + else: + query = "SELECT DISTINCT fromno, tono FROM arc WHERE file_id = ?" + data = [file_id] + if self._query_context_ids is not None: + ids_array = ", ".join("?" * len(self._query_context_ids)) + query += " AND context_id IN (" + ids_array + ")" + data += self._query_context_ids + with con.execute(query, data) as cur: + return list(cur) + + def contexts_by_lineno(self, filename: str) -> dict[TLineNo, list[str]]: + """Get the contexts for each line in a file. + + Returns: + A dict mapping line numbers to a list of context names. + + .. versionadded:: 5.0 + + """ + self._start_using() + with self._connect() as con: + file_id = self._file_id(filename) + if file_id is None: + return {} + + lineno_contexts_map = collections.defaultdict(set) + if self.has_arcs(): + query = """ + SELECT arc.fromno, arc.tono, context.context + FROM arc, context + WHERE arc.file_id = ? AND arc.context_id = context.id + """ + data = [file_id] + if self._query_context_ids is not None: + ids_array = ", ".join("?" * len(self._query_context_ids)) + query += " AND arc.context_id IN (" + ids_array + ")" + data += self._query_context_ids + with con.execute(query, data) as cur: + for fromno, tono, context in cur: + if fromno > 0: + lineno_contexts_map[fromno].add(context) + if tono > 0: + lineno_contexts_map[tono].add(context) + else: + query = """ + SELECT l.numbits, c.context FROM line_bits l, context c + WHERE l.context_id = c.id + AND file_id = ? + """ + data = [file_id] + if self._query_context_ids is not None: + ids_array = ", ".join("?" * len(self._query_context_ids)) + query += " AND l.context_id IN (" + ids_array + ")" + data += self._query_context_ids + with con.execute(query, data) as cur: + for numbits, context in cur: + for lineno in numbits_to_nums(numbits): + lineno_contexts_map[lineno].add(context) + + return {lineno: list(contexts) for lineno, contexts in lineno_contexts_map.items()} + + @classmethod + def sys_info(cls) -> list[tuple[str, Any]]: + """Our information for `Coverage.sys_info`. + + Returns a list of (key, value) pairs. + + """ + with SqliteDb(":memory:", debug=NoDebugging()) as db: + with db.execute("PRAGMA temp_store") as cur: + temp_store = [row[0] for row in cur] + with db.execute("PRAGMA compile_options") as cur: + copts = [row[0] for row in cur] + copts = textwrap.wrap(", ".join(copts), width=75) + + return [ + ("sqlite3_sqlite_version", sqlite3.sqlite_version), + ("sqlite3_temp_store", temp_store), + ("sqlite3_compile_options", copts), + ] + + +def filename_suffix(suffix: str | bool | None) -> str | None: + """Compute a filename suffix for a data file. + + If `suffix` is a string or None, simply return it. If `suffix` is True, + then build a suffix incorporating the hostname, process id, and a random + number. + + Returns a string or None. + + """ + if suffix is True: + # If data_suffix was a simple true value, then make a suffix with + # plenty of distinguishing information. We do this here in + # `save()` at the last minute so that the pid will be correct even + # if the process forks. + die = random.Random(os.urandom(8)) + letters = string.ascii_uppercase + string.ascii_lowercase + rolls = "".join(die.choice(letters) for _ in range(6)) + suffix = f"{socket.gethostname()}.{os.getpid()}.X{rolls}x" + elif suffix is False: + suffix = None + return suffix diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/sqlitedb.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/sqlitedb.py new file mode 100644 index 0000000..b05b7f1 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/sqlitedb.py @@ -0,0 +1,239 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""SQLite abstraction for coverage.py""" + +from __future__ import annotations + +import contextlib +import re +import sqlite3 +from collections.abc import Iterable, Iterator +from typing import Any, cast + +from coverage.debug import auto_repr, clipped_repr, exc_one_line +from coverage.exceptions import DataError +from coverage.types import TDebugCtl + + +class SqliteDb: + """A simple abstraction over a SQLite database. + + Use as a context manager, then you can use it like a + :class:`python:sqlite3.Connection` object:: + + with SqliteDb(filename, debug_control) as db: + with db.execute("select a, b from some_table") as cur: + for a, b in cur: + etc(a, b) + + """ + + def __init__(self, filename: str, debug: TDebugCtl, no_disk: bool = False) -> None: + self.debug = debug + self.filename = filename + self.no_disk = no_disk + self.nest = 0 + self.con: sqlite3.Connection | None = None + + __repr__ = auto_repr + + def _connect(self) -> None: + """Connect to the db and do universal initialization.""" + if self.con is not None: + return + + # It can happen that Python switches threads while the tracer writes + # data. The second thread will also try to write to the data, + # effectively causing a nested context. However, given the idempotent + # nature of the tracer operations, sharing a connection among threads + # is not a problem. + if self.debug.should("sql"): + self.debug.write(f"Connecting to {self.filename!r}") + try: + # Use uri=True when connecting to memory URIs + if self.filename.startswith("file:"): + self.con = sqlite3.connect(self.filename, check_same_thread=False, uri=True) + else: + self.con = sqlite3.connect(self.filename, check_same_thread=False) + except sqlite3.Error as exc: + raise DataError(f"Couldn't use data file {self.filename!r}: {exc}") from exc + + if self.debug.should("sql"): + self.debug.write(f"Connected to {self.filename!r} as {self.con!r}") + + self.con.create_function("REGEXP", 2, lambda txt, pat: re.search(txt, pat) is not None) + + # Turning off journal_mode can speed up writing. It can't always be + # disabled, so we have to be prepared for *-journal files elsewhere. + # In Python 3.12+, we can change the config to allow journal_mode=off. + if hasattr(sqlite3, "SQLITE_DBCONFIG_DEFENSIVE"): + # Turn off defensive mode, so that journal_mode=off can succeed. + self.con.setconfig( # type: ignore[attr-defined, unused-ignore] + sqlite3.SQLITE_DBCONFIG_DEFENSIVE, + False, + ) + + # This pragma makes writing faster. It disables rollbacks, but we never need them. + self.execute_void("pragma journal_mode=off") + + # This pragma makes writing faster. It can fail in unusual situations + # (https://github.com/coveragepy/coveragepy/issues/1646), so use fail_ok=True + # to keep things going. + self.execute_void("pragma synchronous=off", fail_ok=True) + + def close(self, force: bool = False) -> None: + """If needed, close the connection.""" + if self.con is not None: + if force or not self.no_disk: + if self.debug.should("sql"): + self.debug.write(f"Closing {self.con!r} on {self.filename!r}") + self.con.close() + self.con = None + + def __enter__(self) -> SqliteDb: + if self.nest == 0: + self._connect() + assert self.con is not None + self.con.__enter__() + self.nest += 1 + return self + + def __exit__(self, exc_type, exc_value, traceback) -> None: # type: ignore[no-untyped-def] + self.nest -= 1 + if self.nest == 0: + try: + assert self.con is not None + self.con.__exit__(exc_type, exc_value, traceback) + self.close() + except Exception as exc: + if self.debug.should("sql"): + self.debug.write(f"EXCEPTION from __exit__: {exc_one_line(exc)}") + raise DataError(f"Couldn't end data file {self.filename!r}: {exc}") from exc + + def _execute(self, sql: str, parameters: Iterable[Any]) -> sqlite3.Cursor: + """Same as :meth:`python:sqlite3.Connection.execute`.""" + if self.debug.should("sql"): + tail = f" with {parameters!r}" if parameters else "" + self.debug.write(f"Executing {sql!r}{tail}") + try: + assert self.con is not None + try: + return self.con.execute(sql, parameters) # type: ignore[arg-type] + except Exception: + # In some cases, an error might happen that isn't really an + # error. Try again immediately. + # https://github.com/coveragepy/coveragepy/issues/1010 + return self.con.execute(sql, parameters) # type: ignore[arg-type] + except sqlite3.Error as exc: + msg = str(exc) + if not self.no_disk: + try: + # `execute` is the first thing we do with the database, so try + # hard to provide useful hints if something goes wrong now. + with open(self.filename, "rb") as bad_file: + cov4_sig = b"!coverage.py: This is a private format" + if bad_file.read(len(cov4_sig)) == cov4_sig: + msg = ( + "Looks like a coverage 4.x data file. " + + "Are you mixing versions of coverage?" + ) + except Exception: + pass + if self.debug.should("sql"): + self.debug.write(f"EXCEPTION from execute: {exc_one_line(exc)}") + raise DataError(f"Couldn't use data file {self.filename!r}: {msg}") from exc + + @contextlib.contextmanager + def execute( + self, + sql: str, + parameters: Iterable[Any] = (), + ) -> Iterator[sqlite3.Cursor]: + """Context managed :meth:`python:sqlite3.Connection.execute`. + + Use with a ``with`` statement to auto-close the returned cursor. + """ + cur = self._execute(sql, parameters) + try: + yield cur + finally: + cur.close() + + def execute_void(self, sql: str, parameters: Iterable[Any] = (), fail_ok: bool = False) -> None: + """Same as :meth:`python:sqlite3.Connection.execute` when you don't need the cursor. + + If `fail_ok` is True, then SQLite errors are ignored. + """ + try: + # PyPy needs the .close() calls here, or sqlite gets twisted up: + # https://bitbucket.org/pypy/pypy/issues/2872/default-isolation-mode-is-different-on + self._execute(sql, parameters).close() + except DataError: + if not fail_ok: + raise + + def execute_for_rowid(self, sql: str, parameters: Iterable[Any] = ()) -> int: + """Like execute, but returns the lastrowid.""" + with self.execute(sql, parameters) as cur: + assert cur.lastrowid is not None + rowid: int = cur.lastrowid + if self.debug.should("sqldata"): + self.debug.write(f"Row id result: {rowid!r}") + return rowid + + def execute_one(self, sql: str, parameters: Iterable[Any] = ()) -> tuple[Any, ...] | None: + """Execute a statement and return the one row that results. + + This is like execute(sql, parameters).fetchone(), except it is + correct in reading the entire result set. This will raise an + exception if more than one row results. + + Returns a row, or None if there were no rows. + """ + with self.execute(sql, parameters) as cur: + rows = list(cur) + if len(rows) == 0: + return None + elif len(rows) == 1: + return cast(tuple[Any, ...], rows[0]) + else: + raise AssertionError(f"SQL {sql!r} shouldn't return {len(rows)} rows") + + def _executemany(self, sql: str, data: list[Any]) -> sqlite3.Cursor: + """Same as :meth:`python:sqlite3.Connection.executemany`.""" + if self.debug.should("sql"): + final = ":" if self.debug.should("sqldata") else "" + self.debug.write(f"Executing many {sql!r} with {len(data)} rows{final}") + if self.debug.should("sqldata"): + for i, row in enumerate(data): + self.debug.write(f"{i:4d}: {row!r}") + assert self.con is not None + try: + return self.con.executemany(sql, data) + except Exception: + # In some cases, an error might happen that isn't really an + # error. Try again immediately. + # https://github.com/coveragepy/coveragepy/issues/1010 + return self.con.executemany(sql, data) + + def executemany_void(self, sql: str, data: list[Any]) -> None: + """Same as :meth:`python:sqlite3.Connection.executemany` when you don't need the cursor.""" + self._executemany(sql, data).close() + + def executescript(self, script: str) -> None: + """Same as :meth:`python:sqlite3.Connection.executescript`.""" + if self.debug.should("sql"): + self.debug.write( + "Executing script with {} chars: {}".format( + len(script), + clipped_repr(script, 100), + ) + ) + assert self.con is not None + self.con.executescript(script).close() + + def dump(self) -> str: + """Return a multi-line string, the SQL dump of the database.""" + assert self.con is not None + return "\n".join(self.con.iterdump()) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/sysmon.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/sysmon.py new file mode 100644 index 0000000..3696500 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/sysmon.py @@ -0,0 +1,482 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Callback functions and support for sys.monitoring data collection.""" + +from __future__ import annotations + +import functools +import inspect +import os +import os.path +import sys +import threading +import traceback +from dataclasses import dataclass +from types import CodeType +from typing import Any, Callable, NewType, Optional, cast + +from coverage import env +from coverage.bytecode import TBranchTrails, always_jumps, branch_trails +from coverage.debug import short_filename, short_stack +from coverage.exceptions import NotPython +from coverage.misc import isolate_module +from coverage.parser import PythonParser +from coverage.types import ( + AnyCallable, + TFileDisposition, + TLineNo, + TOffset, + Tracer, + TShouldStartContextFn, + TShouldTraceFn, + TTraceData, + TTraceFileData, + TWarnFn, +) + +# Only needed for some of the commented-out logging: +# from coverage.debug import ppformat + +os = isolate_module(os) + +# pylint: disable=unused-argument + +# $set_env.py: COVERAGE_SYSMON_LOG - Log sys.monitoring activity +LOG = bool(int(os.getenv("COVERAGE_SYSMON_LOG", 0))) + +# $set_env.py: COVERAGE_SYSMON_STATS - Collect sys.monitoring stats +COLLECT_STATS = bool(int(os.getenv("COVERAGE_SYSMON_STATS", 0))) + +# This module will be imported in all versions of Python, but only used in 3.12+ +# It will be type-checked for 3.12, but not for earlier versions. +sys_monitoring = getattr(sys, "monitoring", None) + +DISABLE_TYPE = NewType("DISABLE_TYPE", object) +MonitorReturn = Optional[DISABLE_TYPE] +DISABLE = cast(MonitorReturn, getattr(sys_monitoring, "DISABLE", None)) + + +if LOG: # pragma: debugging + + class LoggingWrapper: + """Wrap a namespace to log all its functions.""" + + def __init__(self, wrapped: Any, namespace: str) -> None: + self.wrapped = wrapped + self.namespace = namespace + + def __getattr__(self, name: str) -> Callable[..., Any]: + def _wrapped(*args: Any, **kwargs: Any) -> Any: + log(f"{self.namespace}.{name}{args}{kwargs}") + return getattr(self.wrapped, name)(*args, **kwargs) + + return _wrapped + + sys_monitoring = LoggingWrapper(sys_monitoring, "sys.monitoring") + assert sys_monitoring is not None + + short_stack = functools.partial( + short_stack, + full=True, + short_filenames=True, + frame_ids=True, + ) + seen_threads: set[int] = set() + + def log(msg: str) -> None: + """Write a message to our detailed debugging log(s).""" + # Thread ids are reused across processes? + # Make a shorter number more likely to be unique. + pid = os.getpid() + tid = cast(int, threading.current_thread().ident) + tslug = f"{(pid * tid) % 9_999_991:07d}" + if tid not in seen_threads: + seen_threads.add(tid) + log(f"New thread {tid} {tslug}:\n{short_stack()}") + # log_seq = int(os.getenv("PANSEQ", "0")) + # root = f"/tmp/pan.{log_seq:03d}" + for filename in [ + "/tmp/foo.out", + # f"{root}.out", + # f"{root}-{pid}.out", + # f"{root}-{pid}-{tslug}.out", + ]: + with open(filename, "a", encoding="utf-8") as f: + try: + print(f"{pid}:{tslug}: {msg}", file=f, flush=True) + except UnicodeError: + print(f"{pid}:{tslug}: {ascii(msg)}", file=f, flush=True) + + def arg_repr(arg: Any) -> str: + """Make a customized repr for logged values.""" + if isinstance(arg, CodeType): + return ( + f"" + ) + return repr(arg) + + def panopticon(*names: str | None) -> AnyCallable: + """Decorate a function to log its calls.""" + + def _decorator(method: AnyCallable) -> AnyCallable: + @functools.wraps(method) + def _wrapped(self: Any, *args: Any) -> Any: + try: + # log(f"{method.__name__}() stack:\n{short_stack()}") + args_reprs = [] + for name, arg in zip(names, args): + if name is None: + continue + args_reprs.append(f"{name}={arg_repr(arg)}") + log(f"{id(self):#x}:{method.__name__}({', '.join(args_reprs)})") + ret = method(self, *args) + # log(f" end {id(self):#x}:{method.__name__}({', '.join(args_reprs)})") + return ret + except Exception as exc: + log(f"!!{exc.__class__.__name__}: {exc}") + if 1: + log("".join(traceback.format_exception(exc))) + try: + assert sys_monitoring is not None + sys_monitoring.set_events(sys.monitoring.COVERAGE_ID, 0) + except ValueError: + # We might have already shut off monitoring. + log("oops, shutting off events with disabled tool id") + raise + + return _wrapped + + return _decorator + +else: + + def log(msg: str) -> None: + """Write a message to our detailed debugging log(s), but not really.""" + + def panopticon(*names: str | None) -> AnyCallable: + """Decorate a function to log its calls, but not really.""" + + def _decorator(meth: AnyCallable) -> AnyCallable: + return meth + + return _decorator + + +@dataclass +class CodeInfo: + """The information we want about each code object.""" + + tracing: bool + file_data: TTraceFileData | None + byte_to_line: dict[TOffset, TLineNo] | None + + # Keys are start instruction offsets for branches. + # Values are dicts: + # { + # (from_line, to_line): {offset, offset, ...}, + # (from_line, to_line): {offset, offset, ...}, + # } + branch_trails: TBranchTrails + + # Always-jumps are bytecode offsets that do no work but move + # to another offset. + always_jumps: dict[TOffset, TOffset] + + +def bytes_to_lines(code: CodeType) -> dict[TOffset, TLineNo]: + """Make a dict mapping byte code offsets to line numbers.""" + b2l = {} + for bstart, bend, lineno in code.co_lines(): + if lineno is not None: + for boffset in range(bstart, bend, 2): + b2l[boffset] = lineno + return b2l + + +class SysMonitor(Tracer): + """Python implementation of the raw data tracer for PEP669 implementations.""" + + # One of these will be used across threads. Be careful. + + def __init__(self, tool_id: int) -> None: + # Attributes set from the collector: + self.data: TTraceData + self.trace_arcs = False + self.should_trace: TShouldTraceFn + self.should_trace_cache: dict[str, TFileDisposition | None] + # TODO: should_start_context and switch_context are unused! + # Change tests/testenv.py:DYN_CONTEXTS when this is updated. + self.should_start_context: TShouldStartContextFn | None = None + self.switch_context: Callable[[str | None], None] | None = None + self.lock_data: Callable[[], None] + self.unlock_data: Callable[[], None] + # TODO: warn is unused. + self.warn: TWarnFn + + self.myid = tool_id + + # Map id(code_object) -> CodeInfo + self.code_infos: dict[int, CodeInfo] = {} + # A list of code_objects, just to keep them alive so that id's are + # useful as identity. + self.code_objects: list[CodeType] = [] + self.sysmon_on = False + self.lock = threading.Lock() + + self.stats: dict[str, int] | None = None + if COLLECT_STATS: + self.stats = dict.fromkeys( + "starts start_tracing returns line_lines line_arcs branches branch_trails".split(), + 0, + ) + + self._activity = False + + def __repr__(self) -> str: + points = sum(len(v) for v in self.data.values()) + files = len(self.data) + return f"" + + @panopticon() + def start(self) -> None: + """Start this Tracer.""" + with self.lock: + assert sys_monitoring is not None + sys_monitoring.use_tool_id(self.myid, "coverage.py") + register = functools.partial(sys_monitoring.register_callback, self.myid) + events = sys.monitoring.events + + sys_monitoring.set_events(self.myid, events.PY_START) + register(events.PY_START, self.sysmon_py_start) + if self.trace_arcs: + register(events.PY_RETURN, self.sysmon_py_return) + register(events.LINE, self.sysmon_line_arcs) + if env.PYBEHAVIOR.branch_right_left: + register( + events.BRANCH_RIGHT, # type:ignore[attr-defined] + self.sysmon_branch_either, + ) + register( + events.BRANCH_LEFT, # type:ignore[attr-defined] + self.sysmon_branch_either, + ) + else: + register(events.LINE, self.sysmon_line_lines) + sys_monitoring.restart_events() + self.sysmon_on = True + + @panopticon() + def stop(self) -> None: + """Stop this Tracer.""" + with self.lock: + if not self.sysmon_on: + # In forking situations, we might try to stop when we are not + # started. Do nothing in that case. + return + assert sys_monitoring is not None + sys_monitoring.set_events(self.myid, 0) + self.sysmon_on = False + sys_monitoring.free_tool_id(self.myid) + + @panopticon() + def post_fork(self) -> None: + """The process has forked, clean up as needed.""" + self.stop() + + def activity(self) -> bool: + """Has there been any activity?""" + return self._activity + + def reset_activity(self) -> None: + """Reset the activity() flag.""" + self._activity = False + + def get_stats(self) -> dict[str, int] | None: + """Return a dictionary of statistics, or None.""" + return self.stats + + @panopticon("code", "@") + def sysmon_py_start(self, code: CodeType, instruction_offset: TOffset) -> MonitorReturn: + """Handle sys.monitoring.events.PY_START events.""" + # Entering a new frame. Decide if we should trace in this file. + self._activity = True + if self.stats is not None: + self.stats["starts"] += 1 + + code_info = self.code_infos.get(id(code)) + tracing_code: bool | None = None + file_data: TTraceFileData | None = None + if code_info is not None: + tracing_code = code_info.tracing + file_data = code_info.file_data + + if tracing_code is None: + filename = code.co_filename + disp = self.should_trace_cache.get(filename) + if disp is None: + frame = inspect.currentframe() + if frame is not None: + frame = inspect.currentframe().f_back # type: ignore[union-attr] + if LOG: + # @panopticon adds a frame. + frame = frame.f_back # type: ignore[union-attr] + disp = self.should_trace(filename, frame) # type: ignore[arg-type] + self.should_trace_cache[filename] = disp + + tracing_code = disp.trace + if tracing_code: + tracename = disp.source_filename + assert tracename is not None + self.lock_data() + try: + if tracename not in self.data: + self.data[tracename] = set() + finally: + self.unlock_data() + file_data = self.data[tracename] + b2l = bytes_to_lines(code) + else: + file_data = None + b2l = None + + code_info = CodeInfo( + tracing=tracing_code, + file_data=file_data, + byte_to_line=b2l, + branch_trails={}, + always_jumps={}, + ) + self.code_infos[id(code)] = code_info + self.code_objects.append(code) + + if tracing_code: + if self.stats is not None: + self.stats["start_tracing"] += 1 + events = sys.monitoring.events + with self.lock: + if self.sysmon_on: + assert sys_monitoring is not None + local_events = events.PY_RETURN | events.PY_RESUME | events.LINE + if self.trace_arcs: + assert env.PYBEHAVIOR.branch_right_left + local_events |= ( + events.BRANCH_RIGHT # type:ignore[attr-defined] + | events.BRANCH_LEFT # type:ignore[attr-defined] + ) + sys_monitoring.set_local_events(self.myid, code, local_events) + + return DISABLE + + @panopticon("code", "@", None) + def sysmon_py_return( + self, + code: CodeType, + instruction_offset: TOffset, + retval: object, + ) -> MonitorReturn: + """Handle sys.monitoring.events.PY_RETURN events for branch coverage.""" + if self.stats is not None: + self.stats["returns"] += 1 + code_info = self.code_infos.get(id(code)) + # code_info is not None and code_info.file_data is not None, since we + # wouldn't have enabled this event if they were. + last_line = code_info.byte_to_line[instruction_offset] # type: ignore + if last_line is not None: + arc = (last_line, -code.co_firstlineno) + code_info.file_data.add(arc) # type: ignore + # log(f"adding {arc=}") + return DISABLE + + @panopticon("code", "line") + def sysmon_line_lines(self, code: CodeType, line_number: TLineNo) -> MonitorReturn: + """Handle sys.monitoring.events.LINE events for line coverage.""" + if self.stats is not None: + self.stats["line_lines"] += 1 + code_info = self.code_infos.get(id(code)) + # It should be true that code_info is not None and code_info.file_data + # is not None, since we wouldn't have enabled this event if they were. + # But somehow code_info can be None here, so we have to check. + if code_info is not None and code_info.file_data is not None: + code_info.file_data.add(line_number) # type: ignore + # log(f"adding {line_number=}") + return DISABLE + + @panopticon("code", "line") + def sysmon_line_arcs(self, code: CodeType, line_number: TLineNo) -> MonitorReturn: + """Handle sys.monitoring.events.LINE events for branch coverage.""" + if self.stats is not None: + self.stats["line_arcs"] += 1 + code_info = self.code_infos[id(code)] + # code_info is not None and code_info.file_data is not None, since we + # wouldn't have enabled this event if they were. + arc = (line_number, line_number) + code_info.file_data.add(arc) # type: ignore + # log(f"adding {arc=}") + return DISABLE + + @panopticon("code", "@", "@") + def sysmon_branch_either( + self, code: CodeType, instruction_offset: TOffset, destination_offset: TOffset + ) -> MonitorReturn: + """Handle BRANCH_RIGHT and BRANCH_LEFT events.""" + if self.stats is not None: + self.stats["branches"] += 1 + code_info = self.code_infos[id(code)] + # code_info is not None and code_info.file_data is not None, since we + # wouldn't have enabled this event if they were. + if not code_info.branch_trails: + if self.stats is not None: + self.stats["branch_trails"] += 1 + multiline_map = get_multiline_map(code.co_filename) + code_info.branch_trails = branch_trails(code, multiline_map=multiline_map) + code_info.always_jumps = always_jumps(code) + # log(f"branch_trails for {code}:\n{ppformat(code_info.branch_trails)}") + added_arc = False + dest_info = code_info.branch_trails.get(instruction_offset) + + # Re-map the destination offset through always-jumps to deal with NOP etc. + dests = {destination_offset} + while (dest := code_info.always_jumps.get(destination_offset)) is not None: + destination_offset = dest + dests.add(destination_offset) + + # log(f"dest_info = {ppformat(dest_info)}") + if dest_info is not None: + for arc, offsets in dest_info.items(): + if arc is None: + continue + if dests & offsets: + code_info.file_data.add(arc) # type: ignore + # log(f"adding {arc=}") + added_arc = True + break + + if not added_arc: + # This could be an exception jumping from line to line. + assert code_info.byte_to_line is not None + l1 = code_info.byte_to_line[instruction_offset] + l2 = code_info.byte_to_line.get(destination_offset) + if l2 is not None and l1 != l2: + arc = (l1, l2) + code_info.file_data.add(arc) # type: ignore + # log(f"adding unforeseen {arc=}") + + return DISABLE + + +@functools.lru_cache(maxsize=5) +def get_multiline_map(filename: str) -> dict[TLineNo, TLineNo]: + """Get a PythonParser for the given filename, cached.""" + parser = PythonParser(filename=filename) + try: + parser.parse_source() + except NotPython: + # The file was not Python. This can happen when the code object refers + # to an original non-Python source file, like a Jinja template. + # In that case, just return an empty map, which might lead to slightly + # wrong branch coverage, but we don't have any better option. + return {} + return parser.multiline_map diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/templite.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/templite.py new file mode 100644 index 0000000..e4abd00 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/templite.py @@ -0,0 +1,306 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""A simple Python template renderer, for a nano-subset of Django syntax. + +For a detailed discussion of this code, see this chapter from 500 Lines: +http://aosabook.org/en/500L/a-template-engine.html + +""" + +# Coincidentally named the same as http://code.activestate.com/recipes/496702/ + +from __future__ import annotations + +import re +from typing import Any, Callable, NoReturn, cast + + +class TempliteSyntaxError(ValueError): + """Raised when a template has a syntax error.""" + + pass + + +class TempliteValueError(ValueError): + """Raised when an expression won't evaluate in a template.""" + + pass + + +class CodeBuilder: + """Build source code conveniently.""" + + def __init__(self, indent: int = 0) -> None: + self.code: list[str | CodeBuilder] = [] + self.indent_level = indent + + def __str__(self) -> str: + return "".join(str(c) for c in self.code) + + def add_line(self, line: str) -> None: + """Add a line of source to the code. + + Indentation and newline will be added for you, don't provide them. + + """ + self.code.extend([" " * self.indent_level, line, "\n"]) + + def add_section(self) -> CodeBuilder: + """Add a section, a sub-CodeBuilder.""" + section = CodeBuilder(self.indent_level) + self.code.append(section) + return section + + INDENT_STEP = 4 # PEP8 says so! + + def indent(self) -> None: + """Increase the current indent for following lines.""" + self.indent_level += self.INDENT_STEP + + def dedent(self) -> None: + """Decrease the current indent for following lines.""" + self.indent_level -= self.INDENT_STEP + + def get_globals(self) -> dict[str, Any]: + """Execute the code, and return a dict of globals it defines.""" + # A check that the caller really finished all the blocks they started. + assert self.indent_level == 0 + # Get the Python source as a single string. + python_source = str(self) + # Execute the source, defining globals, and return them. + global_namespace: dict[str, Any] = {} + exec(python_source, global_namespace) + return global_namespace + + +class Templite: + """A simple template renderer, for a nano-subset of Django syntax. + + Supported constructs are extended variable access:: + + {{var.modifier.modifier|filter|filter}} + + loops:: + + {% for var in list %}...{% endfor %} + + and ifs:: + + {% if var %}...{% endif %} + + Comments are within curly-hash markers:: + + {# This will be ignored #} + + Lines between `{% joined %}` and `{% endjoined %}` will have lines stripped + and joined. Be careful, this could join words together! + + Any of these constructs can have a hyphen at the end (`-}}`, `-%}`, `-#}`), + which will collapse the white space following the tag. + + Construct a Templite with the template text, then use `render` against a + dictionary context to create a finished string:: + + templite = Templite(''' +

Hello {{name|upper}}!

+ {% for topic in topics %} +

You are interested in {{topic}}.

+ {% endif %} + ''', + {"upper": str.upper}, + ) + text = templite.render({ + "name": "Ned", + "topics": ["Python", "Geometry", "Juggling"], + }) + + """ + + def __init__(self, text: str, *contexts: dict[str, Any]) -> None: + """Construct a Templite with the given `text`. + + `contexts` are dictionaries of values to use for future renderings. + These are good for filters and global values. + + """ + self.context = {} + for context in contexts: + self.context.update(context) + + self.all_vars: set[str] = set() + self.loop_vars: set[str] = set() + + # We construct a function in source form, then compile it and hold onto + # it, and execute it to render the template. + code = CodeBuilder() + + code.add_line("def render_function(context, do_dots):") + code.indent() + vars_code = code.add_section() + code.add_line("result = []") + code.add_line("append_result = result.append") + code.add_line("extend_result = result.extend") + code.add_line("to_str = str") + + buffered: list[str] = [] + + def flush_output() -> None: + """Force `buffered` to the code builder.""" + if len(buffered) == 1: + code.add_line("append_result(%s)" % buffered[0]) + elif len(buffered) > 1: + code.add_line("extend_result([%s])" % ", ".join(buffered)) + del buffered[:] + + ops_stack = [] + + # Split the text to form a list of tokens. + tokens = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text) + + squash = in_joined = False + + for token in tokens: + if token.startswith("{"): + start, end = 2, -2 + squash = (token[-3] == "-") # fmt: skip + if squash: + end = -3 + + if token.startswith("{#"): + # Comment: ignore it and move on. + continue + elif token.startswith("{{"): + # An expression to evaluate. + expr = self._expr_code(token[start:end].strip()) + buffered.append("to_str(%s)" % expr) + else: + # token.startswith("{%") + # Action tag: split into words and parse further. + flush_output() + + words = token[start:end].strip().split() + if words[0] == "if": + # An if statement: evaluate the expression to determine if. + if len(words) != 2: + self._syntax_error("Don't understand if", token) + ops_stack.append("if") + code.add_line("if %s:" % self._expr_code(words[1])) + code.indent() + elif words[0] == "for": + # A loop: iterate over expression result. + if len(words) != 4 or words[2] != "in": + self._syntax_error("Don't understand for", token) + ops_stack.append("for") + self._variable(words[1], self.loop_vars) + code.add_line( + f"for c_{words[1]} in {self._expr_code(words[3])}:", + ) + code.indent() + elif words[0] == "joined": + ops_stack.append("joined") + in_joined = True + elif words[0].startswith("end"): + # Endsomething. Pop the ops stack. + if len(words) != 1: + self._syntax_error("Don't understand end", token) + end_what = words[0][3:] + if not ops_stack: + self._syntax_error("Too many ends", token) + start_what = ops_stack.pop() + if start_what != end_what: + self._syntax_error("Mismatched end tag", end_what) + if end_what == "joined": + in_joined = False + else: + code.dedent() + else: + self._syntax_error("Don't understand tag", words[0]) + else: + # Literal content. If it isn't empty, output it. + if in_joined: + token = re.sub(r"\s*\n\s*", "", token.strip()) + elif squash: + token = token.lstrip() + if token: + buffered.append(repr(token)) + + if ops_stack: + self._syntax_error("Unmatched action tag", ops_stack[-1]) + + flush_output() + + for var_name in self.all_vars - self.loop_vars: + vars_code.add_line(f"c_{var_name} = context[{var_name!r}]") + + code.add_line("return ''.join(result)") + code.dedent() + self._render_function = cast( + Callable[ + [dict[str, Any], Callable[..., Any]], + str, + ], + code.get_globals()["render_function"], + ) + + def _expr_code(self, expr: str) -> str: + """Generate a Python expression for `expr`.""" + if "|" in expr: + pipes = expr.split("|") + code = self._expr_code(pipes[0]) + for func in pipes[1:]: + self._variable(func, self.all_vars) + code = f"c_{func}({code})" + elif "." in expr: + dots = expr.split(".") + code = self._expr_code(dots[0]) + args = ", ".join(repr(d) for d in dots[1:]) + code = f"do_dots({code}, {args})" + else: + self._variable(expr, self.all_vars) + code = "c_%s" % expr + return code + + def _syntax_error(self, msg: str, thing: Any) -> NoReturn: + """Raise a syntax error using `msg`, and showing `thing`.""" + raise TempliteSyntaxError(f"{msg}: {thing!r}") + + def _variable(self, name: str, vars_set: set[str]) -> None: + """Track that `name` is used as a variable. + + Adds the name to `vars_set`, a set of variable names. + + Raises an syntax error if `name` is not a valid name. + + """ + if not re.match(r"[_a-zA-Z][_a-zA-Z0-9]*$", name): + self._syntax_error("Not a valid name", name) + vars_set.add(name) + + def render(self, context: dict[str, Any] | None = None) -> str: + """Render this template by applying it to `context`. + + `context` is a dictionary of values to use in this rendering. + + """ + # Make the complete context we'll use. + render_context = dict(self.context) + if context: + render_context.update(context) + return self._render_function(render_context, self._do_dots) + + def _do_dots(self, value: Any, *dots: str) -> Any: + """Evaluate dotted expressions at run-time.""" + for dot in dots: + try: + value = getattr(value, dot) + except AttributeError: + try: + value = value[dot] + except (TypeError, KeyError) as exc: + raise TempliteValueError( + f"Couldn't evaluate {value!r}.{dot}", + ) from exc + if callable(value): + value = value() + return value diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/tomlconfig.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/tomlconfig.py new file mode 100644 index 0000000..2a1f7df --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/tomlconfig.py @@ -0,0 +1,210 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""TOML configuration support for coverage.py""" + +from __future__ import annotations + +import os +import re +from collections.abc import Iterable +from typing import Any, Callable, TypeVar + +from coverage import config, env +from coverage.exceptions import ConfigError +from coverage.misc import import_third_party, isolate_module, substitute_variables +from coverage.types import TConfigSectionOut, TConfigValueOut + +os = isolate_module(os) + +if env.PYVERSION >= (3, 11, 0, "alpha", 7): + import tomllib # pylint: disable=import-error + + has_tomllib = True +else: + # TOML support on Python 3.10 and below is an install-time extra option. + tomllib, has_tomllib = import_third_party("tomli") + + +class TomlDecodeError(Exception): + """An exception class that exists even when toml isn't installed.""" + + pass + + +TWant = TypeVar("TWant") + + +class TomlConfigParser: + """TOML file reading with the interface of HandyConfigParser.""" + + # This class has the same interface as config.HandyConfigParser, no + # need for docstrings. + # pylint: disable=missing-function-docstring + + def __init__(self, our_file: bool) -> None: + self.our_file = our_file + self.data: dict[str, Any] = {} + + def read(self, filenames: Iterable[str]) -> list[str]: + # RawConfigParser takes a filename or list of filenames, but we only + # ever call this with a single filename. + assert isinstance(filenames, (bytes, str, os.PathLike)) + filename = os.fspath(filenames) + + try: + with open(filename, encoding="utf-8") as fp: + toml_text = fp.read() + except OSError: + return [] + if has_tomllib: + try: + self.data = tomllib.loads(toml_text) + except tomllib.TOMLDecodeError as err: + raise TomlDecodeError(str(err)) from err + return [filename] + else: + has_toml = re.search(r"^\[tool\.coverage(\.|])", toml_text, flags=re.MULTILINE) + if self.our_file or has_toml: + # Looks like they meant to read TOML, but we can't read it. + msg = "Can't read {!r} without TOML support. Install with [toml] extra" + raise ConfigError(msg.format(filename)) + return [] + + def _get_section(self, section: str) -> tuple[str | None, TConfigSectionOut | None]: + """Get a section from the data. + + Arguments: + section (str): A section name, which can be dotted. + + Returns: + name (str): the actual name of the section that was found, if any, + or None. + data (str): the dict of data in the section, or None if not found. + + """ + prefixes = ["tool.coverage."] + for prefix in prefixes: + real_section = prefix + section + parts = real_section.split(".") + try: + data = self.data[parts[0]] + for part in parts[1:]: + data = data[part] + except KeyError: + continue + break + else: + return None, None + return real_section, data + + def _get(self, section: str, option: str) -> tuple[str, TConfigValueOut]: + """Like .get, but returns the real section name and the value.""" + name, data = self._get_section(section) + if data is None: + raise ConfigError(f"No section: {section!r}") + assert name is not None + try: + value = data[option] + except KeyError: + raise ConfigError(f"No option {option!r} in section: {name!r}") from None + return name, value + + def _get_single(self, section: str, option: str) -> Any: + """Get a single-valued option. + + Performs environment substitution if the value is a string. Other types + will be converted later as needed. + """ + name, value = self._get(section, option) + if isinstance(value, str): + value = substitute_variables(value, os.environ) + return name, value + + def has_option(self, section: str, option: str) -> bool: + _, data = self._get_section(section) + if data is None: + return False + return option in data + + def real_section(self, section: str) -> str | None: + name, _ = self._get_section(section) + return name + + def has_section(self, section: str) -> bool: + name, _ = self._get_section(section) + return bool(name) + + def options(self, section: str) -> list[str]: + _, data = self._get_section(section) + if data is None: + raise ConfigError(f"No section: {section!r}") + return list(data.keys()) + + def get_section(self, section: str) -> TConfigSectionOut: + _, data = self._get_section(section) + return data or {} + + def get(self, section: str, option: str) -> Any: + _, value = self._get_single(section, option) + return value + + def _check_type( + self, + section: str, + option: str, + value: Any, + type_: type[TWant], + converter: Callable[[Any], TWant] | None, + type_desc: str, + ) -> TWant: + """Check that `value` has the type we want, converting if needed. + + Returns the resulting value of the desired type. + """ + if isinstance(value, type_): + return value + if isinstance(value, str) and converter is not None: + try: + return converter(value) + except Exception as e: + raise ValueError( + f"Option [{section}]{option} couldn't convert to {type_desc}: {value!r}", + ) from e + raise ValueError( + f"Option [{section}]{option} is not {type_desc}: {value!r}", + ) + + def getboolean(self, section: str, option: str) -> bool: + name, value = self._get_single(section, option) + bool_strings = {"true": True, "false": False} + return self._check_type(name, option, value, bool, bool_strings.__getitem__, "a boolean") + + def getfile(self, section: str, option: str) -> str: + _, value = self._get_single(section, option) + return config.process_file_value(value) + + def _get_list(self, section: str, option: str) -> tuple[str, list[str]]: + """Get a list of strings, substituting environment variables in the elements.""" + name, values = self._get(section, option) + values = self._check_type(name, option, values, list, None, "a list") + values = [substitute_variables(value, os.environ) for value in values] + return name, values + + def getlist(self, section: str, option: str) -> list[str]: + _, values = self._get_list(section, option) + return values + + def getregexlist(self, section: str, option: str) -> list[str]: + name, values = self._get_list(section, option) + return config.process_regexlist(name, option, values) + + def getint(self, section: str, option: str) -> int: + name, value = self._get_single(section, option) + return self._check_type(name, option, value, int, int, "an integer") + + def getfloat(self, section: str, option: str) -> float: + name, value = self._get_single(section, option) + if isinstance(value, int): + value = float(value) + return self._check_type(name, option, value, float, float, "a float") diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/tracer.cpython-311-darwin.so b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/tracer.cpython-311-darwin.so new file mode 100755 index 0000000..be99f98 Binary files /dev/null and b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/tracer.cpython-311-darwin.so differ diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/tracer.pyi b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/tracer.pyi new file mode 100644 index 0000000..c45bf39 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/tracer.pyi @@ -0,0 +1,43 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""Typing information for the constructs from our .c files.""" + +from typing import Any, Dict + +from coverage.types import TFileDisposition, TTraceData, TTraceFn, Tracer + +class CFileDisposition(TFileDisposition): + """CFileDisposition is in ctracer/filedisp.c""" + + canonical_filename: Any + file_tracer: Any + has_dynamic_filename: Any + original_filename: Any + reason: Any + source_filename: Any + trace: Any + def __init__(self) -> None: ... + +class CTracer(Tracer): + """CTracer is in ctracer/tracer.c""" + + check_include: Any + concur_id_func: Any + data: TTraceData + disable_plugin: Any + file_tracers: Any + should_start_context: Any + should_trace: Any + should_trace_cache: Any + switch_context: Any + lock_data: Any + unlock_data: Any + trace_arcs: Any + warn: Any + def __init__(self) -> None: ... + def activity(self) -> bool: ... + def get_stats(self) -> Dict[str, int]: ... + def reset_activity(self) -> Any: ... + def start(self) -> TTraceFn: ... + def stop(self) -> None: ... diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/types.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/types.py new file mode 100644 index 0000000..8298e89 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/types.py @@ -0,0 +1,206 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +""" +Types for use throughout coverage.py. +""" + +from __future__ import annotations + +import os +import pathlib +from collections.abc import Iterable, Mapping +from types import FrameType, ModuleType +from typing import TYPE_CHECKING, Any, Callable, Optional, Protocol + +if TYPE_CHECKING: + from coverage.plugin import FileTracer + + +AnyCallable = Callable[..., Any] + +## File paths + +# For arguments that are file paths: +FilePath = str | os.PathLike[str] +# For testing FilePath arguments +FilePathClasses = [str, pathlib.Path] +FilePathType = type[str] | type[pathlib.Path] + +## Python tracing + + +class TTraceFn(Protocol): + """A Python trace function.""" + + def __call__( + self, + frame: FrameType, + event: str, + arg: Any, + lineno: TLineNo | None = None, # Our own twist, see collector.py + ) -> TTraceFn | None: ... + + +## Coverage.py tracing + +# Line numbers are pervasive enough that they deserve their own type. +TLineNo = int + +# Bytecode offsets are pervasive enough that they deserve their own type. +TOffset = int + +TArc = tuple[TLineNo, TLineNo] + + +class TFileDisposition(Protocol): + """A simple value type for recording what to do with a file.""" + + original_filename: str + canonical_filename: str + source_filename: str | None + trace: bool + reason: str + file_tracer: FileTracer | None + has_dynamic_filename: bool + + +# When collecting data, we use a dictionary with a few possible shapes. The +# keys are always file names. +# - If measuring line coverage, the values are sets of line numbers. +# - If measuring arcs in the Python tracer, the values are sets of arcs (pairs +# of line numbers). +# - If measuring arcs in the C tracer, the values are sets of packed arcs (two +# line numbers combined into one integer). + +TTraceFileData = set[TLineNo] | set[TArc] | set[int] + +TTraceData = dict[str, TTraceFileData] + +# Functions passed into collectors. +TShouldTraceFn = Callable[[str, FrameType], TFileDisposition] +TCheckIncludeFn = Callable[[str, FrameType], bool] +TShouldStartContextFn = Callable[[FrameType], str | None] + + +class Tracer(Protocol): + """Anything that can report on Python execution.""" + + data: TTraceData + trace_arcs: bool + should_trace: TShouldTraceFn + should_trace_cache: Mapping[str, TFileDisposition | None] + should_start_context: TShouldStartContextFn | None + switch_context: Callable[[str | None], None] | None + lock_data: Callable[[], None] + unlock_data: Callable[[], None] + warn: TWarnFn + + def __init__(self) -> None: ... + + def start(self) -> TTraceFn | None: + """Start this tracer, return a trace function if based on sys.settrace.""" + + def stop(self) -> None: + """Stop this tracer.""" + + def activity(self) -> bool: + """Has there been any activity?""" + + def reset_activity(self) -> None: + """Reset the activity() flag.""" + + def get_stats(self) -> dict[str, int] | None: + """Return a dictionary of statistics, or None.""" + + +## Coverage + +# Many places use kwargs as Coverage kwargs. +TCovKwargs = Any + + +## Configuration + +# One value read from a config file. +TConfigValueIn = Optional[bool | int | float | str | Iterable[str] | Mapping[str, Iterable[str]]] +TConfigValueOut = Optional[bool | int | float | str | list[str] | dict[str, list[str]]] +# An entire config section, mapping option names to values. +TConfigSectionIn = Mapping[str, TConfigValueIn] +TConfigSectionOut = Mapping[str, TConfigValueOut] + + +class TConfigurable(Protocol): + """Something that can proxy to the coverage configuration settings.""" + + def get_option(self, option_name: str) -> TConfigValueOut | None: + """Get an option from the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + Returns the value of the option. + + """ + + def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) -> None: + """Set an option in the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + `value` is the new value for the option. + + """ + + +class TPluginConfig(Protocol): + """Something that can provide options to a plugin.""" + + def get_plugin_options(self, plugin: str) -> TConfigSectionOut: + """Get the options for a plugin.""" + + +## Parsing + +TMorf = ModuleType | str + +TSourceTokenLines = Iterable[list[tuple[str, str]]] + + +## Plugins + + +class TPlugin(Protocol): + """What all plugins have in common.""" + + _coverage_plugin_name: str + _coverage_enabled: bool + + +## Debugging + + +class TWarnFn(Protocol): + """A callable warn() function.""" + + def __call__(self, msg: str, slug: str | None = None, once: bool = False) -> None: ... + + +class TDebugCtl(Protocol): + """A DebugControl object, or something like it.""" + + def should(self, option: str) -> bool: + """Decide whether to output debug information in category `option`.""" + + def write(self, msg: str) -> None: + """Write a line of debug output.""" + + +class TWritable(Protocol): + """Anything that can be written to.""" + + def write(self, msg: str) -> None: + """Write a message.""" diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/version.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/version.py new file mode 100644 index 0000000..0205a36 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/version.py @@ -0,0 +1,35 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""The version and URL for coverage.py""" +# This file is exec'ed in setup.py, don't import anything! + +from __future__ import annotations + +# version_info: same semantics as sys.version_info. +# _dev: the .devN suffix if any. +version_info = (7, 11, 3, "final", 0) +_dev = 0 + + +def _make_version( + major: int, + minor: int, + micro: int, + releaselevel: str = "final", + serial: int = 0, + dev: int = 0, +) -> str: + """Create a readable version string from version_info tuple components.""" + assert releaselevel in ["alpha", "beta", "candidate", "final"] + version = f"{major}.{minor}.{micro}" + if releaselevel != "final": + short = {"alpha": "a", "beta": "b", "candidate": "rc"}[releaselevel] + version += f"{short}{serial}" + if dev != 0: + version += f".dev{dev}" + return version + + +__version__ = _make_version(*version_info, _dev) +__url__ = f"https://coverage.readthedocs.io/en/{__version__}" diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/xmlreport.py b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/xmlreport.py new file mode 100644 index 0000000..46dedf2 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/coverage/xmlreport.py @@ -0,0 +1,264 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt + +"""XML reporting for coverage.py""" + +from __future__ import annotations + +import os +import os.path +import sys +import time +import xml.dom.minidom +from collections.abc import Iterable +from dataclasses import dataclass +from typing import IO, TYPE_CHECKING, Any + +from coverage import __version__, files +from coverage.misc import human_sorted, human_sorted_items, isolate_module +from coverage.plugin import FileReporter +from coverage.report_core import get_analysis_to_report +from coverage.results import Analysis +from coverage.types import TMorf +from coverage.version import __url__ + +if TYPE_CHECKING: + from coverage import Coverage + +os = isolate_module(os) + + +DTD_URL = "https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd" + + +def rate(hit: int, num: int) -> str: + """Return the fraction of `hit`/`num`, as a string.""" + if num == 0: + return "1" + else: + return f"{hit / num:.4g}" + + +@dataclass +class PackageData: + """Data we keep about each "package" (in Java terms).""" + + elements: dict[str, xml.dom.minidom.Element] + hits: int + lines: int + br_hits: int + branches: int + + +def appendChild(parent: Any, child: Any) -> None: + """Append a child to a parent, in a way mypy will shut up about.""" + parent.appendChild(child) + + +class XmlReporter: + """A reporter for writing Cobertura-style XML coverage results.""" + + report_type = "XML report" + + def __init__(self, coverage: Coverage) -> None: + self.coverage = coverage + self.config = self.coverage.config + + self.source_paths = set() + if self.config.source: + for src in self.config.source: + if os.path.exists(src): + if self.config.relative_files: + src = src.rstrip(r"\/") + else: + src = files.canonical_filename(src) + self.source_paths.add(src) + self.packages: dict[str, PackageData] = {} + self.xml_out: xml.dom.minidom.Document + + def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str] | None = None) -> float: + """Generate a Cobertura-compatible XML report for `morfs`. + + `morfs` is a list of modules or file names. + + `outfile` is a file object to write the XML to. + + """ + # Initial setup. + outfile = outfile or sys.stdout + has_arcs = self.coverage.get_data().has_arcs() + + # Create the DOM that will store the data. + impl = xml.dom.minidom.getDOMImplementation() + assert impl is not None + self.xml_out = impl.createDocument(None, "coverage", None) + + # Write header stuff. + xcoverage = self.xml_out.documentElement + assert xcoverage is not None + xcoverage.setAttribute("version", __version__) + xcoverage.setAttribute("timestamp", str(int(time.time() * 1000))) + xcoverage.appendChild( + self.xml_out.createComment( + f" Generated by coverage.py: {__url__} ", + ) + ) + xcoverage.appendChild(self.xml_out.createComment(f" Based on {DTD_URL} ")) + + # Call xml_file for each file in the data. + for fr, analysis in get_analysis_to_report(self.coverage, morfs): + self.xml_file(fr, analysis, has_arcs) + + xsources = self.xml_out.createElement("sources") + xcoverage.appendChild(xsources) + + # Populate the XML DOM with the source info. + for path in human_sorted(self.source_paths): + xsource = self.xml_out.createElement("source") + appendChild(xsources, xsource) + txt = self.xml_out.createTextNode(path) + appendChild(xsource, txt) + + lnum_tot, lhits_tot = 0, 0 + bnum_tot, bhits_tot = 0, 0 + + xpackages = self.xml_out.createElement("packages") + xcoverage.appendChild(xpackages) + + # Populate the XML DOM with the package info. + for pkg_name, pkg_data in human_sorted_items(self.packages.items()): + xpackage = self.xml_out.createElement("package") + appendChild(xpackages, xpackage) + xclasses = self.xml_out.createElement("classes") + appendChild(xpackage, xclasses) + for _, class_elt in human_sorted_items(pkg_data.elements.items()): + appendChild(xclasses, class_elt) + xpackage.setAttribute("name", pkg_name.replace(os.sep, ".")) + xpackage.setAttribute("line-rate", rate(pkg_data.hits, pkg_data.lines)) + if has_arcs: + branch_rate = rate(pkg_data.br_hits, pkg_data.branches) + else: + branch_rate = "0" + xpackage.setAttribute("branch-rate", branch_rate) + xpackage.setAttribute("complexity", "0") + + lhits_tot += pkg_data.hits + lnum_tot += pkg_data.lines + bhits_tot += pkg_data.br_hits + bnum_tot += pkg_data.branches + + xcoverage.setAttribute("lines-valid", str(lnum_tot)) + xcoverage.setAttribute("lines-covered", str(lhits_tot)) + xcoverage.setAttribute("line-rate", rate(lhits_tot, lnum_tot)) + if has_arcs: + xcoverage.setAttribute("branches-valid", str(bnum_tot)) + xcoverage.setAttribute("branches-covered", str(bhits_tot)) + xcoverage.setAttribute("branch-rate", rate(bhits_tot, bnum_tot)) + else: + xcoverage.setAttribute("branches-covered", "0") + xcoverage.setAttribute("branches-valid", "0") + xcoverage.setAttribute("branch-rate", "0") + xcoverage.setAttribute("complexity", "0") + + # Write the output file. + outfile.write(serialize_xml(self.xml_out)) + + # Return the total percentage. + denom = lnum_tot + bnum_tot + if denom == 0: + pct = 0.0 + else: + pct = 100.0 * (lhits_tot + bhits_tot) / denom + return pct + + def xml_file(self, fr: FileReporter, analysis: Analysis, has_arcs: bool) -> None: + """Add to the XML report for a single file.""" + + if self.config.skip_empty: + if analysis.numbers.n_statements == 0: + return + + # Create the "lines" and "package" XML elements, which + # are populated later. Note that a package == a directory. + filename = fr.filename.replace("\\", "/") + for source_path in self.source_paths: + if not self.config.relative_files: + source_path = files.canonical_filename(source_path) + if filename.startswith(source_path.replace("\\", "/") + "/"): + rel_name = filename[len(source_path) + 1 :] + break + else: + rel_name = fr.relative_filename().replace("\\", "/") + self.source_paths.add(fr.filename[: -len(rel_name)].rstrip(r"\/")) + + dirname = os.path.dirname(rel_name) or "." + dirname = "/".join(dirname.split("/")[: self.config.xml_package_depth]) + package_name = dirname.replace("/", ".") + + package = self.packages.setdefault(package_name, PackageData({}, 0, 0, 0, 0)) + + xclass: xml.dom.minidom.Element = self.xml_out.createElement("class") + + appendChild(xclass, self.xml_out.createElement("methods")) + + xlines = self.xml_out.createElement("lines") + appendChild(xclass, xlines) + + xclass.setAttribute("name", os.path.relpath(rel_name, dirname)) + xclass.setAttribute("filename", rel_name.replace("\\", "/")) + xclass.setAttribute("complexity", "0") + + branch_stats = analysis.branch_stats() + missing_branch_arcs = analysis.missing_branch_arcs() + + # For each statement, create an XML "line" element. + for line in sorted(analysis.statements): + xline = self.xml_out.createElement("line") + xline.setAttribute("number", str(line)) + + # Q: can we get info about the number of times a statement is + # executed? If so, that should be recorded here. + xline.setAttribute("hits", str(int(line not in analysis.missing))) + + if has_arcs: + if line in branch_stats: + total, taken = branch_stats[line] + xline.setAttribute("branch", "true") + xline.setAttribute( + "condition-coverage", + f"{100 * taken // total}% ({taken}/{total})", + ) + if line in missing_branch_arcs: + annlines = ["exit" if b < 0 else str(b) for b in missing_branch_arcs[line]] + xline.setAttribute("missing-branches", ",".join(annlines)) + appendChild(xlines, xline) + + class_lines = len(analysis.statements) + class_hits = class_lines - len(analysis.missing) + + if has_arcs: + class_branches = sum(t for t, k in branch_stats.values()) + missing_branches = sum(t - k for t, k in branch_stats.values()) + class_br_hits = class_branches - missing_branches + else: + class_branches = 0 + class_br_hits = 0 + + # Finalize the statistics that are collected in the XML DOM. + xclass.setAttribute("line-rate", rate(class_hits, class_lines)) + if has_arcs: + branch_rate = rate(class_br_hits, class_branches) + else: + branch_rate = "0" + xclass.setAttribute("branch-rate", branch_rate) + + package.elements[rel_name] = xclass + package.hits += class_hits + package.lines += class_lines + package.br_hits += class_br_hits + package.branches += class_branches + + +def serialize_xml(dom: xml.dom.minidom.Document) -> str: + """Serialize a minidom node to XML.""" + return dom.toprettyxml() diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/distutils-precedence.pth b/tools/converter-generator/venv/lib/python3.11/site-packages/distutils-precedence.pth new file mode 100644 index 0000000..7f009fe --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/distutils-precedence.pth @@ -0,0 +1 @@ +import os; var = 'SETUPTOOLS_USE_DISTUTILS'; enabled = os.environ.get(var, 'local') == 'local'; enabled and __import__('_distutils_hack').add_shim(); diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig-2.3.0.dist-info/INSTALLER b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig-2.3.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig-2.3.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig-2.3.0.dist-info/METADATA b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig-2.3.0.dist-info/METADATA new file mode 100644 index 0000000..fc3c00d --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig-2.3.0.dist-info/METADATA @@ -0,0 +1,79 @@ +Metadata-Version: 2.4 +Name: iniconfig +Version: 2.3.0 +Summary: brain-dead simple config-ini parsing +Author-email: Ronny Pfannschmidt , Holger Krekel +License-Expression: MIT +Project-URL: Homepage, https://github.com/pytest-dev/iniconfig +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Utilities +Requires-Python: >=3.10 +Description-Content-Type: text/x-rst +License-File: LICENSE +Dynamic: license-file + +iniconfig: brain-dead simple parsing of ini files +======================================================= + +iniconfig is a small and simple INI-file parser module +having a unique set of features: + +* maintains order of sections and entries +* supports multi-line values with or without line-continuations +* supports "#" comments everywhere +* raises errors with proper line-numbers +* no bells and whistles like automatic substitutions +* iniconfig raises an Error if two sections have the same name. + +If you encounter issues or have feature wishes please report them to: + + https://github.com/RonnyPfannschmidt/iniconfig/issues + +Basic Example +=================================== + +If you have an ini file like this: + +.. code-block:: ini + + # content of example.ini + [section1] # comment + name1=value1 # comment + name1b=value1,value2 # comment + + [section2] + name2= + line1 + line2 + +then you can do: + +.. code-block:: pycon + + >>> import iniconfig + >>> ini = iniconfig.IniConfig("example.ini") + >>> ini['section1']['name1'] # raises KeyError if not exists + 'value1' + >>> ini.get('section1', 'name1b', [], lambda x: x.split(",")) + ['value1', 'value2'] + >>> ini.get('section1', 'notexist', [], lambda x: x.split(",")) + [] + >>> [x.name for x in list(ini)] + ['section1', 'section2'] + >>> list(list(ini)[0].items()) + [('name1', 'value1'), ('name1b', 'value1,value2')] + >>> 'section1' in ini + True + >>> 'inexistendsection' in ini + False diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig-2.3.0.dist-info/RECORD b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig-2.3.0.dist-info/RECORD new file mode 100644 index 0000000..5b9993f --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig-2.3.0.dist-info/RECORD @@ -0,0 +1,15 @@ +iniconfig-2.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +iniconfig-2.3.0.dist-info/METADATA,sha256=QNdz-E5OES9JW79PG-nL0tRWwK6271MR910b8yLyFls,2526 +iniconfig-2.3.0.dist-info/RECORD,, +iniconfig-2.3.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91 +iniconfig-2.3.0.dist-info/licenses/LICENSE,sha256=NAn6kfes5VeJRjJnZlbjImT-XvdYFTVyXcmiN3RVG9Q,1098 +iniconfig-2.3.0.dist-info/top_level.txt,sha256=7KfM0fugdlToj9UW7enKXk2HYALQD8qHiyKtjhSzgN8,10 +iniconfig/__init__.py,sha256=XL5eqUYj4mskAOorZ5jfRAinJvJzTI-fJxpP4xfXtaw,7497 +iniconfig/__pycache__/__init__.cpython-311.pyc,, +iniconfig/__pycache__/_parse.cpython-311.pyc,, +iniconfig/__pycache__/_version.cpython-311.pyc,, +iniconfig/__pycache__/exceptions.cpython-311.pyc,, +iniconfig/_parse.py,sha256=5ncBl7MAQiaPNnpRrs9FR4t6G6DkgOUs458OY_1CR28,5223 +iniconfig/_version.py,sha256=KNFYe-Vtdt7Z-oHyl8jmDAQ9qXoCNMAEXigj6BR1QUI,704 +iniconfig/exceptions.py,sha256=mipQ_aMxD9CvSvFWN1oTXY4QuRnKAMZ1f3sCdmjDTU0,399 +iniconfig/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig-2.3.0.dist-info/WHEEL b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig-2.3.0.dist-info/WHEEL new file mode 100644 index 0000000..e7fa31b --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig-2.3.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.9.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig-2.3.0.dist-info/licenses/LICENSE b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig-2.3.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000..46f4b28 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig-2.3.0.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2010 - 2023 Holger Krekel and others + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig-2.3.0.dist-info/top_level.txt b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig-2.3.0.dist-info/top_level.txt new file mode 100644 index 0000000..9dda536 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig-2.3.0.dist-info/top_level.txt @@ -0,0 +1 @@ +iniconfig diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig/__init__.py b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig/__init__.py new file mode 100644 index 0000000..b84809f --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig/__init__.py @@ -0,0 +1,249 @@ +"""brain-dead simple parser for ini-style files. +(C) Ronny Pfannschmidt, Holger Krekel -- MIT licensed +""" + +import os +from collections.abc import Callable +from collections.abc import Iterator +from collections.abc import Mapping +from typing import Final +from typing import TypeVar +from typing import overload + +__all__ = ["IniConfig", "ParseError", "COMMENTCHARS", "iscommentline"] + +from . import _parse +from ._parse import COMMENTCHARS +from ._parse import iscommentline +from .exceptions import ParseError + +_D = TypeVar("_D") +_T = TypeVar("_T") + + +class SectionWrapper: + config: Final["IniConfig"] + name: Final[str] + + def __init__(self, config: "IniConfig", name: str) -> None: + self.config = config + self.name = name + + def lineof(self, name: str) -> int | None: + return self.config.lineof(self.name, name) + + @overload + def get(self, key: str) -> str | None: ... + + @overload + def get( + self, + key: str, + convert: Callable[[str], _T], + ) -> _T | None: ... + + @overload + def get( + self, + key: str, + default: None, + convert: Callable[[str], _T], + ) -> _T | None: ... + + @overload + def get(self, key: str, default: _D, convert: None = None) -> str | _D: ... + + @overload + def get( + self, + key: str, + default: _D, + convert: Callable[[str], _T], + ) -> _T | _D: ... + + # TODO: investigate possible mypy bug wrt matching the passed over data + def get( # type: ignore [misc] + self, + key: str, + default: _D | None = None, + convert: Callable[[str], _T] | None = None, + ) -> _D | _T | str | None: + return self.config.get(self.name, key, convert=convert, default=default) + + def __getitem__(self, key: str) -> str: + return self.config.sections[self.name][key] + + def __iter__(self) -> Iterator[str]: + section: Mapping[str, str] = self.config.sections.get(self.name, {}) + + def lineof(key: str) -> int: + return self.config.lineof(self.name, key) # type: ignore[return-value] + + yield from sorted(section, key=lineof) + + def items(self) -> Iterator[tuple[str, str]]: + for name in self: + yield name, self[name] + + +class IniConfig: + path: Final[str] + sections: Final[Mapping[str, Mapping[str, str]]] + _sources: Final[Mapping[tuple[str, str | None], int]] + + def __init__( + self, + path: str | os.PathLike[str], + data: str | None = None, + encoding: str = "utf-8", + *, + _sections: Mapping[str, Mapping[str, str]] | None = None, + _sources: Mapping[tuple[str, str | None], int] | None = None, + ) -> None: + self.path = os.fspath(path) + + # Determine sections and sources + if _sections is not None and _sources is not None: + # Use provided pre-parsed data (called from parse()) + sections_data = _sections + sources = _sources + else: + # Parse the data (backward compatible path) + if data is None: + with open(self.path, encoding=encoding) as fp: + data = fp.read() + + # Use old behavior (no stripping) for backward compatibility + sections_data, sources = _parse.parse_ini_data( + self.path, data, strip_inline_comments=False + ) + + # Assign once to Final attributes + self._sources = sources + self.sections = sections_data + + @classmethod + def parse( + cls, + path: str | os.PathLike[str], + data: str | None = None, + encoding: str = "utf-8", + *, + strip_inline_comments: bool = True, + strip_section_whitespace: bool = False, + ) -> "IniConfig": + """Parse an INI file. + + Args: + path: Path to the INI file (used for error messages) + data: Optional INI content as string. If None, reads from path. + encoding: Encoding to use when reading the file (default: utf-8) + strip_inline_comments: Whether to strip inline comments from values + (default: True). When True, comments starting with # or ; are + removed from values, matching the behavior for section comments. + strip_section_whitespace: Whether to strip whitespace from section and key names + (default: False). When True, strips Unicode whitespace from section and key names, + addressing issue #4. When False, preserves existing behavior for backward compatibility. + + Returns: + IniConfig instance with parsed configuration + + Example: + # With comment stripping (default): + config = IniConfig.parse("setup.cfg") + # value = "foo" instead of "foo # comment" + + # Without comment stripping (old behavior): + config = IniConfig.parse("setup.cfg", strip_inline_comments=False) + # value = "foo # comment" + + # With section name stripping (opt-in for issue #4): + config = IniConfig.parse("setup.cfg", strip_section_whitespace=True) + # section names and keys have Unicode whitespace stripped + """ + fspath = os.fspath(path) + + if data is None: + with open(fspath, encoding=encoding) as fp: + data = fp.read() + + sections_data, sources = _parse.parse_ini_data( + fspath, + data, + strip_inline_comments=strip_inline_comments, + strip_section_whitespace=strip_section_whitespace, + ) + + # Call constructor with pre-parsed sections and sources + return cls(path=fspath, _sections=sections_data, _sources=sources) + + def lineof(self, section: str, name: str | None = None) -> int | None: + lineno = self._sources.get((section, name)) + return None if lineno is None else lineno + 1 + + @overload + def get( + self, + section: str, + name: str, + ) -> str | None: ... + + @overload + def get( + self, + section: str, + name: str, + convert: Callable[[str], _T], + ) -> _T | None: ... + + @overload + def get( + self, + section: str, + name: str, + default: None, + convert: Callable[[str], _T], + ) -> _T | None: ... + + @overload + def get( + self, section: str, name: str, default: _D, convert: None = None + ) -> str | _D: ... + + @overload + def get( + self, + section: str, + name: str, + default: _D, + convert: Callable[[str], _T], + ) -> _T | _D: ... + + def get( # type: ignore + self, + section: str, + name: str, + default: _D | None = None, + convert: Callable[[str], _T] | None = None, + ) -> _D | _T | str | None: + try: + value: str = self.sections[section][name] + except KeyError: + return default + else: + if convert is not None: + return convert(value) + else: + return value + + def __getitem__(self, name: str) -> SectionWrapper: + if name not in self.sections: + raise KeyError(name) + return SectionWrapper(self, name) + + def __iter__(self) -> Iterator[SectionWrapper]: + for name in sorted(self.sections, key=self.lineof): # type: ignore + yield SectionWrapper(self, name) + + def __contains__(self, arg: str) -> bool: + return arg in self.sections diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig/_parse.py b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig/_parse.py new file mode 100644 index 0000000..57b9b44 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig/_parse.py @@ -0,0 +1,163 @@ +from collections.abc import Mapping +from typing import NamedTuple + +from .exceptions import ParseError + +COMMENTCHARS = "#;" + + +class ParsedLine(NamedTuple): + lineno: int + section: str | None + name: str | None + value: str | None + + +def parse_ini_data( + path: str, + data: str, + *, + strip_inline_comments: bool, + strip_section_whitespace: bool = False, +) -> tuple[Mapping[str, Mapping[str, str]], Mapping[tuple[str, str | None], int]]: + """Parse INI data and return sections and sources mappings. + + Args: + path: Path for error messages + data: INI content as string + strip_inline_comments: Whether to strip inline comments from values + strip_section_whitespace: Whether to strip whitespace from section and key names + (default: False). When True, addresses issue #4 by stripping Unicode whitespace. + + Returns: + Tuple of (sections_data, sources) where: + - sections_data: mapping of section -> {name -> value} + - sources: mapping of (section, name) -> line number + """ + tokens = parse_lines( + path, + data.splitlines(True), + strip_inline_comments=strip_inline_comments, + strip_section_whitespace=strip_section_whitespace, + ) + + sources: dict[tuple[str, str | None], int] = {} + sections_data: dict[str, dict[str, str]] = {} + + for lineno, section, name, value in tokens: + if section is None: + raise ParseError(path, lineno, "no section header defined") + sources[section, name] = lineno + if name is None: + if section in sections_data: + raise ParseError(path, lineno, f"duplicate section {section!r}") + sections_data[section] = {} + else: + if name in sections_data[section]: + raise ParseError(path, lineno, f"duplicate name {name!r}") + assert value is not None + sections_data[section][name] = value + + return sections_data, sources + + +def parse_lines( + path: str, + line_iter: list[str], + *, + strip_inline_comments: bool = False, + strip_section_whitespace: bool = False, +) -> list[ParsedLine]: + result: list[ParsedLine] = [] + section = None + for lineno, line in enumerate(line_iter): + name, data = _parseline( + path, line, lineno, strip_inline_comments, strip_section_whitespace + ) + # new value + if name is not None and data is not None: + result.append(ParsedLine(lineno, section, name, data)) + # new section + elif name is not None and data is None: + if not name: + raise ParseError(path, lineno, "empty section name") + section = name + result.append(ParsedLine(lineno, section, None, None)) + # continuation + elif name is None and data is not None: + if not result: + raise ParseError(path, lineno, "unexpected value continuation") + last = result.pop() + if last.name is None: + raise ParseError(path, lineno, "unexpected value continuation") + + if last.value: + last = last._replace(value=f"{last.value}\n{data}") + else: + last = last._replace(value=data) + result.append(last) + return result + + +def _parseline( + path: str, + line: str, + lineno: int, + strip_inline_comments: bool, + strip_section_whitespace: bool, +) -> tuple[str | None, str | None]: + # blank lines + if iscommentline(line): + line = "" + else: + line = line.rstrip() + if not line: + return None, None + # section + if line[0] == "[": + realline = line + for c in COMMENTCHARS: + line = line.split(c)[0].rstrip() + if line[-1] == "]": + section_name = line[1:-1] + # Optionally strip whitespace from section name (issue #4) + if strip_section_whitespace: + section_name = section_name.strip() + return section_name, None + return None, realline.strip() + # value + elif not line[0].isspace(): + try: + name, value = line.split("=", 1) + if ":" in name: + raise ValueError() + except ValueError: + try: + name, value = line.split(":", 1) + except ValueError: + raise ParseError(path, lineno, f"unexpected line: {line!r}") from None + + # Strip key name (always for backward compatibility, optionally with unicode awareness) + key_name = name.strip() + + # Strip value + value = value.strip() + # Strip inline comments from values if requested (issue #55) + if strip_inline_comments: + for c in COMMENTCHARS: + value = value.split(c)[0].rstrip() + + return key_name, value + # continuation + else: + line = line.strip() + # Strip inline comments from continuations if requested (issue #55) + if strip_inline_comments: + for c in COMMENTCHARS: + line = line.split(c)[0].rstrip() + return None, line + + +def iscommentline(line: str) -> bool: + c = line.lstrip()[:1] + return c in COMMENTCHARS diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig/_version.py b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig/_version.py new file mode 100644 index 0000000..b982b02 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig/_version.py @@ -0,0 +1,34 @@ +# file generated by setuptools-scm +# don't change, don't track in version control + +__all__ = [ + "__version__", + "__version_tuple__", + "version", + "version_tuple", + "__commit_id__", + "commit_id", +] + +TYPE_CHECKING = False +if TYPE_CHECKING: + from typing import Tuple + from typing import Union + + VERSION_TUPLE = Tuple[Union[int, str], ...] + COMMIT_ID = Union[str, None] +else: + VERSION_TUPLE = object + COMMIT_ID = object + +version: str +__version__: str +__version_tuple__: VERSION_TUPLE +version_tuple: VERSION_TUPLE +commit_id: COMMIT_ID +__commit_id__: COMMIT_ID + +__version__ = version = '2.3.0' +__version_tuple__ = version_tuple = (2, 3, 0) + +__commit_id__ = commit_id = None diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig/exceptions.py b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig/exceptions.py new file mode 100644 index 0000000..d078bc6 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig/exceptions.py @@ -0,0 +1,16 @@ +from typing import Final + + +class ParseError(Exception): + path: Final[str] + lineno: Final[int] + msg: Final[str] + + def __init__(self, path: str, lineno: int, msg: str) -> None: + super().__init__(path, lineno, msg) + self.path = path + self.lineno = lineno + self.msg = msg + + def __str__(self) -> str: + return f"{self.path}:{self.lineno + 1}: {self.msg}" diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig/py.typed b/tools/converter-generator/venv/lib/python3.11/site-packages/iniconfig/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2-3.1.6.dist-info/INSTALLER b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2-3.1.6.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2-3.1.6.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2-3.1.6.dist-info/METADATA b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2-3.1.6.dist-info/METADATA new file mode 100644 index 0000000..ffef2ff --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2-3.1.6.dist-info/METADATA @@ -0,0 +1,84 @@ +Metadata-Version: 2.4 +Name: Jinja2 +Version: 3.1.6 +Summary: A very fast and expressive template engine. +Maintainer-email: Pallets +Requires-Python: >=3.7 +Description-Content-Type: text/markdown +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Classifier: Topic :: Text Processing :: Markup :: HTML +Classifier: Typing :: Typed +License-File: LICENSE.txt +Requires-Dist: MarkupSafe>=2.0 +Requires-Dist: Babel>=2.7 ; extra == "i18n" +Project-URL: Changes, https://jinja.palletsprojects.com/changes/ +Project-URL: Chat, https://discord.gg/pallets +Project-URL: Documentation, https://jinja.palletsprojects.com/ +Project-URL: Donate, https://palletsprojects.com/donate +Project-URL: Source, https://github.com/pallets/jinja/ +Provides-Extra: i18n + +# Jinja + +Jinja is a fast, expressive, extensible templating engine. Special +placeholders in the template allow writing code similar to Python +syntax. Then the template is passed data to render the final document. + +It includes: + +- Template inheritance and inclusion. +- Define and import macros within templates. +- HTML templates can use autoescaping to prevent XSS from untrusted + user input. +- A sandboxed environment can safely render untrusted templates. +- AsyncIO support for generating templates and calling async + functions. +- I18N support with Babel. +- Templates are compiled to optimized Python code just-in-time and + cached, or can be compiled ahead-of-time. +- Exceptions point to the correct line in templates to make debugging + easier. +- Extensible filters, tests, functions, and even syntax. + +Jinja's philosophy is that while application logic belongs in Python if +possible, it shouldn't make the template designer's job difficult by +restricting functionality too much. + + +## In A Nutshell + +```jinja +{% extends "base.html" %} +{% block title %}Members{% endblock %} +{% block content %} + +{% endblock %} +``` + +## Donate + +The Pallets organization develops and supports Jinja and other popular +packages. In order to grow the community of contributors and users, and +allow the maintainers to devote more time to the projects, [please +donate today][]. + +[please donate today]: https://palletsprojects.com/donate + +## Contributing + +See our [detailed contributing documentation][contrib] for many ways to +contribute, including reporting issues, requesting features, asking or answering +questions, and making PRs. + +[contrib]: https://palletsprojects.com/contributing/ + diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2-3.1.6.dist-info/RECORD b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2-3.1.6.dist-info/RECORD new file mode 100644 index 0000000..b48ff6c --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2-3.1.6.dist-info/RECORD @@ -0,0 +1,58 @@ +jinja2-3.1.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +jinja2-3.1.6.dist-info/METADATA,sha256=aMVUj7Z8QTKhOJjZsx7FDGvqKr3ZFdkh8hQ1XDpkmcg,2871 +jinja2-3.1.6.dist-info/RECORD,, +jinja2-3.1.6.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jinja2-3.1.6.dist-info/WHEEL,sha256=_2ozNFCLWc93bK4WKHCO-eDUENDlo-dgc9cU3qokYO4,82 +jinja2-3.1.6.dist-info/entry_points.txt,sha256=OL85gYU1eD8cuPlikifFngXpeBjaxl6rIJ8KkC_3r-I,58 +jinja2-3.1.6.dist-info/licenses/LICENSE.txt,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475 +jinja2/__init__.py,sha256=xxepO9i7DHsqkQrgBEduLtfoz2QCuT6_gbL4XSN1hbU,1928 +jinja2/__pycache__/__init__.cpython-311.pyc,, +jinja2/__pycache__/_identifier.cpython-311.pyc,, +jinja2/__pycache__/async_utils.cpython-311.pyc,, +jinja2/__pycache__/bccache.cpython-311.pyc,, +jinja2/__pycache__/compiler.cpython-311.pyc,, +jinja2/__pycache__/constants.cpython-311.pyc,, +jinja2/__pycache__/debug.cpython-311.pyc,, +jinja2/__pycache__/defaults.cpython-311.pyc,, +jinja2/__pycache__/environment.cpython-311.pyc,, +jinja2/__pycache__/exceptions.cpython-311.pyc,, +jinja2/__pycache__/ext.cpython-311.pyc,, +jinja2/__pycache__/filters.cpython-311.pyc,, +jinja2/__pycache__/idtracking.cpython-311.pyc,, +jinja2/__pycache__/lexer.cpython-311.pyc,, +jinja2/__pycache__/loaders.cpython-311.pyc,, +jinja2/__pycache__/meta.cpython-311.pyc,, +jinja2/__pycache__/nativetypes.cpython-311.pyc,, +jinja2/__pycache__/nodes.cpython-311.pyc,, +jinja2/__pycache__/optimizer.cpython-311.pyc,, +jinja2/__pycache__/parser.cpython-311.pyc,, +jinja2/__pycache__/runtime.cpython-311.pyc,, +jinja2/__pycache__/sandbox.cpython-311.pyc,, +jinja2/__pycache__/tests.cpython-311.pyc,, +jinja2/__pycache__/utils.cpython-311.pyc,, +jinja2/__pycache__/visitor.cpython-311.pyc,, +jinja2/_identifier.py,sha256=_zYctNKzRqlk_murTNlzrju1FFJL7Va_Ijqqd7ii2lU,1958 +jinja2/async_utils.py,sha256=vK-PdsuorOMnWSnEkT3iUJRIkTnYgO2T6MnGxDgHI5o,2834 +jinja2/bccache.py,sha256=gh0qs9rulnXo0PhX5jTJy2UHzI8wFnQ63o_vw7nhzRg,14061 +jinja2/compiler.py,sha256=9RpCQl5X88BHllJiPsHPh295Hh0uApvwFJNQuutULeM,74131 +jinja2/constants.py,sha256=GMoFydBF_kdpaRKPoM5cl5MviquVRLVyZtfp5-16jg0,1433 +jinja2/debug.py,sha256=CnHqCDHd-BVGvti_8ZsTolnXNhA3ECsY-6n_2pwU8Hw,6297 +jinja2/defaults.py,sha256=boBcSw78h-lp20YbaXSJsqkAI2uN_mD_TtCydpeq5wU,1267 +jinja2/environment.py,sha256=9nhrP7Ch-NbGX00wvyr4yy-uhNHq2OCc60ggGrni_fk,61513 +jinja2/exceptions.py,sha256=ioHeHrWwCWNaXX1inHmHVblvc4haO7AXsjCp3GfWvx0,5071 +jinja2/ext.py,sha256=5PF5eHfh8mXAIxXHHRB2xXbXohi8pE3nHSOxa66uS7E,31875 +jinja2/filters.py,sha256=PQ_Egd9n9jSgtnGQYyF4K5j2nYwhUIulhPnyimkdr-k,55212 +jinja2/idtracking.py,sha256=-ll5lIp73pML3ErUYiIJj7tdmWxcH_IlDv3yA_hiZYo,10555 +jinja2/lexer.py,sha256=LYiYio6br-Tep9nPcupWXsPEtjluw3p1mU-lNBVRUfk,29786 +jinja2/loaders.py,sha256=wIrnxjvcbqh5VwW28NSkfotiDq8qNCxIOSFbGUiSLB4,24055 +jinja2/meta.py,sha256=OTDPkaFvU2Hgvx-6akz7154F8BIWaRmvJcBFvwopHww,4397 +jinja2/nativetypes.py,sha256=7GIGALVJgdyL80oZJdQUaUfwSt5q2lSSZbXt0dNf_M4,4210 +jinja2/nodes.py,sha256=m1Duzcr6qhZI8JQ6VyJgUNinjAf5bQzijSmDnMsvUx8,34579 +jinja2/optimizer.py,sha256=rJnCRlQ7pZsEEmMhsQDgC_pKyDHxP5TPS6zVPGsgcu8,1651 +jinja2/parser.py,sha256=lLOFy3sEmHc5IaEHRiH1sQVnId2moUQzhyeJZTtdY30,40383 +jinja2/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jinja2/runtime.py,sha256=gDk-GvdriJXqgsGbHgrcKTP0Yp6zPXzhzrIpCFH3jAU,34249 +jinja2/sandbox.py,sha256=Mw2aitlY2I8la7FYhcX2YG9BtUYcLnD0Gh3d29cDWrY,15009 +jinja2/tests.py,sha256=VLsBhVFnWg-PxSBz1MhRnNWgP1ovXk3neO1FLQMeC9Q,5926 +jinja2/utils.py,sha256=rRp3o9e7ZKS4fyrWRbELyLcpuGVTFcnooaOa1qx_FIk,24129 +jinja2/visitor.py,sha256=EcnL1PIwf_4RVCOMxsRNuR8AXHbS1qfAdMOE2ngKJz4,3557 diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2-3.1.6.dist-info/REQUESTED b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2-3.1.6.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2-3.1.6.dist-info/WHEEL b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2-3.1.6.dist-info/WHEEL new file mode 100644 index 0000000..23d2d7e --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2-3.1.6.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.11.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2-3.1.6.dist-info/entry_points.txt b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2-3.1.6.dist-info/entry_points.txt new file mode 100644 index 0000000..abc3eae --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2-3.1.6.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[babel.extractors] +jinja2=jinja2.ext:babel_extract[i18n] + diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2-3.1.6.dist-info/licenses/LICENSE.txt b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2-3.1.6.dist-info/licenses/LICENSE.txt new file mode 100644 index 0000000..c37cae4 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2-3.1.6.dist-info/licenses/LICENSE.txt @@ -0,0 +1,28 @@ +Copyright 2007 Pallets + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/__init__.py b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/__init__.py new file mode 100644 index 0000000..1a423a3 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/__init__.py @@ -0,0 +1,38 @@ +"""Jinja is a template engine written in pure Python. It provides a +non-XML syntax that supports inline expressions and an optional +sandboxed environment. +""" + +from .bccache import BytecodeCache as BytecodeCache +from .bccache import FileSystemBytecodeCache as FileSystemBytecodeCache +from .bccache import MemcachedBytecodeCache as MemcachedBytecodeCache +from .environment import Environment as Environment +from .environment import Template as Template +from .exceptions import TemplateAssertionError as TemplateAssertionError +from .exceptions import TemplateError as TemplateError +from .exceptions import TemplateNotFound as TemplateNotFound +from .exceptions import TemplateRuntimeError as TemplateRuntimeError +from .exceptions import TemplatesNotFound as TemplatesNotFound +from .exceptions import TemplateSyntaxError as TemplateSyntaxError +from .exceptions import UndefinedError as UndefinedError +from .loaders import BaseLoader as BaseLoader +from .loaders import ChoiceLoader as ChoiceLoader +from .loaders import DictLoader as DictLoader +from .loaders import FileSystemLoader as FileSystemLoader +from .loaders import FunctionLoader as FunctionLoader +from .loaders import ModuleLoader as ModuleLoader +from .loaders import PackageLoader as PackageLoader +from .loaders import PrefixLoader as PrefixLoader +from .runtime import ChainableUndefined as ChainableUndefined +from .runtime import DebugUndefined as DebugUndefined +from .runtime import make_logging_undefined as make_logging_undefined +from .runtime import StrictUndefined as StrictUndefined +from .runtime import Undefined as Undefined +from .utils import clear_caches as clear_caches +from .utils import is_undefined as is_undefined +from .utils import pass_context as pass_context +from .utils import pass_environment as pass_environment +from .utils import pass_eval_context as pass_eval_context +from .utils import select_autoescape as select_autoescape + +__version__ = "3.1.6" diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/_identifier.py b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/_identifier.py new file mode 100644 index 0000000..928c150 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/_identifier.py @@ -0,0 +1,6 @@ +import re + +# generated by scripts/generate_identifier_pattern.py +pattern = re.compile( + r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߽߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛࣓-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣ৾ਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣૺ-૿ଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఄా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഀ-ഃ഻഼ാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳷-᳹᷀-᷹᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꣿꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𐴤-𐽆𐴧-𐽐𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑄴𑅅𑅆𑅳𑆀-𑆂𑆳-𑇀𑇉-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌻𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑑞𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑠬-𑠺𑨁-𑨊𑨳-𑨹𑨻-𑨾𑩇𑩑-𑩛𑪊-𑪙𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𑴱-𑴶𑴺𑴼𑴽𑴿-𑵅𑵇𑶊-𑶎𑶐𑶑𑶓-𑶗𑻳-𑻶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+" # noqa: B950 +) diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/async_utils.py b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/async_utils.py new file mode 100644 index 0000000..f0c1402 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/async_utils.py @@ -0,0 +1,99 @@ +import inspect +import typing as t +from functools import WRAPPER_ASSIGNMENTS +from functools import wraps + +from .utils import _PassArg +from .utils import pass_eval_context + +if t.TYPE_CHECKING: + import typing_extensions as te + +V = t.TypeVar("V") + + +def async_variant(normal_func): # type: ignore + def decorator(async_func): # type: ignore + pass_arg = _PassArg.from_obj(normal_func) + need_eval_context = pass_arg is None + + if pass_arg is _PassArg.environment: + + def is_async(args: t.Any) -> bool: + return t.cast(bool, args[0].is_async) + + else: + + def is_async(args: t.Any) -> bool: + return t.cast(bool, args[0].environment.is_async) + + # Take the doc and annotations from the sync function, but the + # name from the async function. Pallets-Sphinx-Themes + # build_function_directive expects __wrapped__ to point to the + # sync function. + async_func_attrs = ("__module__", "__name__", "__qualname__") + normal_func_attrs = tuple(set(WRAPPER_ASSIGNMENTS).difference(async_func_attrs)) + + @wraps(normal_func, assigned=normal_func_attrs) + @wraps(async_func, assigned=async_func_attrs, updated=()) + def wrapper(*args, **kwargs): # type: ignore + b = is_async(args) + + if need_eval_context: + args = args[1:] + + if b: + return async_func(*args, **kwargs) + + return normal_func(*args, **kwargs) + + if need_eval_context: + wrapper = pass_eval_context(wrapper) + + wrapper.jinja_async_variant = True # type: ignore[attr-defined] + return wrapper + + return decorator + + +_common_primitives = {int, float, bool, str, list, dict, tuple, type(None)} + + +async def auto_await(value: t.Union[t.Awaitable["V"], "V"]) -> "V": + # Avoid a costly call to isawaitable + if type(value) in _common_primitives: + return t.cast("V", value) + + if inspect.isawaitable(value): + return await t.cast("t.Awaitable[V]", value) + + return value + + +class _IteratorToAsyncIterator(t.Generic[V]): + def __init__(self, iterator: "t.Iterator[V]"): + self._iterator = iterator + + def __aiter__(self) -> "te.Self": + return self + + async def __anext__(self) -> V: + try: + return next(self._iterator) + except StopIteration as e: + raise StopAsyncIteration(e.value) from e + + +def auto_aiter( + iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]", +) -> "t.AsyncIterator[V]": + if hasattr(iterable, "__aiter__"): + return iterable.__aiter__() + else: + return _IteratorToAsyncIterator(iter(iterable)) + + +async def auto_to_list( + value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]", +) -> t.List["V"]: + return [x async for x in auto_aiter(value)] diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/bccache.py b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/bccache.py new file mode 100644 index 0000000..ada8b09 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/bccache.py @@ -0,0 +1,408 @@ +"""The optional bytecode cache system. This is useful if you have very +complex template situations and the compilation of all those templates +slows down your application too much. + +Situations where this is useful are often forking web applications that +are initialized on the first request. +""" + +import errno +import fnmatch +import marshal +import os +import pickle +import stat +import sys +import tempfile +import typing as t +from hashlib import sha1 +from io import BytesIO +from types import CodeType + +if t.TYPE_CHECKING: + import typing_extensions as te + + from .environment import Environment + + class _MemcachedClient(te.Protocol): + def get(self, key: str) -> bytes: ... + + def set( + self, key: str, value: bytes, timeout: t.Optional[int] = None + ) -> None: ... + + +bc_version = 5 +# Magic bytes to identify Jinja bytecode cache files. Contains the +# Python major and minor version to avoid loading incompatible bytecode +# if a project upgrades its Python version. +bc_magic = ( + b"j2" + + pickle.dumps(bc_version, 2) + + pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1], 2) +) + + +class Bucket: + """Buckets are used to store the bytecode for one template. It's created + and initialized by the bytecode cache and passed to the loading functions. + + The buckets get an internal checksum from the cache assigned and use this + to automatically reject outdated cache material. Individual bytecode + cache subclasses don't have to care about cache invalidation. + """ + + def __init__(self, environment: "Environment", key: str, checksum: str) -> None: + self.environment = environment + self.key = key + self.checksum = checksum + self.reset() + + def reset(self) -> None: + """Resets the bucket (unloads the bytecode).""" + self.code: t.Optional[CodeType] = None + + def load_bytecode(self, f: t.BinaryIO) -> None: + """Loads bytecode from a file or file like object.""" + # make sure the magic header is correct + magic = f.read(len(bc_magic)) + if magic != bc_magic: + self.reset() + return + # the source code of the file changed, we need to reload + checksum = pickle.load(f) + if self.checksum != checksum: + self.reset() + return + # if marshal_load fails then we need to reload + try: + self.code = marshal.load(f) + except (EOFError, ValueError, TypeError): + self.reset() + return + + def write_bytecode(self, f: t.IO[bytes]) -> None: + """Dump the bytecode into the file or file like object passed.""" + if self.code is None: + raise TypeError("can't write empty bucket") + f.write(bc_magic) + pickle.dump(self.checksum, f, 2) + marshal.dump(self.code, f) + + def bytecode_from_string(self, string: bytes) -> None: + """Load bytecode from bytes.""" + self.load_bytecode(BytesIO(string)) + + def bytecode_to_string(self) -> bytes: + """Return the bytecode as bytes.""" + out = BytesIO() + self.write_bytecode(out) + return out.getvalue() + + +class BytecodeCache: + """To implement your own bytecode cache you have to subclass this class + and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of + these methods are passed a :class:`~jinja2.bccache.Bucket`. + + A very basic bytecode cache that saves the bytecode on the file system:: + + from os import path + + class MyCache(BytecodeCache): + + def __init__(self, directory): + self.directory = directory + + def load_bytecode(self, bucket): + filename = path.join(self.directory, bucket.key) + if path.exists(filename): + with open(filename, 'rb') as f: + bucket.load_bytecode(f) + + def dump_bytecode(self, bucket): + filename = path.join(self.directory, bucket.key) + with open(filename, 'wb') as f: + bucket.write_bytecode(f) + + A more advanced version of a filesystem based bytecode cache is part of + Jinja. + """ + + def load_bytecode(self, bucket: Bucket) -> None: + """Subclasses have to override this method to load bytecode into a + bucket. If they are not able to find code in the cache for the + bucket, it must not do anything. + """ + raise NotImplementedError() + + def dump_bytecode(self, bucket: Bucket) -> None: + """Subclasses have to override this method to write the bytecode + from a bucket back to the cache. If it unable to do so it must not + fail silently but raise an exception. + """ + raise NotImplementedError() + + def clear(self) -> None: + """Clears the cache. This method is not used by Jinja but should be + implemented to allow applications to clear the bytecode cache used + by a particular environment. + """ + + def get_cache_key( + self, name: str, filename: t.Optional[t.Union[str]] = None + ) -> str: + """Returns the unique hash key for this template name.""" + hash = sha1(name.encode("utf-8")) + + if filename is not None: + hash.update(f"|{filename}".encode()) + + return hash.hexdigest() + + def get_source_checksum(self, source: str) -> str: + """Returns a checksum for the source.""" + return sha1(source.encode("utf-8")).hexdigest() + + def get_bucket( + self, + environment: "Environment", + name: str, + filename: t.Optional[str], + source: str, + ) -> Bucket: + """Return a cache bucket for the given template. All arguments are + mandatory but filename may be `None`. + """ + key = self.get_cache_key(name, filename) + checksum = self.get_source_checksum(source) + bucket = Bucket(environment, key, checksum) + self.load_bytecode(bucket) + return bucket + + def set_bucket(self, bucket: Bucket) -> None: + """Put the bucket into the cache.""" + self.dump_bytecode(bucket) + + +class FileSystemBytecodeCache(BytecodeCache): + """A bytecode cache that stores bytecode on the filesystem. It accepts + two arguments: The directory where the cache items are stored and a + pattern string that is used to build the filename. + + If no directory is specified a default cache directory is selected. On + Windows the user's temp directory is used, on UNIX systems a directory + is created for the user in the system temp directory. + + The pattern can be used to have multiple separate caches operate on the + same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s`` + is replaced with the cache key. + + >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache') + + This bytecode cache supports clearing of the cache using the clear method. + """ + + def __init__( + self, directory: t.Optional[str] = None, pattern: str = "__jinja2_%s.cache" + ) -> None: + if directory is None: + directory = self._get_default_cache_dir() + self.directory = directory + self.pattern = pattern + + def _get_default_cache_dir(self) -> str: + def _unsafe_dir() -> "te.NoReturn": + raise RuntimeError( + "Cannot determine safe temp directory. You " + "need to explicitly provide one." + ) + + tmpdir = tempfile.gettempdir() + + # On windows the temporary directory is used specific unless + # explicitly forced otherwise. We can just use that. + if os.name == "nt": + return tmpdir + if not hasattr(os, "getuid"): + _unsafe_dir() + + dirname = f"_jinja2-cache-{os.getuid()}" + actual_dir = os.path.join(tmpdir, dirname) + + try: + os.mkdir(actual_dir, stat.S_IRWXU) + except OSError as e: + if e.errno != errno.EEXIST: + raise + try: + os.chmod(actual_dir, stat.S_IRWXU) + actual_dir_stat = os.lstat(actual_dir) + if ( + actual_dir_stat.st_uid != os.getuid() + or not stat.S_ISDIR(actual_dir_stat.st_mode) + or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU + ): + _unsafe_dir() + except OSError as e: + if e.errno != errno.EEXIST: + raise + + actual_dir_stat = os.lstat(actual_dir) + if ( + actual_dir_stat.st_uid != os.getuid() + or not stat.S_ISDIR(actual_dir_stat.st_mode) + or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU + ): + _unsafe_dir() + + return actual_dir + + def _get_cache_filename(self, bucket: Bucket) -> str: + return os.path.join(self.directory, self.pattern % (bucket.key,)) + + def load_bytecode(self, bucket: Bucket) -> None: + filename = self._get_cache_filename(bucket) + + # Don't test for existence before opening the file, since the + # file could disappear after the test before the open. + try: + f = open(filename, "rb") + except (FileNotFoundError, IsADirectoryError, PermissionError): + # PermissionError can occur on Windows when an operation is + # in progress, such as calling clear(). + return + + with f: + bucket.load_bytecode(f) + + def dump_bytecode(self, bucket: Bucket) -> None: + # Write to a temporary file, then rename to the real name after + # writing. This avoids another process reading the file before + # it is fully written. + name = self._get_cache_filename(bucket) + f = tempfile.NamedTemporaryFile( + mode="wb", + dir=os.path.dirname(name), + prefix=os.path.basename(name), + suffix=".tmp", + delete=False, + ) + + def remove_silent() -> None: + try: + os.remove(f.name) + except OSError: + # Another process may have called clear(). On Windows, + # another program may be holding the file open. + pass + + try: + with f: + bucket.write_bytecode(f) + except BaseException: + remove_silent() + raise + + try: + os.replace(f.name, name) + except OSError: + # Another process may have called clear(). On Windows, + # another program may be holding the file open. + remove_silent() + except BaseException: + remove_silent() + raise + + def clear(self) -> None: + # imported lazily here because google app-engine doesn't support + # write access on the file system and the function does not exist + # normally. + from os import remove + + files = fnmatch.filter(os.listdir(self.directory), self.pattern % ("*",)) + for filename in files: + try: + remove(os.path.join(self.directory, filename)) + except OSError: + pass + + +class MemcachedBytecodeCache(BytecodeCache): + """This class implements a bytecode cache that uses a memcache cache for + storing the information. It does not enforce a specific memcache library + (tummy's memcache or cmemcache) but will accept any class that provides + the minimal interface required. + + Libraries compatible with this class: + + - `cachelib `_ + - `python-memcached `_ + + (Unfortunately the django cache interface is not compatible because it + does not support storing binary data, only text. You can however pass + the underlying cache client to the bytecode cache which is available + as `django.core.cache.cache._client`.) + + The minimal interface for the client passed to the constructor is this: + + .. class:: MinimalClientInterface + + .. method:: set(key, value[, timeout]) + + Stores the bytecode in the cache. `value` is a string and + `timeout` the timeout of the key. If timeout is not provided + a default timeout or no timeout should be assumed, if it's + provided it's an integer with the number of seconds the cache + item should exist. + + .. method:: get(key) + + Returns the value for the cache key. If the item does not + exist in the cache the return value must be `None`. + + The other arguments to the constructor are the prefix for all keys that + is added before the actual cache key and the timeout for the bytecode in + the cache system. We recommend a high (or no) timeout. + + This bytecode cache does not support clearing of used items in the cache. + The clear method is a no-operation function. + + .. versionadded:: 2.7 + Added support for ignoring memcache errors through the + `ignore_memcache_errors` parameter. + """ + + def __init__( + self, + client: "_MemcachedClient", + prefix: str = "jinja2/bytecode/", + timeout: t.Optional[int] = None, + ignore_memcache_errors: bool = True, + ): + self.client = client + self.prefix = prefix + self.timeout = timeout + self.ignore_memcache_errors = ignore_memcache_errors + + def load_bytecode(self, bucket: Bucket) -> None: + try: + code = self.client.get(self.prefix + bucket.key) + except Exception: + if not self.ignore_memcache_errors: + raise + else: + bucket.bytecode_from_string(code) + + def dump_bytecode(self, bucket: Bucket) -> None: + key = self.prefix + bucket.key + value = bucket.bytecode_to_string() + + try: + if self.timeout is not None: + self.client.set(key, value, self.timeout) + else: + self.client.set(key, value) + except Exception: + if not self.ignore_memcache_errors: + raise diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/compiler.py b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/compiler.py new file mode 100644 index 0000000..a4ff6a1 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/compiler.py @@ -0,0 +1,1998 @@ +"""Compiles nodes from the parser into Python code.""" + +import typing as t +from contextlib import contextmanager +from functools import update_wrapper +from io import StringIO +from itertools import chain +from keyword import iskeyword as is_python_keyword + +from markupsafe import escape +from markupsafe import Markup + +from . import nodes +from .exceptions import TemplateAssertionError +from .idtracking import Symbols +from .idtracking import VAR_LOAD_ALIAS +from .idtracking import VAR_LOAD_PARAMETER +from .idtracking import VAR_LOAD_RESOLVE +from .idtracking import VAR_LOAD_UNDEFINED +from .nodes import EvalContext +from .optimizer import Optimizer +from .utils import _PassArg +from .utils import concat +from .visitor import NodeVisitor + +if t.TYPE_CHECKING: + import typing_extensions as te + + from .environment import Environment + +F = t.TypeVar("F", bound=t.Callable[..., t.Any]) + +operators = { + "eq": "==", + "ne": "!=", + "gt": ">", + "gteq": ">=", + "lt": "<", + "lteq": "<=", + "in": "in", + "notin": "not in", +} + + +def optimizeconst(f: F) -> F: + def new_func( + self: "CodeGenerator", node: nodes.Expr, frame: "Frame", **kwargs: t.Any + ) -> t.Any: + # Only optimize if the frame is not volatile + if self.optimizer is not None and not frame.eval_ctx.volatile: + new_node = self.optimizer.visit(node, frame.eval_ctx) + + if new_node != node: + return self.visit(new_node, frame) + + return f(self, node, frame, **kwargs) + + return update_wrapper(new_func, f) # type: ignore[return-value] + + +def _make_binop(op: str) -> t.Callable[["CodeGenerator", nodes.BinExpr, "Frame"], None]: + @optimizeconst + def visitor(self: "CodeGenerator", node: nodes.BinExpr, frame: Frame) -> None: + if ( + self.environment.sandboxed and op in self.environment.intercepted_binops # type: ignore + ): + self.write(f"environment.call_binop(context, {op!r}, ") + self.visit(node.left, frame) + self.write(", ") + self.visit(node.right, frame) + else: + self.write("(") + self.visit(node.left, frame) + self.write(f" {op} ") + self.visit(node.right, frame) + + self.write(")") + + return visitor + + +def _make_unop( + op: str, +) -> t.Callable[["CodeGenerator", nodes.UnaryExpr, "Frame"], None]: + @optimizeconst + def visitor(self: "CodeGenerator", node: nodes.UnaryExpr, frame: Frame) -> None: + if ( + self.environment.sandboxed and op in self.environment.intercepted_unops # type: ignore + ): + self.write(f"environment.call_unop(context, {op!r}, ") + self.visit(node.node, frame) + else: + self.write("(" + op) + self.visit(node.node, frame) + + self.write(")") + + return visitor + + +def generate( + node: nodes.Template, + environment: "Environment", + name: t.Optional[str], + filename: t.Optional[str], + stream: t.Optional[t.TextIO] = None, + defer_init: bool = False, + optimized: bool = True, +) -> t.Optional[str]: + """Generate the python source for a node tree.""" + if not isinstance(node, nodes.Template): + raise TypeError("Can't compile non template nodes") + + generator = environment.code_generator_class( + environment, name, filename, stream, defer_init, optimized + ) + generator.visit(node) + + if stream is None: + return generator.stream.getvalue() # type: ignore + + return None + + +def has_safe_repr(value: t.Any) -> bool: + """Does the node have a safe representation?""" + if value is None or value is NotImplemented or value is Ellipsis: + return True + + if type(value) in {bool, int, float, complex, range, str, Markup}: + return True + + if type(value) in {tuple, list, set, frozenset}: + return all(has_safe_repr(v) for v in value) + + if type(value) is dict: # noqa E721 + return all(has_safe_repr(k) and has_safe_repr(v) for k, v in value.items()) + + return False + + +def find_undeclared( + nodes: t.Iterable[nodes.Node], names: t.Iterable[str] +) -> t.Set[str]: + """Check if the names passed are accessed undeclared. The return value + is a set of all the undeclared names from the sequence of names found. + """ + visitor = UndeclaredNameVisitor(names) + try: + for node in nodes: + visitor.visit(node) + except VisitorExit: + pass + return visitor.undeclared + + +class MacroRef: + def __init__(self, node: t.Union[nodes.Macro, nodes.CallBlock]) -> None: + self.node = node + self.accesses_caller = False + self.accesses_kwargs = False + self.accesses_varargs = False + + +class Frame: + """Holds compile time information for us.""" + + def __init__( + self, + eval_ctx: EvalContext, + parent: t.Optional["Frame"] = None, + level: t.Optional[int] = None, + ) -> None: + self.eval_ctx = eval_ctx + + # the parent of this frame + self.parent = parent + + if parent is None: + self.symbols = Symbols(level=level) + + # in some dynamic inheritance situations the compiler needs to add + # write tests around output statements. + self.require_output_check = False + + # inside some tags we are using a buffer rather than yield statements. + # this for example affects {% filter %} or {% macro %}. If a frame + # is buffered this variable points to the name of the list used as + # buffer. + self.buffer: t.Optional[str] = None + + # the name of the block we're in, otherwise None. + self.block: t.Optional[str] = None + + else: + self.symbols = Symbols(parent.symbols, level=level) + self.require_output_check = parent.require_output_check + self.buffer = parent.buffer + self.block = parent.block + + # a toplevel frame is the root + soft frames such as if conditions. + self.toplevel = False + + # the root frame is basically just the outermost frame, so no if + # conditions. This information is used to optimize inheritance + # situations. + self.rootlevel = False + + # variables set inside of loops and blocks should not affect outer frames, + # but they still needs to be kept track of as part of the active context. + self.loop_frame = False + self.block_frame = False + + # track whether the frame is being used in an if-statement or conditional + # expression as it determines which errors should be raised during runtime + # or compile time. + self.soft_frame = False + + def copy(self) -> "te.Self": + """Create a copy of the current one.""" + rv = object.__new__(self.__class__) + rv.__dict__.update(self.__dict__) + rv.symbols = self.symbols.copy() + return rv + + def inner(self, isolated: bool = False) -> "Frame": + """Return an inner frame.""" + if isolated: + return Frame(self.eval_ctx, level=self.symbols.level + 1) + return Frame(self.eval_ctx, self) + + def soft(self) -> "te.Self": + """Return a soft frame. A soft frame may not be modified as + standalone thing as it shares the resources with the frame it + was created of, but it's not a rootlevel frame any longer. + + This is only used to implement if-statements and conditional + expressions. + """ + rv = self.copy() + rv.rootlevel = False + rv.soft_frame = True + return rv + + __copy__ = copy + + +class VisitorExit(RuntimeError): + """Exception used by the `UndeclaredNameVisitor` to signal a stop.""" + + +class DependencyFinderVisitor(NodeVisitor): + """A visitor that collects filter and test calls.""" + + def __init__(self) -> None: + self.filters: t.Set[str] = set() + self.tests: t.Set[str] = set() + + def visit_Filter(self, node: nodes.Filter) -> None: + self.generic_visit(node) + self.filters.add(node.name) + + def visit_Test(self, node: nodes.Test) -> None: + self.generic_visit(node) + self.tests.add(node.name) + + def visit_Block(self, node: nodes.Block) -> None: + """Stop visiting at blocks.""" + + +class UndeclaredNameVisitor(NodeVisitor): + """A visitor that checks if a name is accessed without being + declared. This is different from the frame visitor as it will + not stop at closure frames. + """ + + def __init__(self, names: t.Iterable[str]) -> None: + self.names = set(names) + self.undeclared: t.Set[str] = set() + + def visit_Name(self, node: nodes.Name) -> None: + if node.ctx == "load" and node.name in self.names: + self.undeclared.add(node.name) + if self.undeclared == self.names: + raise VisitorExit() + else: + self.names.discard(node.name) + + def visit_Block(self, node: nodes.Block) -> None: + """Stop visiting a blocks.""" + + +class CompilerExit(Exception): + """Raised if the compiler encountered a situation where it just + doesn't make sense to further process the code. Any block that + raises such an exception is not further processed. + """ + + +class CodeGenerator(NodeVisitor): + def __init__( + self, + environment: "Environment", + name: t.Optional[str], + filename: t.Optional[str], + stream: t.Optional[t.TextIO] = None, + defer_init: bool = False, + optimized: bool = True, + ) -> None: + if stream is None: + stream = StringIO() + self.environment = environment + self.name = name + self.filename = filename + self.stream = stream + self.created_block_context = False + self.defer_init = defer_init + self.optimizer: t.Optional[Optimizer] = None + + if optimized: + self.optimizer = Optimizer(environment) + + # aliases for imports + self.import_aliases: t.Dict[str, str] = {} + + # a registry for all blocks. Because blocks are moved out + # into the global python scope they are registered here + self.blocks: t.Dict[str, nodes.Block] = {} + + # the number of extends statements so far + self.extends_so_far = 0 + + # some templates have a rootlevel extends. In this case we + # can safely assume that we're a child template and do some + # more optimizations. + self.has_known_extends = False + + # the current line number + self.code_lineno = 1 + + # registry of all filters and tests (global, not block local) + self.tests: t.Dict[str, str] = {} + self.filters: t.Dict[str, str] = {} + + # the debug information + self.debug_info: t.List[t.Tuple[int, int]] = [] + self._write_debug_info: t.Optional[int] = None + + # the number of new lines before the next write() + self._new_lines = 0 + + # the line number of the last written statement + self._last_line = 0 + + # true if nothing was written so far. + self._first_write = True + + # used by the `temporary_identifier` method to get new + # unique, temporary identifier + self._last_identifier = 0 + + # the current indentation + self._indentation = 0 + + # Tracks toplevel assignments + self._assign_stack: t.List[t.Set[str]] = [] + + # Tracks parameter definition blocks + self._param_def_block: t.List[t.Set[str]] = [] + + # Tracks the current context. + self._context_reference_stack = ["context"] + + @property + def optimized(self) -> bool: + return self.optimizer is not None + + # -- Various compilation helpers + + def fail(self, msg: str, lineno: int) -> "te.NoReturn": + """Fail with a :exc:`TemplateAssertionError`.""" + raise TemplateAssertionError(msg, lineno, self.name, self.filename) + + def temporary_identifier(self) -> str: + """Get a new unique identifier.""" + self._last_identifier += 1 + return f"t_{self._last_identifier}" + + def buffer(self, frame: Frame) -> None: + """Enable buffering for the frame from that point onwards.""" + frame.buffer = self.temporary_identifier() + self.writeline(f"{frame.buffer} = []") + + def return_buffer_contents( + self, frame: Frame, force_unescaped: bool = False + ) -> None: + """Return the buffer contents of the frame.""" + if not force_unescaped: + if frame.eval_ctx.volatile: + self.writeline("if context.eval_ctx.autoescape:") + self.indent() + self.writeline(f"return Markup(concat({frame.buffer}))") + self.outdent() + self.writeline("else:") + self.indent() + self.writeline(f"return concat({frame.buffer})") + self.outdent() + return + elif frame.eval_ctx.autoescape: + self.writeline(f"return Markup(concat({frame.buffer}))") + return + self.writeline(f"return concat({frame.buffer})") + + def indent(self) -> None: + """Indent by one.""" + self._indentation += 1 + + def outdent(self, step: int = 1) -> None: + """Outdent by step.""" + self._indentation -= step + + def start_write(self, frame: Frame, node: t.Optional[nodes.Node] = None) -> None: + """Yield or write into the frame buffer.""" + if frame.buffer is None: + self.writeline("yield ", node) + else: + self.writeline(f"{frame.buffer}.append(", node) + + def end_write(self, frame: Frame) -> None: + """End the writing process started by `start_write`.""" + if frame.buffer is not None: + self.write(")") + + def simple_write( + self, s: str, frame: Frame, node: t.Optional[nodes.Node] = None + ) -> None: + """Simple shortcut for start_write + write + end_write.""" + self.start_write(frame, node) + self.write(s) + self.end_write(frame) + + def blockvisit(self, nodes: t.Iterable[nodes.Node], frame: Frame) -> None: + """Visit a list of nodes as block in a frame. If the current frame + is no buffer a dummy ``if 0: yield None`` is written automatically. + """ + try: + self.writeline("pass") + for node in nodes: + self.visit(node, frame) + except CompilerExit: + pass + + def write(self, x: str) -> None: + """Write a string into the output stream.""" + if self._new_lines: + if not self._first_write: + self.stream.write("\n" * self._new_lines) + self.code_lineno += self._new_lines + if self._write_debug_info is not None: + self.debug_info.append((self._write_debug_info, self.code_lineno)) + self._write_debug_info = None + self._first_write = False + self.stream.write(" " * self._indentation) + self._new_lines = 0 + self.stream.write(x) + + def writeline( + self, x: str, node: t.Optional[nodes.Node] = None, extra: int = 0 + ) -> None: + """Combination of newline and write.""" + self.newline(node, extra) + self.write(x) + + def newline(self, node: t.Optional[nodes.Node] = None, extra: int = 0) -> None: + """Add one or more newlines before the next write.""" + self._new_lines = max(self._new_lines, 1 + extra) + if node is not None and node.lineno != self._last_line: + self._write_debug_info = node.lineno + self._last_line = node.lineno + + def signature( + self, + node: t.Union[nodes.Call, nodes.Filter, nodes.Test], + frame: Frame, + extra_kwargs: t.Optional[t.Mapping[str, t.Any]] = None, + ) -> None: + """Writes a function call to the stream for the current node. + A leading comma is added automatically. The extra keyword + arguments may not include python keywords otherwise a syntax + error could occur. The extra keyword arguments should be given + as python dict. + """ + # if any of the given keyword arguments is a python keyword + # we have to make sure that no invalid call is created. + kwarg_workaround = any( + is_python_keyword(t.cast(str, k)) + for k in chain((x.key for x in node.kwargs), extra_kwargs or ()) + ) + + for arg in node.args: + self.write(", ") + self.visit(arg, frame) + + if not kwarg_workaround: + for kwarg in node.kwargs: + self.write(", ") + self.visit(kwarg, frame) + if extra_kwargs is not None: + for key, value in extra_kwargs.items(): + self.write(f", {key}={value}") + if node.dyn_args: + self.write(", *") + self.visit(node.dyn_args, frame) + + if kwarg_workaround: + if node.dyn_kwargs is not None: + self.write(", **dict({") + else: + self.write(", **{") + for kwarg in node.kwargs: + self.write(f"{kwarg.key!r}: ") + self.visit(kwarg.value, frame) + self.write(", ") + if extra_kwargs is not None: + for key, value in extra_kwargs.items(): + self.write(f"{key!r}: {value}, ") + if node.dyn_kwargs is not None: + self.write("}, **") + self.visit(node.dyn_kwargs, frame) + self.write(")") + else: + self.write("}") + + elif node.dyn_kwargs is not None: + self.write(", **") + self.visit(node.dyn_kwargs, frame) + + def pull_dependencies(self, nodes: t.Iterable[nodes.Node]) -> None: + """Find all filter and test names used in the template and + assign them to variables in the compiled namespace. Checking + that the names are registered with the environment is done when + compiling the Filter and Test nodes. If the node is in an If or + CondExpr node, the check is done at runtime instead. + + .. versionchanged:: 3.0 + Filters and tests in If and CondExpr nodes are checked at + runtime instead of compile time. + """ + visitor = DependencyFinderVisitor() + + for node in nodes: + visitor.visit(node) + + for id_map, names, dependency in ( + (self.filters, visitor.filters, "filters"), + ( + self.tests, + visitor.tests, + "tests", + ), + ): + for name in sorted(names): + if name not in id_map: + id_map[name] = self.temporary_identifier() + + # add check during runtime that dependencies used inside of executed + # blocks are defined, as this step may be skipped during compile time + self.writeline("try:") + self.indent() + self.writeline(f"{id_map[name]} = environment.{dependency}[{name!r}]") + self.outdent() + self.writeline("except KeyError:") + self.indent() + self.writeline("@internalcode") + self.writeline(f"def {id_map[name]}(*unused):") + self.indent() + self.writeline( + f'raise TemplateRuntimeError("No {dependency[:-1]}' + f' named {name!r} found.")' + ) + self.outdent() + self.outdent() + + def enter_frame(self, frame: Frame) -> None: + undefs = [] + for target, (action, param) in frame.symbols.loads.items(): + if action == VAR_LOAD_PARAMETER: + pass + elif action == VAR_LOAD_RESOLVE: + self.writeline(f"{target} = {self.get_resolve_func()}({param!r})") + elif action == VAR_LOAD_ALIAS: + self.writeline(f"{target} = {param}") + elif action == VAR_LOAD_UNDEFINED: + undefs.append(target) + else: + raise NotImplementedError("unknown load instruction") + if undefs: + self.writeline(f"{' = '.join(undefs)} = missing") + + def leave_frame(self, frame: Frame, with_python_scope: bool = False) -> None: + if not with_python_scope: + undefs = [] + for target in frame.symbols.loads: + undefs.append(target) + if undefs: + self.writeline(f"{' = '.join(undefs)} = missing") + + def choose_async(self, async_value: str = "async ", sync_value: str = "") -> str: + return async_value if self.environment.is_async else sync_value + + def func(self, name: str) -> str: + return f"{self.choose_async()}def {name}" + + def macro_body( + self, node: t.Union[nodes.Macro, nodes.CallBlock], frame: Frame + ) -> t.Tuple[Frame, MacroRef]: + """Dump the function def of a macro or call block.""" + frame = frame.inner() + frame.symbols.analyze_node(node) + macro_ref = MacroRef(node) + + explicit_caller = None + skip_special_params = set() + args = [] + + for idx, arg in enumerate(node.args): + if arg.name == "caller": + explicit_caller = idx + if arg.name in ("kwargs", "varargs"): + skip_special_params.add(arg.name) + args.append(frame.symbols.ref(arg.name)) + + undeclared = find_undeclared(node.body, ("caller", "kwargs", "varargs")) + + if "caller" in undeclared: + # In older Jinja versions there was a bug that allowed caller + # to retain the special behavior even if it was mentioned in + # the argument list. However thankfully this was only really + # working if it was the last argument. So we are explicitly + # checking this now and error out if it is anywhere else in + # the argument list. + if explicit_caller is not None: + try: + node.defaults[explicit_caller - len(node.args)] + except IndexError: + self.fail( + "When defining macros or call blocks the " + 'special "caller" argument must be omitted ' + "or be given a default.", + node.lineno, + ) + else: + args.append(frame.symbols.declare_parameter("caller")) + macro_ref.accesses_caller = True + if "kwargs" in undeclared and "kwargs" not in skip_special_params: + args.append(frame.symbols.declare_parameter("kwargs")) + macro_ref.accesses_kwargs = True + if "varargs" in undeclared and "varargs" not in skip_special_params: + args.append(frame.symbols.declare_parameter("varargs")) + macro_ref.accesses_varargs = True + + # macros are delayed, they never require output checks + frame.require_output_check = False + frame.symbols.analyze_node(node) + self.writeline(f"{self.func('macro')}({', '.join(args)}):", node) + self.indent() + + self.buffer(frame) + self.enter_frame(frame) + + self.push_parameter_definitions(frame) + for idx, arg in enumerate(node.args): + ref = frame.symbols.ref(arg.name) + self.writeline(f"if {ref} is missing:") + self.indent() + try: + default = node.defaults[idx - len(node.args)] + except IndexError: + self.writeline( + f'{ref} = undefined("parameter {arg.name!r} was not provided",' + f" name={arg.name!r})" + ) + else: + self.writeline(f"{ref} = ") + self.visit(default, frame) + self.mark_parameter_stored(ref) + self.outdent() + self.pop_parameter_definitions() + + self.blockvisit(node.body, frame) + self.return_buffer_contents(frame, force_unescaped=True) + self.leave_frame(frame, with_python_scope=True) + self.outdent() + + return frame, macro_ref + + def macro_def(self, macro_ref: MacroRef, frame: Frame) -> None: + """Dump the macro definition for the def created by macro_body.""" + arg_tuple = ", ".join(repr(x.name) for x in macro_ref.node.args) + name = getattr(macro_ref.node, "name", None) + if len(macro_ref.node.args) == 1: + arg_tuple += "," + self.write( + f"Macro(environment, macro, {name!r}, ({arg_tuple})," + f" {macro_ref.accesses_kwargs!r}, {macro_ref.accesses_varargs!r}," + f" {macro_ref.accesses_caller!r}, context.eval_ctx.autoescape)" + ) + + def position(self, node: nodes.Node) -> str: + """Return a human readable position for the node.""" + rv = f"line {node.lineno}" + if self.name is not None: + rv = f"{rv} in {self.name!r}" + return rv + + def dump_local_context(self, frame: Frame) -> str: + items_kv = ", ".join( + f"{name!r}: {target}" + for name, target in frame.symbols.dump_stores().items() + ) + return f"{{{items_kv}}}" + + def write_commons(self) -> None: + """Writes a common preamble that is used by root and block functions. + Primarily this sets up common local helpers and enforces a generator + through a dead branch. + """ + self.writeline("resolve = context.resolve_or_missing") + self.writeline("undefined = environment.undefined") + self.writeline("concat = environment.concat") + # always use the standard Undefined class for the implicit else of + # conditional expressions + self.writeline("cond_expr_undefined = Undefined") + self.writeline("if 0: yield None") + + def push_parameter_definitions(self, frame: Frame) -> None: + """Pushes all parameter targets from the given frame into a local + stack that permits tracking of yet to be assigned parameters. In + particular this enables the optimization from `visit_Name` to skip + undefined expressions for parameters in macros as macros can reference + otherwise unbound parameters. + """ + self._param_def_block.append(frame.symbols.dump_param_targets()) + + def pop_parameter_definitions(self) -> None: + """Pops the current parameter definitions set.""" + self._param_def_block.pop() + + def mark_parameter_stored(self, target: str) -> None: + """Marks a parameter in the current parameter definitions as stored. + This will skip the enforced undefined checks. + """ + if self._param_def_block: + self._param_def_block[-1].discard(target) + + def push_context_reference(self, target: str) -> None: + self._context_reference_stack.append(target) + + def pop_context_reference(self) -> None: + self._context_reference_stack.pop() + + def get_context_ref(self) -> str: + return self._context_reference_stack[-1] + + def get_resolve_func(self) -> str: + target = self._context_reference_stack[-1] + if target == "context": + return "resolve" + return f"{target}.resolve" + + def derive_context(self, frame: Frame) -> str: + return f"{self.get_context_ref()}.derived({self.dump_local_context(frame)})" + + def parameter_is_undeclared(self, target: str) -> bool: + """Checks if a given target is an undeclared parameter.""" + if not self._param_def_block: + return False + return target in self._param_def_block[-1] + + def push_assign_tracking(self) -> None: + """Pushes a new layer for assignment tracking.""" + self._assign_stack.append(set()) + + def pop_assign_tracking(self, frame: Frame) -> None: + """Pops the topmost level for assignment tracking and updates the + context variables if necessary. + """ + vars = self._assign_stack.pop() + if ( + not frame.block_frame + and not frame.loop_frame + and not frame.toplevel + or not vars + ): + return + public_names = [x for x in vars if x[:1] != "_"] + if len(vars) == 1: + name = next(iter(vars)) + ref = frame.symbols.ref(name) + if frame.loop_frame: + self.writeline(f"_loop_vars[{name!r}] = {ref}") + return + if frame.block_frame: + self.writeline(f"_block_vars[{name!r}] = {ref}") + return + self.writeline(f"context.vars[{name!r}] = {ref}") + else: + if frame.loop_frame: + self.writeline("_loop_vars.update({") + elif frame.block_frame: + self.writeline("_block_vars.update({") + else: + self.writeline("context.vars.update({") + for idx, name in enumerate(sorted(vars)): + if idx: + self.write(", ") + ref = frame.symbols.ref(name) + self.write(f"{name!r}: {ref}") + self.write("})") + if not frame.block_frame and not frame.loop_frame and public_names: + if len(public_names) == 1: + self.writeline(f"context.exported_vars.add({public_names[0]!r})") + else: + names_str = ", ".join(map(repr, sorted(public_names))) + self.writeline(f"context.exported_vars.update(({names_str}))") + + # -- Statement Visitors + + def visit_Template( + self, node: nodes.Template, frame: t.Optional[Frame] = None + ) -> None: + assert frame is None, "no root frame allowed" + eval_ctx = EvalContext(self.environment, self.name) + + from .runtime import async_exported + from .runtime import exported + + if self.environment.is_async: + exported_names = sorted(exported + async_exported) + else: + exported_names = sorted(exported) + + self.writeline("from jinja2.runtime import " + ", ".join(exported_names)) + + # if we want a deferred initialization we cannot move the + # environment into a local name + envenv = "" if self.defer_init else ", environment=environment" + + # do we have an extends tag at all? If not, we can save some + # overhead by just not processing any inheritance code. + have_extends = node.find(nodes.Extends) is not None + + # find all blocks + for block in node.find_all(nodes.Block): + if block.name in self.blocks: + self.fail(f"block {block.name!r} defined twice", block.lineno) + self.blocks[block.name] = block + + # find all imports and import them + for import_ in node.find_all(nodes.ImportedName): + if import_.importname not in self.import_aliases: + imp = import_.importname + self.import_aliases[imp] = alias = self.temporary_identifier() + if "." in imp: + module, obj = imp.rsplit(".", 1) + self.writeline(f"from {module} import {obj} as {alias}") + else: + self.writeline(f"import {imp} as {alias}") + + # add the load name + self.writeline(f"name = {self.name!r}") + + # generate the root render function. + self.writeline( + f"{self.func('root')}(context, missing=missing{envenv}):", extra=1 + ) + self.indent() + self.write_commons() + + # process the root + frame = Frame(eval_ctx) + if "self" in find_undeclared(node.body, ("self",)): + ref = frame.symbols.declare_parameter("self") + self.writeline(f"{ref} = TemplateReference(context)") + frame.symbols.analyze_node(node) + frame.toplevel = frame.rootlevel = True + frame.require_output_check = have_extends and not self.has_known_extends + if have_extends: + self.writeline("parent_template = None") + self.enter_frame(frame) + self.pull_dependencies(node.body) + self.blockvisit(node.body, frame) + self.leave_frame(frame, with_python_scope=True) + self.outdent() + + # make sure that the parent root is called. + if have_extends: + if not self.has_known_extends: + self.indent() + self.writeline("if parent_template is not None:") + self.indent() + if not self.environment.is_async: + self.writeline("yield from parent_template.root_render_func(context)") + else: + self.writeline("agen = parent_template.root_render_func(context)") + self.writeline("try:") + self.indent() + self.writeline("async for event in agen:") + self.indent() + self.writeline("yield event") + self.outdent() + self.outdent() + self.writeline("finally: await agen.aclose()") + self.outdent(1 + (not self.has_known_extends)) + + # at this point we now have the blocks collected and can visit them too. + for name, block in self.blocks.items(): + self.writeline( + f"{self.func('block_' + name)}(context, missing=missing{envenv}):", + block, + 1, + ) + self.indent() + self.write_commons() + # It's important that we do not make this frame a child of the + # toplevel template. This would cause a variety of + # interesting issues with identifier tracking. + block_frame = Frame(eval_ctx) + block_frame.block_frame = True + undeclared = find_undeclared(block.body, ("self", "super")) + if "self" in undeclared: + ref = block_frame.symbols.declare_parameter("self") + self.writeline(f"{ref} = TemplateReference(context)") + if "super" in undeclared: + ref = block_frame.symbols.declare_parameter("super") + self.writeline(f"{ref} = context.super({name!r}, block_{name})") + block_frame.symbols.analyze_node(block) + block_frame.block = name + self.writeline("_block_vars = {}") + self.enter_frame(block_frame) + self.pull_dependencies(block.body) + self.blockvisit(block.body, block_frame) + self.leave_frame(block_frame, with_python_scope=True) + self.outdent() + + blocks_kv_str = ", ".join(f"{x!r}: block_{x}" for x in self.blocks) + self.writeline(f"blocks = {{{blocks_kv_str}}}", extra=1) + debug_kv_str = "&".join(f"{k}={v}" for k, v in self.debug_info) + self.writeline(f"debug_info = {debug_kv_str!r}") + + def visit_Block(self, node: nodes.Block, frame: Frame) -> None: + """Call a block and register it for the template.""" + level = 0 + if frame.toplevel: + # if we know that we are a child template, there is no need to + # check if we are one + if self.has_known_extends: + return + if self.extends_so_far > 0: + self.writeline("if parent_template is None:") + self.indent() + level += 1 + + if node.scoped: + context = self.derive_context(frame) + else: + context = self.get_context_ref() + + if node.required: + self.writeline(f"if len(context.blocks[{node.name!r}]) <= 1:", node) + self.indent() + self.writeline( + f'raise TemplateRuntimeError("Required block {node.name!r} not found")', + node, + ) + self.outdent() + + if not self.environment.is_async and frame.buffer is None: + self.writeline( + f"yield from context.blocks[{node.name!r}][0]({context})", node + ) + else: + self.writeline(f"gen = context.blocks[{node.name!r}][0]({context})") + self.writeline("try:") + self.indent() + self.writeline( + f"{self.choose_async()}for event in gen:", + node, + ) + self.indent() + self.simple_write("event", frame) + self.outdent() + self.outdent() + self.writeline( + f"finally: {self.choose_async('await gen.aclose()', 'gen.close()')}" + ) + + self.outdent(level) + + def visit_Extends(self, node: nodes.Extends, frame: Frame) -> None: + """Calls the extender.""" + if not frame.toplevel: + self.fail("cannot use extend from a non top-level scope", node.lineno) + + # if the number of extends statements in general is zero so + # far, we don't have to add a check if something extended + # the template before this one. + if self.extends_so_far > 0: + # if we have a known extends we just add a template runtime + # error into the generated code. We could catch that at compile + # time too, but i welcome it not to confuse users by throwing the + # same error at different times just "because we can". + if not self.has_known_extends: + self.writeline("if parent_template is not None:") + self.indent() + self.writeline('raise TemplateRuntimeError("extended multiple times")') + + # if we have a known extends already we don't need that code here + # as we know that the template execution will end here. + if self.has_known_extends: + raise CompilerExit() + else: + self.outdent() + + self.writeline("parent_template = environment.get_template(", node) + self.visit(node.template, frame) + self.write(f", {self.name!r})") + self.writeline("for name, parent_block in parent_template.blocks.items():") + self.indent() + self.writeline("context.blocks.setdefault(name, []).append(parent_block)") + self.outdent() + + # if this extends statement was in the root level we can take + # advantage of that information and simplify the generated code + # in the top level from this point onwards + if frame.rootlevel: + self.has_known_extends = True + + # and now we have one more + self.extends_so_far += 1 + + def visit_Include(self, node: nodes.Include, frame: Frame) -> None: + """Handles includes.""" + if node.ignore_missing: + self.writeline("try:") + self.indent() + + func_name = "get_or_select_template" + if isinstance(node.template, nodes.Const): + if isinstance(node.template.value, str): + func_name = "get_template" + elif isinstance(node.template.value, (tuple, list)): + func_name = "select_template" + elif isinstance(node.template, (nodes.Tuple, nodes.List)): + func_name = "select_template" + + self.writeline(f"template = environment.{func_name}(", node) + self.visit(node.template, frame) + self.write(f", {self.name!r})") + if node.ignore_missing: + self.outdent() + self.writeline("except TemplateNotFound:") + self.indent() + self.writeline("pass") + self.outdent() + self.writeline("else:") + self.indent() + + def loop_body() -> None: + self.indent() + self.simple_write("event", frame) + self.outdent() + + if node.with_context: + self.writeline( + f"gen = template.root_render_func(" + "template.new_context(context.get_all(), True," + f" {self.dump_local_context(frame)}))" + ) + self.writeline("try:") + self.indent() + self.writeline(f"{self.choose_async()}for event in gen:") + loop_body() + self.outdent() + self.writeline( + f"finally: {self.choose_async('await gen.aclose()', 'gen.close()')}" + ) + elif self.environment.is_async: + self.writeline( + "for event in (await template._get_default_module_async())" + "._body_stream:" + ) + loop_body() + else: + self.writeline("yield from template._get_default_module()._body_stream") + + if node.ignore_missing: + self.outdent() + + def _import_common( + self, node: t.Union[nodes.Import, nodes.FromImport], frame: Frame + ) -> None: + self.write(f"{self.choose_async('await ')}environment.get_template(") + self.visit(node.template, frame) + self.write(f", {self.name!r}).") + + if node.with_context: + f_name = f"make_module{self.choose_async('_async')}" + self.write( + f"{f_name}(context.get_all(), True, {self.dump_local_context(frame)})" + ) + else: + self.write(f"_get_default_module{self.choose_async('_async')}(context)") + + def visit_Import(self, node: nodes.Import, frame: Frame) -> None: + """Visit regular imports.""" + self.writeline(f"{frame.symbols.ref(node.target)} = ", node) + if frame.toplevel: + self.write(f"context.vars[{node.target!r}] = ") + + self._import_common(node, frame) + + if frame.toplevel and not node.target.startswith("_"): + self.writeline(f"context.exported_vars.discard({node.target!r})") + + def visit_FromImport(self, node: nodes.FromImport, frame: Frame) -> None: + """Visit named imports.""" + self.newline(node) + self.write("included_template = ") + self._import_common(node, frame) + var_names = [] + discarded_names = [] + for name in node.names: + if isinstance(name, tuple): + name, alias = name + else: + alias = name + self.writeline( + f"{frame.symbols.ref(alias)} =" + f" getattr(included_template, {name!r}, missing)" + ) + self.writeline(f"if {frame.symbols.ref(alias)} is missing:") + self.indent() + # The position will contain the template name, and will be formatted + # into a string that will be compiled into an f-string. Curly braces + # in the name must be replaced with escapes so that they will not be + # executed as part of the f-string. + position = self.position(node).replace("{", "{{").replace("}", "}}") + message = ( + "the template {included_template.__name__!r}" + f" (imported on {position})" + f" does not export the requested name {name!r}" + ) + self.writeline( + f"{frame.symbols.ref(alias)} = undefined(f{message!r}, name={name!r})" + ) + self.outdent() + if frame.toplevel: + var_names.append(alias) + if not alias.startswith("_"): + discarded_names.append(alias) + + if var_names: + if len(var_names) == 1: + name = var_names[0] + self.writeline(f"context.vars[{name!r}] = {frame.symbols.ref(name)}") + else: + names_kv = ", ".join( + f"{name!r}: {frame.symbols.ref(name)}" for name in var_names + ) + self.writeline(f"context.vars.update({{{names_kv}}})") + if discarded_names: + if len(discarded_names) == 1: + self.writeline(f"context.exported_vars.discard({discarded_names[0]!r})") + else: + names_str = ", ".join(map(repr, discarded_names)) + self.writeline( + f"context.exported_vars.difference_update(({names_str}))" + ) + + def visit_For(self, node: nodes.For, frame: Frame) -> None: + loop_frame = frame.inner() + loop_frame.loop_frame = True + test_frame = frame.inner() + else_frame = frame.inner() + + # try to figure out if we have an extended loop. An extended loop + # is necessary if the loop is in recursive mode if the special loop + # variable is accessed in the body if the body is a scoped block. + extended_loop = ( + node.recursive + or "loop" + in find_undeclared(node.iter_child_nodes(only=("body",)), ("loop",)) + or any(block.scoped for block in node.find_all(nodes.Block)) + ) + + loop_ref = None + if extended_loop: + loop_ref = loop_frame.symbols.declare_parameter("loop") + + loop_frame.symbols.analyze_node(node, for_branch="body") + if node.else_: + else_frame.symbols.analyze_node(node, for_branch="else") + + if node.test: + loop_filter_func = self.temporary_identifier() + test_frame.symbols.analyze_node(node, for_branch="test") + self.writeline(f"{self.func(loop_filter_func)}(fiter):", node.test) + self.indent() + self.enter_frame(test_frame) + self.writeline(self.choose_async("async for ", "for ")) + self.visit(node.target, loop_frame) + self.write(" in ") + self.write(self.choose_async("auto_aiter(fiter)", "fiter")) + self.write(":") + self.indent() + self.writeline("if ", node.test) + self.visit(node.test, test_frame) + self.write(":") + self.indent() + self.writeline("yield ") + self.visit(node.target, loop_frame) + self.outdent(3) + self.leave_frame(test_frame, with_python_scope=True) + + # if we don't have an recursive loop we have to find the shadowed + # variables at that point. Because loops can be nested but the loop + # variable is a special one we have to enforce aliasing for it. + if node.recursive: + self.writeline( + f"{self.func('loop')}(reciter, loop_render_func, depth=0):", node + ) + self.indent() + self.buffer(loop_frame) + + # Use the same buffer for the else frame + else_frame.buffer = loop_frame.buffer + + # make sure the loop variable is a special one and raise a template + # assertion error if a loop tries to write to loop + if extended_loop: + self.writeline(f"{loop_ref} = missing") + + for name in node.find_all(nodes.Name): + if name.ctx == "store" and name.name == "loop": + self.fail( + "Can't assign to special loop variable in for-loop target", + name.lineno, + ) + + if node.else_: + iteration_indicator = self.temporary_identifier() + self.writeline(f"{iteration_indicator} = 1") + + self.writeline(self.choose_async("async for ", "for "), node) + self.visit(node.target, loop_frame) + if extended_loop: + self.write(f", {loop_ref} in {self.choose_async('Async')}LoopContext(") + else: + self.write(" in ") + + if node.test: + self.write(f"{loop_filter_func}(") + if node.recursive: + self.write("reciter") + else: + if self.environment.is_async and not extended_loop: + self.write("auto_aiter(") + self.visit(node.iter, frame) + if self.environment.is_async and not extended_loop: + self.write(")") + if node.test: + self.write(")") + + if node.recursive: + self.write(", undefined, loop_render_func, depth):") + else: + self.write(", undefined):" if extended_loop else ":") + + self.indent() + self.enter_frame(loop_frame) + + self.writeline("_loop_vars = {}") + self.blockvisit(node.body, loop_frame) + if node.else_: + self.writeline(f"{iteration_indicator} = 0") + self.outdent() + self.leave_frame( + loop_frame, with_python_scope=node.recursive and not node.else_ + ) + + if node.else_: + self.writeline(f"if {iteration_indicator}:") + self.indent() + self.enter_frame(else_frame) + self.blockvisit(node.else_, else_frame) + self.leave_frame(else_frame) + self.outdent() + + # if the node was recursive we have to return the buffer contents + # and start the iteration code + if node.recursive: + self.return_buffer_contents(loop_frame) + self.outdent() + self.start_write(frame, node) + self.write(f"{self.choose_async('await ')}loop(") + if self.environment.is_async: + self.write("auto_aiter(") + self.visit(node.iter, frame) + if self.environment.is_async: + self.write(")") + self.write(", loop)") + self.end_write(frame) + + # at the end of the iteration, clear any assignments made in the + # loop from the top level + if self._assign_stack: + self._assign_stack[-1].difference_update(loop_frame.symbols.stores) + + def visit_If(self, node: nodes.If, frame: Frame) -> None: + if_frame = frame.soft() + self.writeline("if ", node) + self.visit(node.test, if_frame) + self.write(":") + self.indent() + self.blockvisit(node.body, if_frame) + self.outdent() + for elif_ in node.elif_: + self.writeline("elif ", elif_) + self.visit(elif_.test, if_frame) + self.write(":") + self.indent() + self.blockvisit(elif_.body, if_frame) + self.outdent() + if node.else_: + self.writeline("else:") + self.indent() + self.blockvisit(node.else_, if_frame) + self.outdent() + + def visit_Macro(self, node: nodes.Macro, frame: Frame) -> None: + macro_frame, macro_ref = self.macro_body(node, frame) + self.newline() + if frame.toplevel: + if not node.name.startswith("_"): + self.write(f"context.exported_vars.add({node.name!r})") + self.writeline(f"context.vars[{node.name!r}] = ") + self.write(f"{frame.symbols.ref(node.name)} = ") + self.macro_def(macro_ref, macro_frame) + + def visit_CallBlock(self, node: nodes.CallBlock, frame: Frame) -> None: + call_frame, macro_ref = self.macro_body(node, frame) + self.writeline("caller = ") + self.macro_def(macro_ref, call_frame) + self.start_write(frame, node) + self.visit_Call(node.call, frame, forward_caller=True) + self.end_write(frame) + + def visit_FilterBlock(self, node: nodes.FilterBlock, frame: Frame) -> None: + filter_frame = frame.inner() + filter_frame.symbols.analyze_node(node) + self.enter_frame(filter_frame) + self.buffer(filter_frame) + self.blockvisit(node.body, filter_frame) + self.start_write(frame, node) + self.visit_Filter(node.filter, filter_frame) + self.end_write(frame) + self.leave_frame(filter_frame) + + def visit_With(self, node: nodes.With, frame: Frame) -> None: + with_frame = frame.inner() + with_frame.symbols.analyze_node(node) + self.enter_frame(with_frame) + for target, expr in zip(node.targets, node.values): + self.newline() + self.visit(target, with_frame) + self.write(" = ") + self.visit(expr, frame) + self.blockvisit(node.body, with_frame) + self.leave_frame(with_frame) + + def visit_ExprStmt(self, node: nodes.ExprStmt, frame: Frame) -> None: + self.newline(node) + self.visit(node.node, frame) + + class _FinalizeInfo(t.NamedTuple): + const: t.Optional[t.Callable[..., str]] + src: t.Optional[str] + + @staticmethod + def _default_finalize(value: t.Any) -> t.Any: + """The default finalize function if the environment isn't + configured with one. Or, if the environment has one, this is + called on that function's output for constants. + """ + return str(value) + + _finalize: t.Optional[_FinalizeInfo] = None + + def _make_finalize(self) -> _FinalizeInfo: + """Build the finalize function to be used on constants and at + runtime. Cached so it's only created once for all output nodes. + + Returns a ``namedtuple`` with the following attributes: + + ``const`` + A function to finalize constant data at compile time. + + ``src`` + Source code to output around nodes to be evaluated at + runtime. + """ + if self._finalize is not None: + return self._finalize + + finalize: t.Optional[t.Callable[..., t.Any]] + finalize = default = self._default_finalize + src = None + + if self.environment.finalize: + src = "environment.finalize(" + env_finalize = self.environment.finalize + pass_arg = { + _PassArg.context: "context", + _PassArg.eval_context: "context.eval_ctx", + _PassArg.environment: "environment", + }.get( + _PassArg.from_obj(env_finalize) # type: ignore + ) + finalize = None + + if pass_arg is None: + + def finalize(value: t.Any) -> t.Any: # noqa: F811 + return default(env_finalize(value)) + + else: + src = f"{src}{pass_arg}, " + + if pass_arg == "environment": + + def finalize(value: t.Any) -> t.Any: # noqa: F811 + return default(env_finalize(self.environment, value)) + + self._finalize = self._FinalizeInfo(finalize, src) + return self._finalize + + def _output_const_repr(self, group: t.Iterable[t.Any]) -> str: + """Given a group of constant values converted from ``Output`` + child nodes, produce a string to write to the template module + source. + """ + return repr(concat(group)) + + def _output_child_to_const( + self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo + ) -> str: + """Try to optimize a child of an ``Output`` node by trying to + convert it to constant, finalized data at compile time. + + If :exc:`Impossible` is raised, the node is not constant and + will be evaluated at runtime. Any other exception will also be + evaluated at runtime for easier debugging. + """ + const = node.as_const(frame.eval_ctx) + + if frame.eval_ctx.autoescape: + const = escape(const) + + # Template data doesn't go through finalize. + if isinstance(node, nodes.TemplateData): + return str(const) + + return finalize.const(const) # type: ignore + + def _output_child_pre( + self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo + ) -> None: + """Output extra source code before visiting a child of an + ``Output`` node. + """ + if frame.eval_ctx.volatile: + self.write("(escape if context.eval_ctx.autoescape else str)(") + elif frame.eval_ctx.autoescape: + self.write("escape(") + else: + self.write("str(") + + if finalize.src is not None: + self.write(finalize.src) + + def _output_child_post( + self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo + ) -> None: + """Output extra source code after visiting a child of an + ``Output`` node. + """ + self.write(")") + + if finalize.src is not None: + self.write(")") + + def visit_Output(self, node: nodes.Output, frame: Frame) -> None: + # If an extends is active, don't render outside a block. + if frame.require_output_check: + # A top-level extends is known to exist at compile time. + if self.has_known_extends: + return + + self.writeline("if parent_template is None:") + self.indent() + + finalize = self._make_finalize() + body: t.List[t.Union[t.List[t.Any], nodes.Expr]] = [] + + # Evaluate constants at compile time if possible. Each item in + # body will be either a list of static data or a node to be + # evaluated at runtime. + for child in node.nodes: + try: + if not ( + # If the finalize function requires runtime context, + # constants can't be evaluated at compile time. + finalize.const + # Unless it's basic template data that won't be + # finalized anyway. + or isinstance(child, nodes.TemplateData) + ): + raise nodes.Impossible() + + const = self._output_child_to_const(child, frame, finalize) + except (nodes.Impossible, Exception): + # The node was not constant and needs to be evaluated at + # runtime. Or another error was raised, which is easier + # to debug at runtime. + body.append(child) + continue + + if body and isinstance(body[-1], list): + body[-1].append(const) + else: + body.append([const]) + + if frame.buffer is not None: + if len(body) == 1: + self.writeline(f"{frame.buffer}.append(") + else: + self.writeline(f"{frame.buffer}.extend((") + + self.indent() + + for item in body: + if isinstance(item, list): + # A group of constant data to join and output. + val = self._output_const_repr(item) + + if frame.buffer is None: + self.writeline("yield " + val) + else: + self.writeline(val + ",") + else: + if frame.buffer is None: + self.writeline("yield ", item) + else: + self.newline(item) + + # A node to be evaluated at runtime. + self._output_child_pre(item, frame, finalize) + self.visit(item, frame) + self._output_child_post(item, frame, finalize) + + if frame.buffer is not None: + self.write(",") + + if frame.buffer is not None: + self.outdent() + self.writeline(")" if len(body) == 1 else "))") + + if frame.require_output_check: + self.outdent() + + def visit_Assign(self, node: nodes.Assign, frame: Frame) -> None: + self.push_assign_tracking() + + # ``a.b`` is allowed for assignment, and is parsed as an NSRef. However, + # it is only valid if it references a Namespace object. Emit a check for + # that for each ref here, before assignment code is emitted. This can't + # be done in visit_NSRef as the ref could be in the middle of a tuple. + seen_refs: t.Set[str] = set() + + for nsref in node.find_all(nodes.NSRef): + if nsref.name in seen_refs: + # Only emit the check for each reference once, in case the same + # ref is used multiple times in a tuple, `ns.a, ns.b = c, d`. + continue + + seen_refs.add(nsref.name) + ref = frame.symbols.ref(nsref.name) + self.writeline(f"if not isinstance({ref}, Namespace):") + self.indent() + self.writeline( + "raise TemplateRuntimeError" + '("cannot assign attribute on non-namespace object")' + ) + self.outdent() + + self.newline(node) + self.visit(node.target, frame) + self.write(" = ") + self.visit(node.node, frame) + self.pop_assign_tracking(frame) + + def visit_AssignBlock(self, node: nodes.AssignBlock, frame: Frame) -> None: + self.push_assign_tracking() + block_frame = frame.inner() + # This is a special case. Since a set block always captures we + # will disable output checks. This way one can use set blocks + # toplevel even in extended templates. + block_frame.require_output_check = False + block_frame.symbols.analyze_node(node) + self.enter_frame(block_frame) + self.buffer(block_frame) + self.blockvisit(node.body, block_frame) + self.newline(node) + self.visit(node.target, frame) + self.write(" = (Markup if context.eval_ctx.autoescape else identity)(") + if node.filter is not None: + self.visit_Filter(node.filter, block_frame) + else: + self.write(f"concat({block_frame.buffer})") + self.write(")") + self.pop_assign_tracking(frame) + self.leave_frame(block_frame) + + # -- Expression Visitors + + def visit_Name(self, node: nodes.Name, frame: Frame) -> None: + if node.ctx == "store" and ( + frame.toplevel or frame.loop_frame or frame.block_frame + ): + if self._assign_stack: + self._assign_stack[-1].add(node.name) + ref = frame.symbols.ref(node.name) + + # If we are looking up a variable we might have to deal with the + # case where it's undefined. We can skip that case if the load + # instruction indicates a parameter which are always defined. + if node.ctx == "load": + load = frame.symbols.find_load(ref) + if not ( + load is not None + and load[0] == VAR_LOAD_PARAMETER + and not self.parameter_is_undeclared(ref) + ): + self.write( + f"(undefined(name={node.name!r}) if {ref} is missing else {ref})" + ) + return + + self.write(ref) + + def visit_NSRef(self, node: nodes.NSRef, frame: Frame) -> None: + # NSRef is a dotted assignment target a.b=c, but uses a[b]=c internally. + # visit_Assign emits code to validate that each ref is to a Namespace + # object only. That can't be emitted here as the ref could be in the + # middle of a tuple assignment. + ref = frame.symbols.ref(node.name) + self.writeline(f"{ref}[{node.attr!r}]") + + def visit_Const(self, node: nodes.Const, frame: Frame) -> None: + val = node.as_const(frame.eval_ctx) + if isinstance(val, float): + self.write(str(val)) + else: + self.write(repr(val)) + + def visit_TemplateData(self, node: nodes.TemplateData, frame: Frame) -> None: + try: + self.write(repr(node.as_const(frame.eval_ctx))) + except nodes.Impossible: + self.write( + f"(Markup if context.eval_ctx.autoescape else identity)({node.data!r})" + ) + + def visit_Tuple(self, node: nodes.Tuple, frame: Frame) -> None: + self.write("(") + idx = -1 + for idx, item in enumerate(node.items): + if idx: + self.write(", ") + self.visit(item, frame) + self.write(",)" if idx == 0 else ")") + + def visit_List(self, node: nodes.List, frame: Frame) -> None: + self.write("[") + for idx, item in enumerate(node.items): + if idx: + self.write(", ") + self.visit(item, frame) + self.write("]") + + def visit_Dict(self, node: nodes.Dict, frame: Frame) -> None: + self.write("{") + for idx, item in enumerate(node.items): + if idx: + self.write(", ") + self.visit(item.key, frame) + self.write(": ") + self.visit(item.value, frame) + self.write("}") + + visit_Add = _make_binop("+") + visit_Sub = _make_binop("-") + visit_Mul = _make_binop("*") + visit_Div = _make_binop("/") + visit_FloorDiv = _make_binop("//") + visit_Pow = _make_binop("**") + visit_Mod = _make_binop("%") + visit_And = _make_binop("and") + visit_Or = _make_binop("or") + visit_Pos = _make_unop("+") + visit_Neg = _make_unop("-") + visit_Not = _make_unop("not ") + + @optimizeconst + def visit_Concat(self, node: nodes.Concat, frame: Frame) -> None: + if frame.eval_ctx.volatile: + func_name = "(markup_join if context.eval_ctx.volatile else str_join)" + elif frame.eval_ctx.autoescape: + func_name = "markup_join" + else: + func_name = "str_join" + self.write(f"{func_name}((") + for arg in node.nodes: + self.visit(arg, frame) + self.write(", ") + self.write("))") + + @optimizeconst + def visit_Compare(self, node: nodes.Compare, frame: Frame) -> None: + self.write("(") + self.visit(node.expr, frame) + for op in node.ops: + self.visit(op, frame) + self.write(")") + + def visit_Operand(self, node: nodes.Operand, frame: Frame) -> None: + self.write(f" {operators[node.op]} ") + self.visit(node.expr, frame) + + @optimizeconst + def visit_Getattr(self, node: nodes.Getattr, frame: Frame) -> None: + if self.environment.is_async: + self.write("(await auto_await(") + + self.write("environment.getattr(") + self.visit(node.node, frame) + self.write(f", {node.attr!r})") + + if self.environment.is_async: + self.write("))") + + @optimizeconst + def visit_Getitem(self, node: nodes.Getitem, frame: Frame) -> None: + # slices bypass the environment getitem method. + if isinstance(node.arg, nodes.Slice): + self.visit(node.node, frame) + self.write("[") + self.visit(node.arg, frame) + self.write("]") + else: + if self.environment.is_async: + self.write("(await auto_await(") + + self.write("environment.getitem(") + self.visit(node.node, frame) + self.write(", ") + self.visit(node.arg, frame) + self.write(")") + + if self.environment.is_async: + self.write("))") + + def visit_Slice(self, node: nodes.Slice, frame: Frame) -> None: + if node.start is not None: + self.visit(node.start, frame) + self.write(":") + if node.stop is not None: + self.visit(node.stop, frame) + if node.step is not None: + self.write(":") + self.visit(node.step, frame) + + @contextmanager + def _filter_test_common( + self, node: t.Union[nodes.Filter, nodes.Test], frame: Frame, is_filter: bool + ) -> t.Iterator[None]: + if self.environment.is_async: + self.write("(await auto_await(") + + if is_filter: + self.write(f"{self.filters[node.name]}(") + func = self.environment.filters.get(node.name) + else: + self.write(f"{self.tests[node.name]}(") + func = self.environment.tests.get(node.name) + + # When inside an If or CondExpr frame, allow the filter to be + # undefined at compile time and only raise an error if it's + # actually called at runtime. See pull_dependencies. + if func is None and not frame.soft_frame: + type_name = "filter" if is_filter else "test" + self.fail(f"No {type_name} named {node.name!r}.", node.lineno) + + pass_arg = { + _PassArg.context: "context", + _PassArg.eval_context: "context.eval_ctx", + _PassArg.environment: "environment", + }.get( + _PassArg.from_obj(func) # type: ignore + ) + + if pass_arg is not None: + self.write(f"{pass_arg}, ") + + # Back to the visitor function to handle visiting the target of + # the filter or test. + yield + + self.signature(node, frame) + self.write(")") + + if self.environment.is_async: + self.write("))") + + @optimizeconst + def visit_Filter(self, node: nodes.Filter, frame: Frame) -> None: + with self._filter_test_common(node, frame, True): + # if the filter node is None we are inside a filter block + # and want to write to the current buffer + if node.node is not None: + self.visit(node.node, frame) + elif frame.eval_ctx.volatile: + self.write( + f"(Markup(concat({frame.buffer}))" + f" if context.eval_ctx.autoescape else concat({frame.buffer}))" + ) + elif frame.eval_ctx.autoescape: + self.write(f"Markup(concat({frame.buffer}))") + else: + self.write(f"concat({frame.buffer})") + + @optimizeconst + def visit_Test(self, node: nodes.Test, frame: Frame) -> None: + with self._filter_test_common(node, frame, False): + self.visit(node.node, frame) + + @optimizeconst + def visit_CondExpr(self, node: nodes.CondExpr, frame: Frame) -> None: + frame = frame.soft() + + def write_expr2() -> None: + if node.expr2 is not None: + self.visit(node.expr2, frame) + return + + self.write( + f'cond_expr_undefined("the inline if-expression on' + f" {self.position(node)} evaluated to false and no else" + f' section was defined.")' + ) + + self.write("(") + self.visit(node.expr1, frame) + self.write(" if ") + self.visit(node.test, frame) + self.write(" else ") + write_expr2() + self.write(")") + + @optimizeconst + def visit_Call( + self, node: nodes.Call, frame: Frame, forward_caller: bool = False + ) -> None: + if self.environment.is_async: + self.write("(await auto_await(") + if self.environment.sandboxed: + self.write("environment.call(context, ") + else: + self.write("context.call(") + self.visit(node.node, frame) + extra_kwargs = {"caller": "caller"} if forward_caller else None + loop_kwargs = {"_loop_vars": "_loop_vars"} if frame.loop_frame else {} + block_kwargs = {"_block_vars": "_block_vars"} if frame.block_frame else {} + if extra_kwargs: + extra_kwargs.update(loop_kwargs, **block_kwargs) + elif loop_kwargs or block_kwargs: + extra_kwargs = dict(loop_kwargs, **block_kwargs) + self.signature(node, frame, extra_kwargs) + self.write(")") + if self.environment.is_async: + self.write("))") + + def visit_Keyword(self, node: nodes.Keyword, frame: Frame) -> None: + self.write(node.key + "=") + self.visit(node.value, frame) + + # -- Unused nodes for extensions + + def visit_MarkSafe(self, node: nodes.MarkSafe, frame: Frame) -> None: + self.write("Markup(") + self.visit(node.expr, frame) + self.write(")") + + def visit_MarkSafeIfAutoescape( + self, node: nodes.MarkSafeIfAutoescape, frame: Frame + ) -> None: + self.write("(Markup if context.eval_ctx.autoescape else identity)(") + self.visit(node.expr, frame) + self.write(")") + + def visit_EnvironmentAttribute( + self, node: nodes.EnvironmentAttribute, frame: Frame + ) -> None: + self.write("environment." + node.name) + + def visit_ExtensionAttribute( + self, node: nodes.ExtensionAttribute, frame: Frame + ) -> None: + self.write(f"environment.extensions[{node.identifier!r}].{node.name}") + + def visit_ImportedName(self, node: nodes.ImportedName, frame: Frame) -> None: + self.write(self.import_aliases[node.importname]) + + def visit_InternalName(self, node: nodes.InternalName, frame: Frame) -> None: + self.write(node.name) + + def visit_ContextReference( + self, node: nodes.ContextReference, frame: Frame + ) -> None: + self.write("context") + + def visit_DerivedContextReference( + self, node: nodes.DerivedContextReference, frame: Frame + ) -> None: + self.write(self.derive_context(frame)) + + def visit_Continue(self, node: nodes.Continue, frame: Frame) -> None: + self.writeline("continue", node) + + def visit_Break(self, node: nodes.Break, frame: Frame) -> None: + self.writeline("break", node) + + def visit_Scope(self, node: nodes.Scope, frame: Frame) -> None: + scope_frame = frame.inner() + scope_frame.symbols.analyze_node(node) + self.enter_frame(scope_frame) + self.blockvisit(node.body, scope_frame) + self.leave_frame(scope_frame) + + def visit_OverlayScope(self, node: nodes.OverlayScope, frame: Frame) -> None: + ctx = self.temporary_identifier() + self.writeline(f"{ctx} = {self.derive_context(frame)}") + self.writeline(f"{ctx}.vars = ") + self.visit(node.context, frame) + self.push_context_reference(ctx) + + scope_frame = frame.inner(isolated=True) + scope_frame.symbols.analyze_node(node) + self.enter_frame(scope_frame) + self.blockvisit(node.body, scope_frame) + self.leave_frame(scope_frame) + self.pop_context_reference() + + def visit_EvalContextModifier( + self, node: nodes.EvalContextModifier, frame: Frame + ) -> None: + for keyword in node.options: + self.writeline(f"context.eval_ctx.{keyword.key} = ") + self.visit(keyword.value, frame) + try: + val = keyword.value.as_const(frame.eval_ctx) + except nodes.Impossible: + frame.eval_ctx.volatile = True + else: + setattr(frame.eval_ctx, keyword.key, val) + + def visit_ScopedEvalContextModifier( + self, node: nodes.ScopedEvalContextModifier, frame: Frame + ) -> None: + old_ctx_name = self.temporary_identifier() + saved_ctx = frame.eval_ctx.save() + self.writeline(f"{old_ctx_name} = context.eval_ctx.save()") + self.visit_EvalContextModifier(node, frame) + for child in node.body: + self.visit(child, frame) + frame.eval_ctx.revert(saved_ctx) + self.writeline(f"context.eval_ctx.revert({old_ctx_name})") diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/constants.py b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/constants.py new file mode 100644 index 0000000..41a1c23 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/constants.py @@ -0,0 +1,20 @@ +#: list of lorem ipsum words used by the lipsum() helper function +LOREM_IPSUM_WORDS = """\ +a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at +auctor augue bibendum blandit class commodo condimentum congue consectetuer +consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus +diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend +elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames +faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac +hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum +justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem +luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie +mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non +nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque +penatibus per pharetra phasellus placerat platea porta porttitor posuere +potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus +ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit +sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor +tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices +ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus +viverra volutpat vulputate""" diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/debug.py b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/debug.py new file mode 100644 index 0000000..eeeeee7 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/debug.py @@ -0,0 +1,191 @@ +import sys +import typing as t +from types import CodeType +from types import TracebackType + +from .exceptions import TemplateSyntaxError +from .utils import internal_code +from .utils import missing + +if t.TYPE_CHECKING: + from .runtime import Context + + +def rewrite_traceback_stack(source: t.Optional[str] = None) -> BaseException: + """Rewrite the current exception to replace any tracebacks from + within compiled template code with tracebacks that look like they + came from the template source. + + This must be called within an ``except`` block. + + :param source: For ``TemplateSyntaxError``, the original source if + known. + :return: The original exception with the rewritten traceback. + """ + _, exc_value, tb = sys.exc_info() + exc_value = t.cast(BaseException, exc_value) + tb = t.cast(TracebackType, tb) + + if isinstance(exc_value, TemplateSyntaxError) and not exc_value.translated: + exc_value.translated = True + exc_value.source = source + # Remove the old traceback, otherwise the frames from the + # compiler still show up. + exc_value.with_traceback(None) + # Outside of runtime, so the frame isn't executing template + # code, but it still needs to point at the template. + tb = fake_traceback( + exc_value, None, exc_value.filename or "", exc_value.lineno + ) + else: + # Skip the frame for the render function. + tb = tb.tb_next + + stack = [] + + # Build the stack of traceback object, replacing any in template + # code with the source file and line information. + while tb is not None: + # Skip frames decorated with @internalcode. These are internal + # calls that aren't useful in template debugging output. + if tb.tb_frame.f_code in internal_code: + tb = tb.tb_next + continue + + template = tb.tb_frame.f_globals.get("__jinja_template__") + + if template is not None: + lineno = template.get_corresponding_lineno(tb.tb_lineno) + fake_tb = fake_traceback(exc_value, tb, template.filename, lineno) + stack.append(fake_tb) + else: + stack.append(tb) + + tb = tb.tb_next + + tb_next = None + + # Assign tb_next in reverse to avoid circular references. + for tb in reversed(stack): + tb.tb_next = tb_next + tb_next = tb + + return exc_value.with_traceback(tb_next) + + +def fake_traceback( # type: ignore + exc_value: BaseException, tb: t.Optional[TracebackType], filename: str, lineno: int +) -> TracebackType: + """Produce a new traceback object that looks like it came from the + template source instead of the compiled code. The filename, line + number, and location name will point to the template, and the local + variables will be the current template context. + + :param exc_value: The original exception to be re-raised to create + the new traceback. + :param tb: The original traceback to get the local variables and + code info from. + :param filename: The template filename. + :param lineno: The line number in the template source. + """ + if tb is not None: + # Replace the real locals with the context that would be + # available at that point in the template. + locals = get_template_locals(tb.tb_frame.f_locals) + locals.pop("__jinja_exception__", None) + else: + locals = {} + + globals = { + "__name__": filename, + "__file__": filename, + "__jinja_exception__": exc_value, + } + # Raise an exception at the correct line number. + code: CodeType = compile( + "\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec" + ) + + # Build a new code object that points to the template file and + # replaces the location with a block name. + location = "template" + + if tb is not None: + function = tb.tb_frame.f_code.co_name + + if function == "root": + location = "top-level template code" + elif function.startswith("block_"): + location = f"block {function[6:]!r}" + + if sys.version_info >= (3, 8): + code = code.replace(co_name=location) + else: + code = CodeType( + code.co_argcount, + code.co_kwonlyargcount, + code.co_nlocals, + code.co_stacksize, + code.co_flags, + code.co_code, + code.co_consts, + code.co_names, + code.co_varnames, + code.co_filename, + location, + code.co_firstlineno, + code.co_lnotab, + code.co_freevars, + code.co_cellvars, + ) + + # Execute the new code, which is guaranteed to raise, and return + # the new traceback without this frame. + try: + exec(code, globals, locals) + except BaseException: + return sys.exc_info()[2].tb_next # type: ignore + + +def get_template_locals(real_locals: t.Mapping[str, t.Any]) -> t.Dict[str, t.Any]: + """Based on the runtime locals, get the context that would be + available at that point in the template. + """ + # Start with the current template context. + ctx: t.Optional[Context] = real_locals.get("context") + + if ctx is not None: + data: t.Dict[str, t.Any] = ctx.get_all().copy() + else: + data = {} + + # Might be in a derived context that only sets local variables + # rather than pushing a context. Local variables follow the scheme + # l_depth_name. Find the highest-depth local that has a value for + # each name. + local_overrides: t.Dict[str, t.Tuple[int, t.Any]] = {} + + for name, value in real_locals.items(): + if not name.startswith("l_") or value is missing: + # Not a template variable, or no longer relevant. + continue + + try: + _, depth_str, name = name.split("_", 2) + depth = int(depth_str) + except ValueError: + continue + + cur_depth = local_overrides.get(name, (-1,))[0] + + if cur_depth < depth: + local_overrides[name] = (depth, value) + + # Modify the context with any derived context. + for name, (_, value) in local_overrides.items(): + if value is missing: + data.pop(name, None) + else: + data[name] = value + + return data diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/defaults.py b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/defaults.py new file mode 100644 index 0000000..638cad3 --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/defaults.py @@ -0,0 +1,48 @@ +import typing as t + +from .filters import FILTERS as DEFAULT_FILTERS # noqa: F401 +from .tests import TESTS as DEFAULT_TESTS # noqa: F401 +from .utils import Cycler +from .utils import generate_lorem_ipsum +from .utils import Joiner +from .utils import Namespace + +if t.TYPE_CHECKING: + import typing_extensions as te + +# defaults for the parser / lexer +BLOCK_START_STRING = "{%" +BLOCK_END_STRING = "%}" +VARIABLE_START_STRING = "{{" +VARIABLE_END_STRING = "}}" +COMMENT_START_STRING = "{#" +COMMENT_END_STRING = "#}" +LINE_STATEMENT_PREFIX: t.Optional[str] = None +LINE_COMMENT_PREFIX: t.Optional[str] = None +TRIM_BLOCKS = False +LSTRIP_BLOCKS = False +NEWLINE_SEQUENCE: "te.Literal['\\n', '\\r\\n', '\\r']" = "\n" +KEEP_TRAILING_NEWLINE = False + +# default filters, tests and namespace + +DEFAULT_NAMESPACE = { + "range": range, + "dict": dict, + "lipsum": generate_lorem_ipsum, + "cycler": Cycler, + "joiner": Joiner, + "namespace": Namespace, +} + +# default policies +DEFAULT_POLICIES: t.Dict[str, t.Any] = { + "compiler.ascii_str": True, + "urlize.rel": "noopener", + "urlize.target": None, + "urlize.extra_schemes": None, + "truncate.leeway": 5, + "json.dumps_function": None, + "json.dumps_kwargs": {"sort_keys": True}, + "ext.i18n.trimmed": False, +} diff --git a/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/environment.py b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/environment.py new file mode 100644 index 0000000..0fc6e5b --- /dev/null +++ b/tools/converter-generator/venv/lib/python3.11/site-packages/jinja2/environment.py @@ -0,0 +1,1672 @@ +"""Classes for managing templates and their runtime and compile time +options. +""" + +import os +import typing +import typing as t +import weakref +from collections import ChainMap +from functools import lru_cache +from functools import partial +from functools import reduce +from types import CodeType + +from markupsafe import Markup + +from . import nodes +from .compiler import CodeGenerator +from .compiler import generate +from .defaults import BLOCK_END_STRING +from .defaults import BLOCK_START_STRING +from .defaults import COMMENT_END_STRING +from .defaults import COMMENT_START_STRING +from .defaults import DEFAULT_FILTERS # type: ignore[attr-defined] +from .defaults import DEFAULT_NAMESPACE +from .defaults import DEFAULT_POLICIES +from .defaults import DEFAULT_TESTS # type: ignore[attr-defined] +from .defaults import KEEP_TRAILING_NEWLINE +from .defaults import LINE_COMMENT_PREFIX +from .defaults import LINE_STATEMENT_PREFIX +from .defaults import LSTRIP_BLOCKS +from .defaults import NEWLINE_SEQUENCE +from .defaults import TRIM_BLOCKS +from .defaults import VARIABLE_END_STRING +from .defaults import VARIABLE_START_STRING +from .exceptions import TemplateNotFound +from .exceptions import TemplateRuntimeError +from .exceptions import TemplatesNotFound +from .exceptions import TemplateSyntaxError +from .exceptions import UndefinedError +from .lexer import get_lexer +from .lexer import Lexer +from .lexer import TokenStream +from .nodes import EvalContext +from .parser import Parser +from .runtime import Context +from .runtime import new_context +from .runtime import Undefined +from .utils import _PassArg +from .utils import concat +from .utils import consume +from .utils import import_string +from .utils import internalcode +from .utils import LRUCache +from .utils import missing + +if t.TYPE_CHECKING: + import typing_extensions as te + + from .bccache import BytecodeCache + from .ext import Extension + from .loaders import BaseLoader + +_env_bound = t.TypeVar("_env_bound", bound="Environment") + + +# for direct template usage we have up to ten living environments +@lru_cache(maxsize=10) +def get_spontaneous_environment(cls: t.Type[_env_bound], *args: t.Any) -> _env_bound: + """Return a new spontaneous environment. A spontaneous environment + is used for templates created directly rather than through an + existing environment. + + :param cls: Environment class to create. + :param args: Positional arguments passed to environment. + """ + env = cls(*args) + env.shared = True + return env + + +def create_cache( + size: int, +) -> t.Optional[t.MutableMapping[t.Tuple["weakref.ref[t.Any]", str], "Template"]]: + """Return the cache class for the given size.""" + if size == 0: + return None + + if size < 0: + return {} + + return LRUCache(size) # type: ignore + + +def copy_cache( + cache: t.Optional[t.MutableMapping[t.Any, t.Any]], +) -> t.Optional[t.MutableMapping[t.Tuple["weakref.ref[t.Any]", str], "Template"]]: + """Create an empty copy of the given cache.""" + if cache is None: + return None + + if type(cache) is dict: # noqa E721 + return {} + + return LRUCache(cache.capacity) # type: ignore + + +def load_extensions( + environment: "Environment", + extensions: t.Sequence[t.Union[str, t.Type["Extension"]]], +) -> t.Dict[str, "Extension"]: + """Load the extensions from the list and bind it to the environment. + Returns a dict of instantiated extensions. + """ + result = {} + + for extension in extensions: + if isinstance(extension, str): + extension = t.cast(t.Type["Extension"], import_string(extension)) + + result[extension.identifier] = extension(environment) + + return result + + +def _environment_config_check(environment: _env_bound) -> _env_bound: + """Perform a sanity check on the environment.""" + assert issubclass( + environment.undefined, Undefined + ), "'undefined' must be a subclass of 'jinja2.Undefined'." + assert ( + environment.block_start_string + != environment.variable_start_string + != environment.comment_start_string + ), "block, variable and comment start strings must be different." + assert environment.newline_sequence in { + "\r", + "\r\n", + "\n", + }, "'newline_sequence' must be one of '\\n', '\\r\\n', or '\\r'." + return environment + + +class Environment: + r"""The core component of Jinja is the `Environment`. It contains + important shared variables like configuration, filters, tests, + globals and others. Instances of this class may be modified if + they are not shared and if no template was loaded so far. + Modifications on environments after the first template was loaded + will lead to surprising effects and undefined behavior. + + Here are the possible initialization parameters: + + `block_start_string` + The string marking the beginning of a block. Defaults to ``'{%'``. + + `block_end_string` + The string marking the end of a block. Defaults to ``'%}'``. + + `variable_start_string` + The string marking the beginning of a print statement. + Defaults to ``'{{'``. + + `variable_end_string` + The string marking the end of a print statement. Defaults to + ``'}}'``. + + `comment_start_string` + The string marking the beginning of a comment. Defaults to ``'{#'``. + + `comment_end_string` + The string marking the end of a comment. Defaults to ``'#}'``. + + `line_statement_prefix` + If given and a string, this will be used as prefix for line based + statements. See also :ref:`line-statements`. + + `line_comment_prefix` + If given and a string, this will be used as prefix for line based + comments. See also :ref:`line-statements`. + + .. versionadded:: 2.2 + + `trim_blocks` + If this is set to ``True`` the first newline after a block is + removed (block, not variable tag!). Defaults to `False`. + + `lstrip_blocks` + If this is set to ``True`` leading spaces and tabs are stripped + from the start of a line to a block. Defaults to `False`. + + `newline_sequence` + The sequence that starts a newline. Must be one of ``'\r'``, + ``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a + useful default for Linux and OS X systems as well as web + applications. + + `keep_trailing_newline` + Preserve the trailing newline when rendering templates. + The default is ``False``, which causes a single newline, + if present, to be stripped from the end of the template. + + .. versionadded:: 2.7 + + `extensions` + List of Jinja extensions to use. This can either be import paths + as strings or extension classes. For more information have a + look at :ref:`the extensions documentation `. + + `optimized` + should the optimizer be enabled? Default is ``True``. + + `undefined` + :class:`Undefined` or a subclass of it that is used to represent + undefined values in the template. + + `finalize` + A callable that can be used to process the result of a variable + expression before it is output. For example one can convert + ``None`` implicitly into an empty string here. + + `autoescape` + If set to ``True`` the XML/HTML autoescaping feature is enabled by + default. For more details about autoescaping see + :class:`~markupsafe.Markup`. As of Jinja 2.4 this can also + be a callable that is passed the template name and has to + return ``True`` or ``False`` depending on autoescape should be + enabled by default. + + .. versionchanged:: 2.4 + `autoescape` can now be a function + + `loader` + The template loader for this environment. + + `cache_size` + The size of the cache. Per default this is ``400`` which means + that if more than 400 templates are loaded the loader will clean + out the least recently used template. If the cache size is set to + ``0`` templates are recompiled all the time, if the cache size is + ``-1`` the cache will not be cleaned. + + .. versionchanged:: 2.8 + The cache size was increased to 400 from a low 50. + + `auto_reload` + Some loaders load templates from locations where the template + sources may change (ie: file system or database). If + ``auto_reload`` is set to ``True`` (default) every time a template is + requested the loader checks if the source changed and if yes, it + will reload the template. For higher performance it's possible to + disable that. + + `bytecode_cache` + If set to a bytecode cache object, this object will provide a + cache for the internal Jinja bytecode so that templates don't + have to be parsed if they were not changed. + + See :ref:`bytecode-cache` for more information. + + `enable_async` + If set to true this enables async template execution which + allows using async functions and generators. + """ + + #: if this environment is sandboxed. Modifying this variable won't make + #: the environment sandboxed though. For a real sandboxed environment + #: have a look at jinja2.sandbox. This flag alone controls the code + #: generation by the compiler. + sandboxed = False + + #: True if the environment is just an overlay + overlayed = False + + #: the environment this environment is linked to if it is an overlay + linked_to: t.Optional["Environment"] = None + + #: shared environments have this set to `True`. A shared environment + #: must not be modified + shared = False + + #: the class that is used for code generation. See + #: :class:`~jinja2.compiler.CodeGenerator` for more information. + code_generator_class: t.Type["CodeGenerator"] = CodeGenerator + + concat = "".join + + #: the context class that is used for templates. See + #: :class:`~jinja2.runtime.Context` for more information. + context_class: t.Type[Context] = Context + + template_class: t.Type["Template"] + + def __init__( + self, + block_start_string: str = BLOCK_START_STRING, + block_end_string: str = BLOCK_END_STRING, + variable_start_string: str = VARIABLE_START_STRING, + variable_end_string: str = VARIABLE_END_STRING, + comment_start_string: str = COMMENT_START_STRING, + comment_end_string: str = COMMENT_END_STRING, + line_statement_prefix: t.Optional[str] = LINE_STATEMENT_PREFIX, + line_comment_prefix: t.Optional[str] = LINE_COMMENT_PREFIX, + trim_blocks: bool = TRIM_BLOCKS, + lstrip_blocks: bool = LSTRIP_BLOCKS, + newline_sequence: "te.Literal['\\n', '\\r\\n', '\\r']" = NEWLINE_SEQUENCE, + keep_trailing_newline: bool = KEEP_TRAILING_NEWLINE, + extensions: t.Sequence[t.Union[str, t.Type["Extension"]]] = (), + optimized: bool = True, + undefined: t.Type[Undefined] = Undefined, + finalize: t.Optional[t.Callable[..., t.Any]] = None, + autoescape: t.Union[bool, t.Callable[[t.Optional[str]], bool]] = False, + loader: t.Optional["BaseLoader"] = None, + cache_size: int = 400, + auto_reload: bool = True, + bytecode_cache: t.Optional["BytecodeCache"] = None, + enable_async: bool = False, + ): + # !!Important notice!! + # The constructor accepts quite a few arguments that should be + # passed by keyword rather than position. However it's important to + # not change the order of arguments because it's used at least + # internally in those cases: + # - spontaneous environments (i18n extension and Template) + # - unittests + # If parameter changes are required only add parameters at the end + # and don't change the arguments (or the defaults!) of the arguments + # existing already. + + # lexer / parser information + self.block_start_string = block_start_string + self.block_end_string = block_end_string + self.variable_start_string = variable_start_string + self.variable_end_string = variable_end_string + self.comment_start_string = comment_start_string + self.comment_end_string = comment_end_string + self.line_statement_prefix = line_statement_prefix + self.line_comment_prefix = line_comment_prefix + self.trim_blocks = trim_blocks + self.lstrip_blocks = lstrip_blocks + self.newline_sequence = newline_sequence + self.keep_trailing_newline = keep_trailing_newline + + # runtime information + self.undefined: t.Type[Undefined] = undefined + self.optimized = optimized + self.finalize = finalize + self.autoescape = autoescape + + # defaults + self.filters = DEFAULT_FILTERS.copy() + self.tests = DEFAULT_TESTS.copy() + self.globals = DEFAULT_NAMESPACE.copy() + + # set the loader provided + self.loader = loader + self.cache = create_cache(cache_size) + self.bytecode_cache = bytecode_cache + self.auto_reload = auto_reload + + # configurable policies + self.policies = DEFAULT_POLICIES.copy() + + # load extensions + self.extensions = load_extensions(self, extensions) + + self.is_async = enable_async + _environment_config_check(self) + + def add_extension(self, extension: t.Union[str, t.Type["Extension"]]) -> None: + """Adds an extension after the environment was created. + + .. versionadded:: 2.5 + """ + self.extensions.update(load_extensions(self, [extension])) + + def extend(self, **attributes: t.Any) -> None: + """Add the items to the instance of the environment if they do not exist + yet. This is used by :ref:`extensions ` to register + callbacks and configuration values without breaking inheritance. + """ + for key, value in attributes.items(): + if not hasattr(self, key): + setattr(self, key, value) + + def overlay( + self, + block_start_string: str = missing, + block_end_string: str = missing, + variable_start_string: str = missing, + variable_end_string: str = missing, + comment_start_string: str = missing, + comment_end_string: str = missing, + line_statement_prefix: t.Optional[str] = missing, + line_comment_prefix: t.Optional[str] = missing, + trim_blocks: bool = missing, + lstrip_blocks: bool = missing, + newline_sequence: "te.Literal['\\n', '\\r\\n', '\\r']" = missing, + keep_trailing_newline: bool = missing, + extensions: t.Sequence[t.Union[str, t.Type["Extension"]]] = missing, + optimized: bool = missing, + undefined: t.Type[Undefined] = missing, + finalize: t.Optional[t.Callable[..., t.Any]] = missing, + autoescape: t.Union[bool, t.Callable[[t.Optional[str]], bool]] = missing, + loader: t.Optional["BaseLoader"] = missing, + cache_size: int = missing, + auto_reload: bool = missing, + bytecode_cache: t.Optional["BytecodeCache"] = missing, + enable_async: bool = missing, + ) -> "te.Self": + """Create a new overlay environment that shares all the data with the + current environment except for cache and the overridden attributes. + Extensions cannot be removed for an overlayed environment. An overlayed + environment automatically gets all the extensions of the environment it + is linked to plus optional extra extensions. + + Creating overlays should happen after the initial environment was set + up completely. Not all attributes are truly linked, some are just + copied over so modifications on the original environment may not shine + through. + + .. versionchanged:: 3.1.5 + ``enable_async`` is applied correctly. + + .. versionchanged:: 3.1.2 + Added the ``newline_sequence``, ``keep_trailing_newline``, + and ``enable_async`` parameters to match ``__init__``. + """ + args = dict(locals()) + del args["self"], args["cache_size"], args["extensions"], args["enable_async"] + + rv = object.__new__(self.__class__) + rv.__dict__.update(self.__dict__) + rv.overlayed = True + rv.linked_to = self + + for key, value in args.items(): + if value is not missing: + setattr(rv, key, value) + + if cache_size is not missing: + rv.cache = create_cache(cache_size) + else: + rv.cache = copy_cache(self.cache) + + rv.extensions = {} + for key, value in self.extensions.items(): + rv.extensions[key] = value.bind(rv) + if extensions is not missing: + rv.extensions.update(load_extensions(rv, extensions)) + + if enable_async is not missing: + rv.is_async = enable_async + + return _environment_config_check(rv) + + @property + def lexer(self) -> Lexer: + """The lexer for this environment.""" + return get_lexer(self) + + def iter_extensions(self) -> t.Iterator["Extension"]: + """Iterates over the extensions by priority.""" + return iter(sorted(self.extensions.values(), key=lambda x: x.priority)) + + def getitem( + self, obj: t.Any, argument: t.Union[str, t.Any] + ) -> t.Union[t.Any, Undefined]: + """Get an item or attribute of an object but prefer the item.""" + try: + return obj[argument] + except (AttributeError, TypeError, LookupError): + if isinstance(argument, str): + try: + attr = str(argument) + except Exception: + pass + else: + try: + return getattr(obj, attr) + except AttributeError: + pass + return self.undefined(obj=obj, name=argument) + + def getattr(self, obj: t.Any, attribute: str) -> t.Any: + """Get an item or attribute of an object but prefer the attribute. + Unlike :meth:`getitem` the attribute *must* be a string. + """ + try: + return getattr(obj, attribute) + except AttributeError: + pass + try: + return obj[attribute] + except (TypeError, LookupError, AttributeError): + return self.undefined(obj=obj, name=attribute) + + def _filter_test_common( + self, + name: t.Union[str, Undefined], + value: t.Any, + args: t.Optional[t.Sequence[t.Any]], + kwargs: t.Optional[t.Mapping[str, t.Any]], + context: t.Optional[Context], + eval_ctx: t.Optional[EvalContext], + is_filter: bool, + ) -> t.Any: + if is_filter: + env_map = self.filters + type_name = "filter" + else: + env_map = self.tests + type_name = "test" + + func = env_map.get(name) # type: ignore + + if func is None: + msg = f"No {type_name} named {name!r}." + + if isinstance(name, Undefined): + try: + name._fail_with_undefined_error() + except Exception as e: + msg = f"{msg} ({e}; did you forget to quote the callable name?)" + + raise TemplateRuntimeError(msg) + + args = [value, *(args if args is not None else ())] + kwargs = kwargs if kwargs is not None else {} + pass_arg = _PassArg.from_obj(func) + + if pass_arg is _PassArg.context: + if context is None: + raise TemplateRuntimeError( + f"Attempted to invoke a context {type_name} without context." + ) + + args.insert(0, context) + elif pass_arg is _PassArg.eval_context: + if eval_ctx is None: + if context is not None: + eval_ctx = context.eval_ctx + else: + eval_ctx = EvalContext(self) + + args.insert(0, eval_ctx) + elif pass_arg is _PassArg.environment: + args.insert(0, self) + + return func(*args, **kwargs) + + def call_filter( + self, + name: str, + value: t.Any, + args: t.Optional[t.Sequence[t.Any]] = None, + kwargs: t.Optional[t.Mapping[str, t.Any]] = None, + context: t.Optional[Context] = None, + eval_ctx: t.Optional[EvalContext] = None, + ) -> t.Any: + """Invoke a filter on a value the same way the compiler does. + + This might return a coroutine if the filter is running from an + environment in async mode and the filter supports async + execution. It's your responsibility to await this if needed. + + .. versionadded:: 2.7 + """ + return self._filter_test_common( + name, value, args, kwargs, context, eval_ctx, True + ) + + def call_test( + self, + name: str, + value: t.Any, + args: t.Optional[t.Sequence[t.Any]] = None, + kwargs: t.Optional[t.Mapping[str, t.Any]] = None, + context: t.Optional[Context] = None, + eval_ctx: t.Optional[EvalContext] = None, + ) -> t.Any: + """Invoke a test on a value the same way the compiler does. + + This might return a coroutine if the test is running from an + environment in async mode and the test supports async execution. + It's your responsibility to await this if needed. + + .. versionchanged:: 3.0 + Tests support ``@pass_context``, etc. decorators. Added + the ``context`` and ``eval_ctx`` parameters. + + .. versionadded:: 2.7 + """ + return self._filter_test_common( + name, value, args, kwargs, context, eval_ctx, False + ) + + @internalcode + def parse( + self, + source: str, + name: t.Optional[str] = None, + filename: t.Optional[str] = None, + ) -> nodes.Template: + """Parse the sourcecode and return the abstract syntax tree. This + tree of nodes is used by the compiler to convert the template into + executable source- or bytecode. This is useful for debugging or to + extract information from templates. + + If you are :ref:`developing Jinja extensions ` + this gives you a good overview of the node tree generated. + """ + try: + return self._parse(source, name, filename) + except TemplateSyntaxError: + self.handle_exception(source=source) + + def _parse( + self, source: str, name: t.Optional[str], filename: t.Optional[str] + ) -> nodes.Template: + """Internal parsing function used by `parse` and `compile`.""" + return Parser(self, source, name, filename).parse() + + def lex( + self, + source: str, + name: t.Optional[str] = None, + filename: t.Optional[str] = None, + ) -> t.Iterator[t.Tuple[int, str, str]]: + """Lex the given sourcecode and return a generator that yields + tokens as tuples in the form ``(lineno, token_type, value)``. + This can be useful for :ref:`extension development ` + and debugging templates. + + This does not perform preprocessing. If you want the preprocessing + of the extensions to be applied you have to filter source through + the :meth:`preprocess` method. + """ + source = str(source) + try: + return self.lexer.tokeniter(source, name, filename) + except TemplateSyntaxError: + self.handle_exception(source=source) + + def preprocess( + self, + source: str, + name: t.Optional[str] = None, + filename: t.Optional[str] = None, + ) -> str: + """Preprocesses the source with all extensions. This is automatically + called for all parsing and compiling methods but *not* for :meth:`lex` + because there you usually only want the actual source tokenized. + """ + return reduce( + lambda s, e: e.preprocess(s, name, filename), + self.iter_extensions(), + str(source), + ) + + def _tokenize( + self, + source: str, + name: t.Optional[str], + filename: t.Optional[str] = None, + state: t.Optional[str] = None, + ) -> TokenStream: + """Called by the parser to do the preprocessing and filtering + for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`. + """ + source = self.preprocess(source, name, filename) + stream = self.lexer.tokenize(source, name, filename, state) + + for ext in self.iter_extensions(): + stream = ext.filter_stream(stream) # type: ignore + + if not isinstance(stream, TokenStream): + stream = TokenStream(stream, name, filename) + + return stream + + def _generate( + self, + source: nodes.Template, + name: t.Optional[str], + filename: t.Optional[str], + defer_init: bool = False, + ) -> str: + """Internal hook that can be overridden to hook a different generate + method in. + + .. versionadded:: 2.5 + """ + return generate( # type: ignore + source, + self, + name, + filename, + defer_init=defer_init, + optimized=self.optimized, + ) + + def _compile(self, source: str, filename: str) -> CodeType: + """Internal hook that can be overridden to hook a different compile + method in. + + .. versionadded:: 2.5 + """ + return compile(source, filename, "exec") + + @typing.overload + def compile( + self, + source: t.Union[str, nodes.Template], + name: t.Optional[str] = None, + filename: t.Optional[str] = None, + raw: "te.Literal[False]" = False, + defer_init: bool = False, + ) -> CodeType: ... + + @typing.overload + def compile( + self, + source: t.Union[str, nodes.Template], + name: t.Optional[str] = None, + filename: t.Optional[str] = None, + raw: "te.Literal[True]" = ..., + defer_init: bool = False, + ) -> str: ... + + @internalcode + def compile( + self, + source: t.Union[str, nodes.Template], + name: t.Optional[str] = None, + filename: t.Optional[str] = None, + raw: bool = False, + defer_init: bool = False, + ) -> t.Union[str, CodeType]: + """Compile a node or template source code. The `name` parameter is + the load name of the template after it was joined using + :meth:`join_path` if necessary, not the filename on the file system. + the `filename` parameter is the estimated filename of the template on + the file system. If the template came from a database or memory this + can be omitted. + + The return value of this method is a python code object. If the `raw` + parameter is `True` the return value will be a string with python + code equivalent to the bytecode returned otherwise. This method is + mainly used internally. + + `defer_init` is use internally to aid the module code generator. This + causes the generated code to be able to import without the global + environment variable to be set. + + .. versionadded:: 2.4 + `defer_init` parameter added. + """ + source_hint = None + try: + if isinstance(source, str): + source_hint = source + source = self._parse(source, name, filename) + source = self._generate(source, name, filename, defer_init=defer_init) + if raw: + return source + if filename is None: + filename = "