diff --git a/.changeset/sixty-trams-show.md b/.changeset/sixty-trams-show.md new file mode 100644 index 0000000000..87e4e8c3b1 --- /dev/null +++ b/.changeset/sixty-trams-show.md @@ -0,0 +1,5 @@ +--- +"@redocly/cli": patch +--- + +Added `scorecard-classic` command to evaluate API descriptions against project scorecard configurations. diff --git a/docs/@v2/commands/scorecard-classic.md b/docs/@v2/commands/scorecard-classic.md new file mode 100644 index 0000000000..0505d473ed --- /dev/null +++ b/docs/@v2/commands/scorecard-classic.md @@ -0,0 +1,182 @@ +# `scorecard-classic` + +## Introduction + +The `scorecard-classic` command evaluates your API descriptions against quality standards defined in your Redocly project's scorecard configuration. +Use this command to validate API quality and track compliance with organizational governance standards across multiple severity levels. + +{% admonition type="info" name="Note" %} +The `scorecard-classic` command requires a scorecard configuration in your Redocly project. You can configure this in your project settings or by providing a `--project-url` flag. Learn more about [configuring scorecards](https://redocly.com/docs/realm/config/scorecard). +{% /admonition %} + +## Usage + +```bash +redocly scorecard-classic --project-url= +redocly scorecard-classic --config= +redocly scorecard-classic --format=json +redocly scorecard-classic --target-level= +redocly scorecard-classic --verbose +``` + +## Options + +| Option | Type | Description | +| -------------- | ------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| api | string | Path to the API description filename or alias that you want to evaluate. See [the API section](#specify-api) for more details. | +| --config | string | Specify path to the [configuration file](#use-alternative-configuration-file). | +| --format | string | Format for the output.
**Possible values:** `stylish`, `json`. Default value is `stylish`. | +| --help | boolean | Show help. | +| --project-url | string | URL to the project scorecard configuration. Required if not configured in the Redocly configuration file. Example: `https://app.cloud.redocly.com/org/my-org/projects/my-project/scorecard-classic`. | +| --target-level | string | Target scorecard level to achieve. The command validates that the API meets this level and all preceding levels without errors. Exits with an error if the target level is not achieved. | +| --verbose, -v | boolean | Run the command in verbose mode to display additional information during execution. | + +## Examples + +### Specify API + +You can use the `scorecard-classic` command with an OpenAPI description file path or an API alias defined in your Redocly configuration file. + +```bash +redocly scorecard-classic openapi/openapi.yaml --project-url=https://app.cloud.redocly.com/org/my-org/projects/my-project/scorecard-classic +``` + +In this example, `scorecard-classic` evaluates the specified API description against the scorecard rules defined in the provided project URL. + +### Use alternative configuration file + +By default, the CLI tool looks for the [Redocly configuration file](../configuration/index.md) in the current working directory. +Use the optional `--config` argument to provide an alternative path to a configuration file. + +```bash +redocly scorecard-classic openapi/openapi.yaml --config=./another/directory/redocly.yaml +``` + +### Configure scorecard in redocly.yaml + +You can configure the scorecard project URL in your Redocly configuration file to avoid passing it as a command-line argument: + +```yaml +scorecard: + fromProjectUrl: https://app.cloud.redocly.com/org/my-org/projects/my-project/scorecard-classic + +apis: + core@v1: + root: ./openapi/api-description.json +``` + +With this configuration, you can run the command without the `--project-url` flag: + +```bash +redocly scorecard-classic core@v1 +``` + +### Use JSON output format + +To generate machine-readable output suitable for CI/CD pipelines or further processing, use the JSON format: + +```bash +redocly scorecard-classic openapi/openapi.yaml --format=json +``` + +The JSON output is grouped by scorecard level and includes: + +- version information +- achieved scorecard level +- summary of errors and warnings for each level +- rule ID and documentation link (for built-in rules) +- severity level (error or warning) +- location information (file path, line/column range, and JSON pointer) +- descriptive message about the violation + +### Validate against a target level + +Use the `--target-level` option to ensure your API meets a specific quality level. The command validates that your API satisfies the target level and all preceding levels without errors: + +```bash +redocly scorecard-classic openapi/openapi.yaml --target-level=Gold +``` + +If the API doesn't meet the target level, the command: + +- displays which level was actually achieved +- shows all validation issues preventing the target level from being met +- exits with a non-zero exit code (useful for CI/CD pipelines) + +This is particularly useful in CI/CD pipelines to enforce minimum quality standards before deployment. 
+ +### Run in verbose mode + +For troubleshooting or detailed insights into the scorecard evaluation process, enable verbose mode: + +```bash +redocly scorecard-classic openapi/openapi.yaml --verbose +``` + +Verbose mode displays additional information such as: + +- project URL being used +- authentication status +- detailed logging of the evaluation process + +## Authentication + +The `scorecard-classic` command requires authentication to access your project's scorecard configuration. +You can authenticate in one of two ways: + +### Using API key (recommended for CI/CD) + +Set the `REDOCLY_AUTHORIZATION` environment variable with your API key: + +```bash +export REDOCLY_AUTHORIZATION=your-api-key-here +redocly scorecard-classic openapi/openapi.yaml +``` + +### Interactive login + +If no API key is provided, the command prompts you to log in interactively: + +```bash +redocly scorecard-classic openapi/openapi.yaml +``` + +The CLI opens a browser window for you to authenticate with your Redocly account. + +## Scorecard results + +The scorecard evaluation categorizes issues into multiple levels based on your project's configuration. +Each issue is associated with a specific scorecard level, allowing you to prioritize improvements. + +The command displays the achieved scorecard level, which is the highest level your API meets without errors. +The achieved level is shown in both stylish and JSON output formats. + +When all checks pass, the command displays a success message: + +```text + ā˜‘ļø Achieved Level: Gold + +āœ… No issues found for openapi/openapi.yaml. Your API meets all scorecard requirements. +``` + +When issues are found, the output shows: + +- the achieved scorecard level +- the rule that was violated +- the scorecard level of the rule +- the location in the API description where the issue occurs +- a descriptive message explaining the violation + +If a `--target-level` is specified and not achieved, the command displays an error message and exits with a non-zero code. 
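+For example, when issues are found, the stylish output looks similar to the following (the level names, rules, locations, and messages are illustrative):
+
+```text
+Found 2 errors and 1 warning across 1 level
+
+ ā˜‘ļø Achieved Level: Silver
+
+ šŸ“‹ Gold (2 errors, 1 warning)
+  45:9     error   [Gold] operation-operationId  Operation object should contain `operationId` field.
+  45:9     error   [Gold] operation-summary      Operation object should contain `summary` field.
+  12:3     warn    [Gold] info-license           Info object should contain `license` field.
+```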
+ +## Related commands + +- [`lint`](./lint.md) - Standard linting for API descriptions with pass/fail results +- [`bundle`](./bundle.md) - Bundle multi-file API descriptions into a single file +- [`stats`](./stats.md) - Display statistics about your API description structure + +## Resources + +- [API governance documentation](../api-standards.md) +- [Redocly configuration guide](../configuration/index.md) +- [Custom rules and plugins](../custom-plugins/index.md) diff --git a/docs/@v2/v2.sidebars.yaml b/docs/@v2/v2.sidebars.yaml index 9ddd428a84..c330b8611e 100644 --- a/docs/@v2/v2.sidebars.yaml +++ b/docs/@v2/v2.sidebars.yaml @@ -34,6 +34,8 @@ page: commands/push-status.md - label: respect page: commands/respect.md + - label: scorecard-classic + page: commands/scorecard-classic.md - label: split page: commands/split.md - label: stats diff --git a/packages/cli/src/commands/scorecard-classic/__tests__/fetch-scorecard.test.ts b/packages/cli/src/commands/scorecard-classic/__tests__/fetch-scorecard.test.ts new file mode 100644 index 0000000000..88dafeb7e7 --- /dev/null +++ b/packages/cli/src/commands/scorecard-classic/__tests__/fetch-scorecard.test.ts @@ -0,0 +1,282 @@ +import { fetchRemoteScorecardAndPlugins } from '../remote/fetch-scorecard.js'; +import * as errorUtils from '../../../utils/error.js'; + +describe('fetchRemoteScorecardAndPlugins', () => { + const mockFetch = vi.fn(); + const validProjectUrl = 'https://app.valid-url.com/org/test-org/project/test-project'; + const testToken = 'test-token'; + + beforeEach(() => { + global.fetch = mockFetch; + mockFetch.mockClear(); + vi.spyOn(errorUtils, 'exitWithError').mockImplementation(() => { + throw new Error('exitWithError called'); + }); + }); + + afterEach(() => { + vi.unstubAllEnvs(); + delete process.env.REDOCLY_AUTHORIZATION; + vi.restoreAllMocks(); + }); + + it('should handle invalid URL format', async () => { + await expect( + fetchRemoteScorecardAndPlugins({ projectUrl: 'not-a-valid-url', auth: testToken }) + ).rejects.toThrow(); + }); + + it('should throw error when project URL pattern does not match', async () => { + await expect( + fetchRemoteScorecardAndPlugins({ + projectUrl: 'https://example.com/invalid/path', + auth: testToken, + }) + ).rejects.toThrow(); + + expect(errorUtils.exitWithError).toHaveBeenCalledWith( + expect.stringContaining('Invalid project URL format') + ); + }); + + it('should throw error when project is not found (404)', async () => { + mockFetch.mockResolvedValueOnce({ + status: 404, + }); + + await expect( + fetchRemoteScorecardAndPlugins({ projectUrl: validProjectUrl, auth: testToken }) + ).rejects.toThrow(); + + expect(errorUtils.exitWithError).toHaveBeenCalledWith( + expect.stringContaining('Failed to fetch project') + ); + }); + + it('should throw error when unauthorized (401)', async () => { + mockFetch.mockResolvedValueOnce({ + status: 401, + }); + + await expect( + fetchRemoteScorecardAndPlugins({ projectUrl: validProjectUrl, auth: testToken }) + ).rejects.toThrow(); + + expect(errorUtils.exitWithError).toHaveBeenCalledWith( + expect.stringContaining('Unauthorized access to project') + ); + }); + + it('should throw error when forbidden (403)', async () => { + mockFetch.mockResolvedValueOnce({ + status: 403, + }); + + await expect( + fetchRemoteScorecardAndPlugins({ projectUrl: validProjectUrl, auth: testToken }) + ).rejects.toThrow(); + + expect(errorUtils.exitWithError).toHaveBeenCalledWith( + expect.stringContaining('Unauthorized access to project') + ); + }); + + it('should throw error when 
project has no scorecard config', async () => { + mockFetch.mockResolvedValueOnce({ + status: 200, + json: async () => ({ + id: 'project-123', + slug: 'test-project', + config: {}, + }), + }); + + await expect( + fetchRemoteScorecardAndPlugins({ projectUrl: validProjectUrl, auth: testToken }) + ).rejects.toThrow(); + + expect(errorUtils.exitWithError).toHaveBeenCalledWith( + expect.stringContaining('No scorecard configuration found') + ); + }); + + it('should return scorecard config without plugins when pluginsUrl is not set', async () => { + const mockScorecard = { + levels: [{ name: 'Gold', rules: {} }], + }; + + mockFetch.mockResolvedValueOnce({ + status: 200, + json: async () => ({ + id: 'project-123', + slug: 'test-project', + config: { + scorecard: mockScorecard, + }, + }), + }); + + const result = await fetchRemoteScorecardAndPlugins({ + projectUrl: validProjectUrl, + auth: testToken, + }); + + expect(result).toEqual({ + scorecard: mockScorecard, + plugins: undefined, + }); + expect(errorUtils.exitWithError).not.toHaveBeenCalled(); + }); + + it('should return scorecard config with plugins when pluginsUrl is set', async () => { + const mockScorecard = { + levels: [{ name: 'Gold', rules: {} }], + }; + const mockPluginsCode = 'export default [() => ({ id: "test-plugin" })]'; + + mockFetch + .mockResolvedValueOnce({ + status: 200, + json: async () => ({ + id: 'project-123', + slug: 'test-project', + config: { + scorecard: mockScorecard, + pluginsUrl: 'https://example.com/plugins.js', + }, + }), + }) + .mockResolvedValueOnce({ + status: 200, + text: async () => mockPluginsCode, + }); + + const result = await fetchRemoteScorecardAndPlugins({ + projectUrl: validProjectUrl, + auth: testToken, + }); + + expect(result).toEqual({ + scorecard: mockScorecard, + plugins: mockPluginsCode, + }); + expect(mockFetch).toHaveBeenCalledTimes(2); + }); + + it('should return scorecard without plugins when plugin fetch fails', async () => { + const mockScorecard = { + levels: [{ name: 'Gold', rules: {} }], + }; + + mockFetch + .mockResolvedValueOnce({ + status: 200, + json: async () => ({ + id: 'project-123', + slug: 'test-project', + config: { + scorecard: mockScorecard, + pluginsUrl: 'https://example.com/plugins.js', + }, + }), + }) + .mockResolvedValueOnce({ + status: 404, + }); + + const result = await fetchRemoteScorecardAndPlugins({ + projectUrl: validProjectUrl, + auth: testToken, + }); + + expect(result).toEqual({ + scorecard: mockScorecard, + plugins: undefined, + }); + }); + + it('should use correct auth headers with access token', async () => { + mockFetch.mockResolvedValueOnce({ + status: 200, + json: async () => ({ + id: 'project-123', + config: { scorecard: { levels: [] } }, + }), + }); + + await fetchRemoteScorecardAndPlugins({ + projectUrl: validProjectUrl, + auth: testToken, + }); + + expect(mockFetch).toHaveBeenCalledWith( + expect.any(URL), + expect.objectContaining({ + headers: { Cookie: `accessToken=${testToken}` }, + }) + ); + }); + + it('should use correct auth headers with API key', async () => { + const apiKey = 'test-api-key'; + process.env.REDOCLY_AUTHORIZATION = apiKey; + + mockFetch.mockResolvedValueOnce({ + status: 200, + json: async () => ({ + id: 'project-123', + config: { scorecard: { levels: [] } }, + }), + }); + + await fetchRemoteScorecardAndPlugins({ + projectUrl: validProjectUrl, + auth: apiKey, + isApiKey: true, + }); + + expect(mockFetch).toHaveBeenCalledWith( + expect.any(URL), + expect.objectContaining({ + headers: { Authorization: `Bearer ${apiKey}` }, + }) + ); + 
}); + + it('should handle verbose flag and fetch plugins successfully', async () => { + const mockScorecard = { + levels: [{ name: 'Gold', rules: {} }], + }; + const mockPluginsCode = 'export default [() => ({ id: "test-plugin" })]'; + + mockFetch + .mockResolvedValueOnce({ + status: 200, + json: async () => ({ + id: 'project-123', + slug: 'test-project', + config: { + scorecard: mockScorecard, + pluginsUrl: 'https://example.com/plugins.js', + }, + }), + }) + .mockResolvedValueOnce({ + status: 200, + text: async () => mockPluginsCode, + }); + + const result = await fetchRemoteScorecardAndPlugins({ + projectUrl: validProjectUrl, + auth: testToken, + isApiKey: false, + verbose: true, + }); + + expect(result).toEqual({ + scorecard: mockScorecard, + plugins: mockPluginsCode, + }); + expect(mockFetch).toHaveBeenCalledTimes(2); + }); +}); diff --git a/packages/cli/src/commands/scorecard-classic/__tests__/json-formatter.test.ts b/packages/cli/src/commands/scorecard-classic/__tests__/json-formatter.test.ts new file mode 100644 index 0000000000..07e49a9786 --- /dev/null +++ b/packages/cli/src/commands/scorecard-classic/__tests__/json-formatter.test.ts @@ -0,0 +1,266 @@ +import { printScorecardResultsAsJson } from '../formatters/json-formatter.js'; +import * as openapiCore from '@redocly/openapi-core'; +import type { ScorecardProblem } from '../types.js'; + +const createMockSource = (absoluteRef: string) => ({ + absoluteRef, + getAst: () => ({}), + getRootAst: () => ({}), + getLineColLocation: () => ({ line: 1, col: 1 }), +}); + +describe('printScorecardResultsAsJson', () => { + beforeEach(() => { + vi.spyOn(openapiCore.logger, 'output').mockImplementation(() => {}); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it('should print empty results when no problems', () => { + printScorecardResultsAsJson([], 'Gold', true); + + expect(openapiCore.logger.output).toHaveBeenCalledWith( + JSON.stringify( + { + version: '1.0', + level: 'Gold', + levels: [], + }, + null, + 2 + ) + ); + }); + + it('should group problems by scorecard level', () => { + const problems: ScorecardProblem[] = [ + { + message: 'Error in Gold level', + ruleId: 'test-rule-1', + severity: 'error', + suggest: [], + location: [ + { + source: createMockSource('/test/file.yaml') as any, + pointer: '#/paths/~1test/get', + reportOnKey: false, + }, + ], + scorecardLevel: 'Gold', + }, + { + message: 'Warning in Gold level', + ruleId: 'test-rule-2', + severity: 'warn', + suggest: [], + location: [ + { + source: createMockSource('/test/file.yaml') as any, + pointer: '#/info', + reportOnKey: false, + }, + ], + scorecardLevel: 'Gold', + }, + { + message: 'Error in Silver level', + ruleId: 'test-rule-3', + severity: 'error', + suggest: [], + location: [], + scorecardLevel: 'Silver', + }, + ]; + + printScorecardResultsAsJson(problems, 'Silver', true); + + const outputCall = (openapiCore.logger.output as any).mock.calls[0][0]; + const output = JSON.parse(outputCall); + + expect(output.version).toBe('1.0'); + expect(output.level).toBe('Silver'); + expect(output.levels).toHaveLength(2); + + const goldLevel = output.levels.find((l: any) => l.name === 'Gold'); + const silverLevel = output.levels.find((l: any) => l.name === 'Silver'); + + expect(goldLevel.total).toEqual({ errors: 1, warnings: 1 }); + expect(goldLevel.problems).toHaveLength(2); + expect(silverLevel.total).toEqual({ errors: 1, warnings: 0 }); + expect(silverLevel.problems).toHaveLength(1); + }); + + it('should include rule URLs for non-namespaced rules', () => { + const 
problems: ScorecardProblem[] = [ + { + message: 'Test error', + ruleId: 'operation-summary', + severity: 'error', + suggest: [], + location: [], + scorecardLevel: 'Gold', + }, + ]; + + printScorecardResultsAsJson(problems, 'Gold', true); + + const outputCall = (openapiCore.logger.output as any).mock.calls[0][0]; + const output = JSON.parse(outputCall); + + const goldLevel = output.levels.find((l: any) => l.name === 'Gold'); + expect(goldLevel.problems[0].ruleUrl).toBe( + 'https://redocly.com/docs/cli/rules/oas/operation-summary' + ); + }); + + it('should not include rule URLs for namespaced rules', () => { + const problems: ScorecardProblem[] = [ + { + message: 'Test error', + ruleId: 'custom/my-rule', + severity: 'error', + suggest: [], + location: [], + scorecardLevel: 'Gold', + }, + ]; + + printScorecardResultsAsJson(problems, 'Gold', true); + + const outputCall = (openapiCore.logger.output as any).mock.calls[0][0]; + const output = JSON.parse(outputCall); + + const goldLevel = output.levels.find((l: any) => l.name === 'Gold'); + expect(goldLevel.problems[0].ruleUrl).toBeUndefined(); + }); + + it('should format location with file path and range', () => { + const problems: ScorecardProblem[] = [ + { + message: 'Test error', + ruleId: 'test-rule', + severity: 'error', + suggest: [], + location: [ + { + source: createMockSource('/test/file.yaml') as any, + pointer: '#/paths/~1test/get', + reportOnKey: false, + }, + ], + scorecardLevel: 'Gold', + }, + ]; + + printScorecardResultsAsJson(problems, 'Gold', true); + + const outputCall = (openapiCore.logger.output as any).mock.calls[0][0]; + const output = JSON.parse(outputCall); + + const goldLevel = output.levels.find((l: any) => l.name === 'Gold'); + expect(goldLevel.problems[0].location).toHaveLength(1); + expect(goldLevel.problems[0].location[0].file).toBe('/test/file.yaml'); + expect(goldLevel.problems[0].location[0].pointer).toBe('#/paths/~1test/get'); + expect(goldLevel.problems[0].location[0].range).toContain('Line'); + }); + + it('should handle problems with Unknown level', () => { + const problems: ScorecardProblem[] = [ + { + message: 'Error without level', + ruleId: 'test-rule', + severity: 'error', + suggest: [], + location: [], + scorecardLevel: undefined, + }, + ]; + + printScorecardResultsAsJson(problems, 'Unknown', true); + + const outputCall = (openapiCore.logger.output as any).mock.calls[0][0]; + const output = JSON.parse(outputCall); + + const unknownLevel = output.levels.find((l: any) => l.name === 'Unknown'); + expect(unknownLevel).toBeDefined(); + expect(unknownLevel.problems).toHaveLength(1); + }); + + it('should strip ANSI codes from messages', () => { + const problems: ScorecardProblem[] = [ + { + message: '\u001b[31mError message with color\u001b[0m', + ruleId: 'test-rule', + severity: 'error', + suggest: [], + location: [], + scorecardLevel: 'Gold', + }, + ]; + + printScorecardResultsAsJson(problems, 'Gold', true); + + const outputCall = (openapiCore.logger.output as any).mock.calls[0][0]; + const output = JSON.parse(outputCall); + + const goldLevel = output.levels.find((l: any) => l.name === 'Gold'); + expect(goldLevel.problems[0].message).toBe('Error message with color'); + expect(goldLevel.problems[0].message).not.toContain('\u001b'); + }); + + it('should count errors and warnings correctly', () => { + const problems: ScorecardProblem[] = [ + { + message: 'Error 1', + ruleId: 'rule-1', + severity: 'error', + suggest: [], + location: [], + scorecardLevel: 'Gold', + }, + { + message: 'Error 2', + ruleId: 'rule-2', + 
severity: 'error', + suggest: [], + location: [], + scorecardLevel: 'Gold', + }, + { + message: 'Warning 1', + ruleId: 'rule-3', + severity: 'warn', + suggest: [], + location: [], + scorecardLevel: 'Gold', + }, + { + message: 'Warning 2', + ruleId: 'rule-4', + severity: 'warn', + suggest: [], + location: [], + scorecardLevel: 'Gold', + }, + { + message: 'Warning 3', + ruleId: 'rule-5', + severity: 'warn', + suggest: [], + location: [], + scorecardLevel: 'Gold', + }, + ]; + + printScorecardResultsAsJson(problems, 'Gold', true); + + const outputCall = (openapiCore.logger.output as any).mock.calls[0][0]; + const output = JSON.parse(outputCall); + + const goldLevel = output.levels.find((l: any) => l.name === 'Gold'); + expect(goldLevel.total.errors).toBe(2); + expect(goldLevel.total.warnings).toBe(3); + }); +}); diff --git a/packages/cli/src/commands/scorecard-classic/__tests__/login-handler.test.ts b/packages/cli/src/commands/scorecard-classic/__tests__/login-handler.test.ts new file mode 100644 index 0000000000..4326c6e0be --- /dev/null +++ b/packages/cli/src/commands/scorecard-classic/__tests__/login-handler.test.ts @@ -0,0 +1,107 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { handleLoginAndFetchToken } from '../auth/login-handler.js'; +import * as errorUtils from '../../../utils/error.js'; +import { RedoclyOAuthClient } from '../../../auth/oauth-client.js'; +import { logger } from '@redocly/openapi-core'; + +vi.mock('../../../auth/oauth-client.js'); +vi.mock('../../../reunite/api/index.js', () => ({ + getReuniteUrl: vi.fn(() => 'https://www.test.com'), +})); + +describe('handleLoginAndFetchToken', () => { + const mockConfig = { + resolvedConfig: { + residency: 'us', + }, + } as any; + + let mockOAuthClient: any; + + beforeEach(() => { + mockOAuthClient = { + getAccessToken: vi.fn(), + login: vi.fn(), + }; + vi.mocked(RedoclyOAuthClient).mockImplementation(() => mockOAuthClient); + vi.spyOn(logger, 'info').mockImplementation(() => {}); + vi.spyOn(logger, 'warn').mockImplementation(() => {}); + vi.spyOn(logger, 'error').mockImplementation(() => {}); + vi.spyOn(errorUtils, 'exitWithError').mockImplementation(() => { + throw new Error('exitWithError called'); + }); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it('should return existing access token when available', async () => { + const testToken = 'existing-token'; + mockOAuthClient.getAccessToken.mockResolvedValue(testToken); + + const result = await handleLoginAndFetchToken(mockConfig, false); + + expect(result).toBe(testToken); + expect(mockOAuthClient.getAccessToken).toHaveBeenCalledTimes(1); + expect(mockOAuthClient.login).not.toHaveBeenCalled(); + }); + + it('should log info when verbose is enabled and token exists', async () => { + const testToken = 'existing-token'; + mockOAuthClient.getAccessToken.mockResolvedValue(testToken); + + await handleLoginAndFetchToken(mockConfig, true); + + expect(logger.info).toHaveBeenCalledWith('Using existing access token.\n'); + }); + + it('should attempt login when no access token is found', async () => { + const newToken = 'new-token'; + mockOAuthClient.getAccessToken.mockResolvedValueOnce(null).mockResolvedValueOnce(newToken); + mockOAuthClient.login.mockResolvedValue(undefined); + + const result = await handleLoginAndFetchToken(mockConfig, false); + + expect(result).toBe(newToken); + expect(mockOAuthClient.login).toHaveBeenCalled(); + expect(mockOAuthClient.getAccessToken).toHaveBeenCalledTimes(2); + }); + + it('should log warning when 
verbose is enabled and no token found', async () => { + const newToken = 'new-token'; + mockOAuthClient.getAccessToken.mockResolvedValueOnce(null).mockResolvedValueOnce(newToken); + mockOAuthClient.login.mockResolvedValue(undefined); + + await handleLoginAndFetchToken(mockConfig, true); + + expect(logger.warn).toHaveBeenCalledWith( + 'No valid access token found or refresh token expired. Attempting login...\n' + ); + }); + + it('should handle login failure and exit with error', async () => { + const loginError = new Error('Login failed'); + mockOAuthClient.getAccessToken.mockResolvedValue(null); + mockOAuthClient.login.mockRejectedValue(loginError); + + await expect(handleLoginAndFetchToken(mockConfig, false)).rejects.toThrow( + 'exitWithError called' + ); + + expect(errorUtils.exitWithError).toHaveBeenCalledWith( + expect.stringContaining('Login failed. Please try again or check your connection') + ); + }); + + it('should log error details when verbose is enabled and login fails', async () => { + const loginError = new Error('Network error'); + mockOAuthClient.getAccessToken.mockResolvedValue(null); + mockOAuthClient.login.mockRejectedValue(loginError); + + await expect(handleLoginAndFetchToken(mockConfig, true)).rejects.toThrow(); + + expect(logger.error).toHaveBeenCalledWith('āŒ Login failed.\n'); + expect(logger.error).toHaveBeenCalledWith('Error details: Network error\n'); + }); +}); diff --git a/packages/cli/src/commands/scorecard-classic/__tests__/plugin-evaluator.test.ts b/packages/cli/src/commands/scorecard-classic/__tests__/plugin-evaluator.test.ts new file mode 100644 index 0000000000..37eb81de3b --- /dev/null +++ b/packages/cli/src/commands/scorecard-classic/__tests__/plugin-evaluator.test.ts @@ -0,0 +1,66 @@ +import { evaluatePluginsFromCode } from '../validation/plugin-evaluator.js'; + +describe('evaluatePluginsFromCode', () => { + it('should return empty array when no plugins code provided', async () => { + const result = await evaluatePluginsFromCode(undefined); + expect(result).toEqual([]); + }); + + it('should return empty array when empty string provided', async () => { + const result = await evaluatePluginsFromCode(''); + expect(result).toEqual([]); + }); + + it('should return empty array on invalid plugin code', async () => { + const result = await evaluatePluginsFromCode('invalid code'); + expect(result).toEqual([]); + }); + + it('should evaluate valid plugin code and return plugins', async () => { + const validPluginCode = ` + export default [ + () => ({ + id: 'test-plugin', + rules: { + oas3: { + 'test-rule': () => ({}) + } + } + }) + ]; + `; + + const result = await evaluatePluginsFromCode(validPluginCode); + expect(result).toHaveLength(1); + expect(result[0]).toHaveProperty('id', 'test-plugin'); + }); + + it('should handle __redocly_dirname replacement', async () => { + const pluginCodeWithDirname = ` + const dirname = __redocly_dirname; + export default [() => ({ id: 'test', dirname })]; + `; + + const result = await evaluatePluginsFromCode(pluginCodeWithDirname); + expect(result).toHaveLength(1); + }); + + it('should handle verbose flag', async () => { + const validPluginCode = ` + export default [ + () => ({ + id: 'verbose-test-plugin', + rules: { + oas3: { + 'test-rule': () => ({}) + } + } + }) + ]; + `; + + const result = await evaluatePluginsFromCode(validPluginCode, true); + expect(result).toHaveLength(1); + expect(result[0]).toHaveProperty('id', 'verbose-test-plugin'); + }); +}); diff --git 
a/packages/cli/src/commands/scorecard-classic/__tests__/stylish-formatter.test.ts b/packages/cli/src/commands/scorecard-classic/__tests__/stylish-formatter.test.ts new file mode 100644 index 0000000000..1ba3d59002 --- /dev/null +++ b/packages/cli/src/commands/scorecard-classic/__tests__/stylish-formatter.test.ts @@ -0,0 +1,135 @@ +import { printScorecardResults } from '../formatters/stylish-formatter.js'; +import * as openapiCore from '@redocly/openapi-core'; +import type { ScorecardProblem } from '../types.js'; + +const createMockSource = (absoluteRef: string) => ({ + absoluteRef, + getAst: () => ({}), + getRootAst: () => ({}), + getLineColLocation: () => ({ line: 1, col: 1 }), +}); + +describe('printScorecardResults', () => { + beforeEach(() => { + vi.spyOn(openapiCore.logger, 'info').mockImplementation(() => {}); + vi.spyOn(openapiCore.logger, 'output').mockImplementation(() => {}); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it('should handle problems without location', () => { + const problems: ScorecardProblem[] = [ + { + message: 'Error without location', + ruleId: 'test-rule', + severity: 'error', + suggest: [], + location: [], + scorecardLevel: 'Gold', + }, + ]; + + printScorecardResults(problems, 'Gold', true); + + expect(openapiCore.logger.info).toHaveBeenCalledWith( + expect.stringMatching(/Found.*1.*error.*0.*warning.*1.*level/) + ); + expect(openapiCore.logger.output).toHaveBeenCalledWith( + expect.stringContaining('Achieved Level: ') + ); + expect(openapiCore.logger.output).toHaveBeenCalledWith(expect.stringContaining('šŸ“‹ Gold')); + }); + + it('should handle problems with Unknown level', () => { + const problems: ScorecardProblem[] = [ + { + message: 'Error without level', + ruleId: 'test-rule', + severity: 'error', + suggest: [], + location: [], + scorecardLevel: undefined, + }, + ]; + + printScorecardResults(problems, 'Unknown', true); + + expect(openapiCore.logger.output).toHaveBeenCalledWith(expect.stringContaining('Unknown')); + }); + + it('should show correct severity counts per level', () => { + const problems: ScorecardProblem[] = [ + { + message: 'Error 1', + ruleId: 'rule-1', + severity: 'error', + suggest: [], + location: [], + scorecardLevel: 'Gold', + }, + { + message: 'Error 2', + ruleId: 'rule-2', + severity: 'error', + suggest: [], + location: [], + scorecardLevel: 'Gold', + }, + { + message: 'Warning 1', + ruleId: 'rule-3', + severity: 'warn', + suggest: [], + location: [], + scorecardLevel: 'Gold', + }, + ]; + + printScorecardResults(problems, 'Gold', true); + + expect(openapiCore.logger.output).toHaveBeenCalledWith(expect.stringContaining('šŸ“‹ Gold')); + expect(openapiCore.logger.output).toHaveBeenCalledWith( + expect.stringMatching(/2.*error.*1.*warning/) + ); + }); + + it('should calculate correct padding for alignment', () => { + const problems: ScorecardProblem[] = [ + { + message: 'Error 1', + ruleId: 'short', + severity: 'error', + suggest: [], + location: [ + { + source: createMockSource('/test/file.yaml') as any, + pointer: '#/paths/~1test/get', + reportOnKey: false, + }, + ], + scorecardLevel: 'Gold', + }, + { + message: 'Error 2', + ruleId: 'very-long-rule-id-name', + severity: 'error', + suggest: [], + location: [ + { + source: createMockSource('/test/file.yaml') as any, + pointer: '#/info', + reportOnKey: false, + }, + ], + scorecardLevel: 'Gold', + }, + ]; + + printScorecardResults(problems, 'Gold', true); + + // Should have 4 calls: 1 for level header + 2 for problems + 1 for achieved level + 
expect(openapiCore.logger.output).toHaveBeenCalledTimes(4); + }); +}); diff --git a/packages/cli/src/commands/scorecard-classic/__tests__/validate-scorecard.test.ts b/packages/cli/src/commands/scorecard-classic/__tests__/validate-scorecard.test.ts new file mode 100644 index 0000000000..e91788f7f4 --- /dev/null +++ b/packages/cli/src/commands/scorecard-classic/__tests__/validate-scorecard.test.ts @@ -0,0 +1,359 @@ +import { validateScorecard } from '../validation/validate-scorecard.js'; +import * as openapiCore from '@redocly/openapi-core'; +import { evaluatePluginsFromCode } from '../validation/plugin-evaluator.js'; + +vi.mock('../validation/plugin-evaluator.js', () => ({ + evaluatePluginsFromCode: vi.fn(), +})); + +describe('validateScorecard', () => { + const mockDocument = { + parsed: { + openapi: '3.0.0', + info: { title: 'Test API', version: '1.0.0' }, + paths: {}, + }, + source: { + absoluteRef: 'test.yaml', + }, + } as any; + + const mockResolver = {} as any; + + beforeEach(() => { + vi.clearAllMocks(); + vi.spyOn(openapiCore, 'createConfig').mockResolvedValue({} as any); + vi.spyOn(openapiCore, 'lintDocument').mockResolvedValue([]); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it('should return empty array when no scorecard levels defined', async () => { + const scorecardConfig = { levels: [] }; + + const result = await validateScorecard({ + document: mockDocument, + externalRefResolver: mockResolver, + scorecardConfig, + }); + + expect(result).toEqual({ + achievedLevel: 'Non Conformant', + problems: [], + targetLevelAchieved: true, + }); + expect(openapiCore.lintDocument).not.toHaveBeenCalled(); + }); + + it('should validate each scorecard level', async () => { + const scorecardConfig = { + levels: [ + { name: 'Baseline', rules: {} }, + { name: 'Gold', rules: {} }, + ], + }; + + await validateScorecard({ + document: mockDocument, + externalRefResolver: mockResolver, + scorecardConfig, + }); + + expect(openapiCore.createConfig).toHaveBeenCalledTimes(2); + expect(openapiCore.lintDocument).toHaveBeenCalledTimes(2); + }); + + it('should attach scorecard level name to problems', async () => { + const scorecardConfig = { + levels: [{ name: 'Gold', rules: {} }], + }; + + const mockProblems = [ + { + message: 'Test error', + ruleId: 'test-rule', + severity: 'error', + location: [], + ignored: false, + }, + ]; + + vi.mocked(openapiCore.lintDocument).mockResolvedValue(mockProblems as any); + + const result = await validateScorecard({ + document: mockDocument, + externalRefResolver: mockResolver, + scorecardConfig, + }); + + expect(result.problems).toHaveLength(1); + expect(result.problems[0].scorecardLevel).toBe('Gold'); + expect(result.problems[0].message).toBe('Test error'); + }); + + it('should filter out ignored problems', async () => { + const scorecardConfig = { + levels: [{ name: 'Baseline', rules: {} }], + }; + + const mockProblems = [ + { + message: 'Error 1', + ruleId: 'rule-1', + severity: 'error', + location: [], + ignored: false, + }, + { + message: 'Error 2', + ruleId: 'rule-2', + severity: 'error', + location: [], + ignored: true, + }, + ]; + + vi.mocked(openapiCore.lintDocument).mockResolvedValue(mockProblems as any); + + const result = await validateScorecard({ + document: mockDocument, + externalRefResolver: mockResolver, + scorecardConfig, + }); + + expect(result.problems).toHaveLength(1); + expect(result.problems[0].message).toBe('Error 1'); + }); + + it('should evaluate plugins from code when string provided', async () => { + const scorecardConfig = { + 
levels: [{ name: 'Gold', rules: {} }], + }; + + const mockPlugins = [{ id: 'test-plugin' }]; + vi.mocked(evaluatePluginsFromCode).mockResolvedValue(mockPlugins); + + await validateScorecard({ + document: mockDocument, + externalRefResolver: mockResolver, + scorecardConfig, + pluginsCodeOrPlugins: 'plugin-code', + }); + + expect(evaluatePluginsFromCode).toHaveBeenCalledWith('plugin-code', false); + expect(openapiCore.createConfig).toHaveBeenCalledWith( + expect.objectContaining({ plugins: mockPlugins }), + expect.any(Object) + ); + }); + + it('should use plugins directly when array provided', async () => { + const scorecardConfig = { + levels: [{ name: 'Gold', rules: {} }], + }; + + const mockPlugins = [{ id: 'test-plugin' }]; + + await validateScorecard({ + document: mockDocument, + externalRefResolver: mockResolver, + scorecardConfig, + pluginsCodeOrPlugins: mockPlugins, + }); + + expect(evaluatePluginsFromCode).not.toHaveBeenCalled(); + expect(openapiCore.createConfig).toHaveBeenCalledWith( + expect.objectContaining({ plugins: mockPlugins }), + expect.any(Object) + ); + }); + + it('should handle verbose flag', async () => { + const scorecardConfig = { + levels: [{ name: 'Gold', rules: {} }], + }; + + const mockPlugins = [{ id: 'test-plugin' }]; + vi.mocked(evaluatePluginsFromCode).mockResolvedValue(mockPlugins); + + await validateScorecard({ + document: mockDocument, + externalRefResolver: mockResolver, + scorecardConfig, + pluginsCodeOrPlugins: 'plugin-code', + verbose: true, + }); + + expect(evaluatePluginsFromCode).toHaveBeenCalledWith('plugin-code', true); + expect(openapiCore.createConfig).toHaveBeenCalledWith( + expect.objectContaining({ plugins: mockPlugins }), + expect.any(Object) + ); + }); + + describe('determineAchievedLevel', () => { + it('should return highest level when all levels pass without problems', async () => { + const scorecardConfig = { + levels: [ + { name: 'Bronze', rules: {} }, + { name: 'Silver', rules: {} }, + { name: 'Gold', rules: {} }, + ], + }; + + vi.mocked(openapiCore.lintDocument).mockResolvedValue([]); + + const result = await validateScorecard({ + document: mockDocument, + externalRefResolver: mockResolver, + scorecardConfig, + }); + + expect(result.achievedLevel).toBe('Gold'); + expect(result.targetLevelAchieved).toBe(true); + }); + + it('should return previous level when current level has errors', async () => { + const scorecardConfig = { + levels: [ + { name: 'Bronze', rules: {} }, + { name: 'Silver', rules: {} }, + { name: 'Gold', rules: {} }, + ], + }; + + vi.mocked(openapiCore.lintDocument) + .mockResolvedValueOnce([]) // Bronze: no problems + .mockResolvedValueOnce([ + { + message: 'Silver level error', + ruleId: 'test-rule', + severity: 'error', + location: [], + ignored: false, + }, + ] as any); // Silver: has error + + const result = await validateScorecard({ + document: mockDocument, + externalRefResolver: mockResolver, + scorecardConfig, + }); + + expect(result.achievedLevel).toBe('Bronze'); + expect(result.problems).toHaveLength(1); + }); + + it('should achieve level even with warnings (only errors prevent achievement)', async () => { + const scorecardConfig = { + levels: [ + { name: 'Bronze', rules: {} }, + { name: 'Silver', rules: {} }, + ], + }; + + vi.mocked(openapiCore.lintDocument) + .mockResolvedValueOnce([]) // Bronze: no problems + .mockResolvedValueOnce([ + { + message: 'Silver level warning', + ruleId: 'test-rule', + severity: 'warn', + location: [], + ignored: false, + }, + ] as any); // Silver: has warning but no errors + + 
const result = await validateScorecard({ + document: mockDocument, + externalRefResolver: mockResolver, + scorecardConfig, + }); + + expect(result.achievedLevel).toBe('Silver'); + expect(result.problems).toHaveLength(1); + }); + + it('should return "Non Conformant" when first level has problems', async () => { + const scorecardConfig = { + levels: [ + { name: 'Bronze', rules: {} }, + { name: 'Silver', rules: {} }, + ], + }; + + vi.mocked(openapiCore.lintDocument).mockResolvedValue([ + { + message: 'Bronze level error', + ruleId: 'test-rule', + severity: 'error', + location: [], + ignored: false, + }, + ] as any); + + const result = await validateScorecard({ + document: mockDocument, + externalRefResolver: mockResolver, + scorecardConfig, + }); + + expect(result.achievedLevel).toBe('Non Conformant'); + }); + + it('should return target level when specified and achieved', async () => { + const scorecardConfig = { + levels: [ + { name: 'Bronze', rules: {} }, + { name: 'Silver', rules: {} }, + { name: 'Gold', rules: {} }, + ], + }; + + vi.mocked(openapiCore.lintDocument).mockResolvedValue([]); + + const result = await validateScorecard({ + document: mockDocument, + externalRefResolver: mockResolver, + scorecardConfig, + targetLevel: 'Silver', + }); + + expect(result.achievedLevel).toBe('Silver'); + expect(result.targetLevelAchieved).toBe(true); + }); + + it('should indicate target level not achieved when level has problems', async () => { + const scorecardConfig = { + levels: [ + { name: 'Bronze', rules: {} }, + { name: 'Silver', rules: {} }, + ], + }; + + vi.mocked(openapiCore.lintDocument) + .mockResolvedValueOnce([]) // Bronze: no problems + .mockResolvedValueOnce([ + { + message: 'Silver level error', + ruleId: 'test-rule', + severity: 'error', + location: [], + ignored: false, + }, + ] as any); // Silver: has error + + const result = await validateScorecard({ + document: mockDocument, + externalRefResolver: mockResolver, + scorecardConfig, + targetLevel: 'Silver', + }); + + expect(result.achievedLevel).toBe('Bronze'); + expect(result.targetLevelAchieved).toBe(false); + }); + }); +}); diff --git a/packages/cli/src/commands/scorecard-classic/auth/login-handler.ts b/packages/cli/src/commands/scorecard-classic/auth/login-handler.ts new file mode 100644 index 0000000000..c816c26e0f --- /dev/null +++ b/packages/cli/src/commands/scorecard-classic/auth/login-handler.ts @@ -0,0 +1,40 @@ +import { logger } from '@redocly/openapi-core'; +import { RedoclyOAuthClient } from '../../../auth/oauth-client.js'; +import { getReuniteUrl } from '../../../reunite/api/index.js'; +import { exitWithError } from '../../../utils/error.js'; + +import type { Config } from '@redocly/openapi-core'; + +export async function handleLoginAndFetchToken( + config: Config, + verbose = false +): Promise { + const reuniteUrl = getReuniteUrl(config, config.resolvedConfig?.residency); + + const oauthClient = new RedoclyOAuthClient(); + let accessToken = await oauthClient.getAccessToken(reuniteUrl); + + if (accessToken) { + if (verbose) { + logger.info(`Using existing access token.\n`); + } + return accessToken; + } + + if (verbose) { + logger.warn(`No valid access token found or refresh token expired. Attempting login...\n`); + } + + try { + await oauthClient.login(reuniteUrl); + accessToken = await oauthClient.getAccessToken(reuniteUrl); + } catch (error) { + if (verbose) { + logger.error(`āŒ Login failed.\n`); + logger.error(`Error details: ${error.message}\n`); + } + exitWithError(`Login failed. 
Please try again or check your connection to ${reuniteUrl}.`); + } + + return accessToken; +} diff --git a/packages/cli/src/commands/scorecard-classic/formatters/json-formatter.ts b/packages/cli/src/commands/scorecard-classic/formatters/json-formatter.ts new file mode 100644 index 0000000000..12a81d5561 --- /dev/null +++ b/packages/cli/src/commands/scorecard-classic/formatters/json-formatter.ts @@ -0,0 +1,115 @@ +import { logger, getLineColLocation } from '@redocly/openapi-core'; + +import type { ScorecardProblem } from '../types.js'; + +type ScorecardLevel = { + name: string; + total: { + errors: number; + warnings: number; + }; + problems: Array<{ + ruleId: string; + ruleUrl?: string; + severity: string; + message: string; + location: { + file: string; + range: string; + pointer?: string; + }[]; + }>; +}; + +export type ScorecardJsonOutput = { + version: string; + level?: string; + levels: ScorecardLevel[]; +}; + +function formatRange( + start: { line: number; col: number }, + end?: { line: number; col: number } +): string { + const startStr = `Line ${start.line}, Col ${start.col}`; + if (!end) { + return startStr; + } + const endStr = `Line ${end.line}, Col ${end.col}`; + return `${startStr} → ${endStr}`; +} + +function getRuleUrl(ruleId: string): string | undefined { + if (!ruleId.includes('/')) { + return `https://redocly.com/docs/cli/rules/oas/${ruleId}`; + } + return undefined; +} + +function stripAnsiCodes(text: string): string { + // eslint-disable-next-line no-control-regex + return text.replace(/\u001b\[\d+m/g, ''); +} + +export function printScorecardResultsAsJson( + problems: ScorecardProblem[], + achievedLevel: string, + targetLevelAchieved: boolean, + version: string = '1.0' +): void { + const groupedByLevel: Record = {}; + + for (const problem of problems) { + const level = problem.scorecardLevel || 'Unknown'; + if (!groupedByLevel[level]) { + groupedByLevel[level] = []; + } + groupedByLevel[level].push(problem); + } + + const levels: ScorecardLevel[] = []; + + for (const [levelName, levelProblems] of Object.entries(groupedByLevel)) { + let errors = 0; + let warnings = 0; + + const formattedProblems = levelProblems.map((problem) => { + if (problem.severity === 'error') errors++; + if (problem.severity === 'warn') warnings++; + + return { + ruleId: problem.ruleId, + ruleUrl: getRuleUrl(problem.ruleId), + severity: problem.severity, + message: stripAnsiCodes(problem.message), + + location: problem.location.map((loc) => { + const lineCol = getLineColLocation(loc); + return { + file: loc.source.absoluteRef, + range: formatRange(lineCol.start, lineCol.end), + pointer: loc.pointer, + }; + }), + }; + }); + + levels.push({ + name: levelName, + total: { + errors, + warnings, + }, + problems: formattedProblems, + }); + } + + const output: ScorecardJsonOutput = { + version, + ...(targetLevelAchieved ? 
{ level: achievedLevel } : {}), + levels, + }; + + logger.output(JSON.stringify(output, null, 2)); + logger.info('\n'); +} diff --git a/packages/cli/src/commands/scorecard-classic/formatters/stylish-formatter.ts b/packages/cli/src/commands/scorecard-classic/formatters/stylish-formatter.ts new file mode 100644 index 0000000000..479814b8d8 --- /dev/null +++ b/packages/cli/src/commands/scorecard-classic/formatters/stylish-formatter.ts @@ -0,0 +1,98 @@ +import { logger, getLineColLocation, pluralize } from '@redocly/openapi-core'; +import { gray, yellow, red, cyan, bold, white } from 'colorette'; + +import type { ScorecardProblem } from '../types.js'; + +function formatStylishProblem( + problem: ScorecardProblem, + locationPad: number, + ruleIdPad: number +): string { + const severityColor = + problem.severity === 'error' ? red : problem.severity === 'warn' ? yellow : gray; + + const loc = problem.location?.[0]; + let line = 0; + let column = 0; + + if (loc) { + const lineColLoc = getLineColLocation(loc); + line = lineColLoc.start.line; + column = lineColLoc.start.col; + } + + const location = `${line}:${column}`.padEnd(locationPad); + const severity = severityColor(problem.severity.padEnd(7)); + const ruleId = problem.ruleId.padEnd(ruleIdPad); + const level = cyan(`[${problem.scorecardLevel || 'Unknown'}]`); + + return ` ${location} ${severity} ${level} ${ruleId} ${problem.message}`; +} + +export function printScorecardResults( + problems: ScorecardProblem[], + achievedLevel: string, + targetLevelAchieved: boolean +): void { + const problemsByLevel = problems.reduce((acc, problem) => { + const level = problem.scorecardLevel || 'Unknown'; + if (!acc[level]) { + acc[level] = []; + } + acc[level].push(problem); + return acc; + }, {} as Record); + + const totalErrors = problems.filter((p) => p.severity === 'error').length; + const totalWarnings = problems.filter((p) => p.severity === 'warn').length; + const levelCount = Object.keys(problemsByLevel).length; + + logger.info( + white( + `Found ${bold(red(totalErrors.toString()))} ${pluralize('error', totalErrors)} and ${bold( + yellow(totalWarnings.toString()) + )} ${pluralize('warning', totalWarnings)} across ${bold( + cyan(levelCount.toString()) + )} ${pluralize('level', levelCount)}\n` + ) + ); + + targetLevelAchieved && + logger.output(white(bold(`\n ā˜‘ļø Achieved Level: ${cyan(achievedLevel)}\n`))); + + for (const [level, levelProblems] of Object.entries(problemsByLevel)) { + const severityCounts = levelProblems.reduce((acc, p) => { + acc[p.severity] = (acc[p.severity] || 0) + 1; + return acc; + }, {} as Record); + + logger.output( + bold(cyan(`\n šŸ“‹ ${level}`)) + + gray( + ` (${severityCounts.error || 0} ${pluralize('error', severityCounts.error || 0)}, ${ + severityCounts.warn || 0 + } ${pluralize('warning', severityCounts.warn || 0)}) \n` + ) + ); + + const locationPad = Math.max( + ...levelProblems.map((p) => { + const loc = p.location?.[0]; + if (loc) { + const lineColLoc = getLineColLocation(loc); + return `${lineColLoc.start.line}:${lineColLoc.start.col}`.length; + } + return 3; + }), + 8 + ); + + const ruleIdPad = Math.max(...levelProblems.map((p) => p.ruleId.length)); + + for (const problem of levelProblems) { + logger.output(`${formatStylishProblem(problem, locationPad, ruleIdPad)}\n`); + } + + logger.info(''); + } +} diff --git a/packages/cli/src/commands/scorecard-classic/index.ts b/packages/cli/src/commands/scorecard-classic/index.ts new file mode 100644 index 0000000000..0460230d59 --- /dev/null +++ 
b/packages/cli/src/commands/scorecard-classic/index.ts @@ -0,0 +1,130 @@ +import { formatPath, getExecutionTime, getFallbackApisOrExit } from '../../utils/miscellaneous.js'; +import { BaseResolver, logger } from '@redocly/openapi-core'; +import { AbortFlowError, exitWithError } from '../../utils/error.js'; +import { handleLoginAndFetchToken } from './auth/login-handler.js'; +import { printScorecardResults } from './formatters/stylish-formatter.js'; +import { printScorecardResultsAsJson } from './formatters/json-formatter.js'; +import { fetchRemoteScorecardAndPlugins } from './remote/fetch-scorecard.js'; +import { validateScorecard } from './validation/validate-scorecard.js'; +import { blue, bold, cyan, gray, green, white } from 'colorette'; + +import type { ScorecardClassicArgv } from './types.js'; +import type { CommandArgs } from '../../wrapper.js'; +import type { Document } from '@redocly/openapi-core'; + +export async function handleScorecardClassic({ + argv, + config, + version, + collectSpecData, +}: CommandArgs) { + const startedAt = performance.now(); + const apis = await getFallbackApisOrExit(argv.api ? [argv.api] : [], config); + if (!apis.length) { + exitWithError('No APIs were provided.'); + } + + const path = apis[0].path; + const externalRefResolver = new BaseResolver(config.resolve); + const document = (await externalRefResolver.resolveDocument(null, path, true)) as Document; + const targetLevel = argv['target-level']; + collectSpecData?.(document.parsed); + + const projectUrl = + argv['project-url'] || + config.resolvedConfig.scorecardClassic?.fromProjectUrl || + config.resolvedConfig.scorecard?.fromProjectUrl; + const apiKey = process.env.REDOCLY_AUTHORIZATION; + + if (argv.verbose) { + logger.info(`Project URL: ${projectUrl || 'not configured'}\n`); + } + + if (!projectUrl) { + exitWithError( + 'Scorecard is not configured. Please provide it via --project-url flag or configure it in redocly.yaml. Learn more: https://redocly.com/docs/realm/config/scorecard#fromprojecturl-example' + ); + } + + if (isNonInteractiveEnvironment() && !apiKey) { + exitWithError( + 'Please provide an API key using the REDOCLY_AUTHORIZATION environment variable.\n' + ); + } + + const auth = apiKey || (await handleLoginAndFetchToken(config, argv.verbose)); + + if (!auth) { + exitWithError('Failed to obtain access token or API key.'); + } + + const remoteScorecardAndPlugins = await fetchRemoteScorecardAndPlugins({ + projectUrl, + auth, + isApiKey: !!apiKey, + verbose: argv.verbose, + }); + + logger.info(gray(`\nRunning scorecard for ${formatPath(path)}...\n`)); + const { + problems: result, + achievedLevel, + targetLevelAchieved, + } = await validateScorecard({ + document, + externalRefResolver, + scorecardConfig: remoteScorecardAndPlugins.scorecard!, + configPath: config.configPath, + pluginsCodeOrPlugins: remoteScorecardAndPlugins?.plugins, + targetLevel, + verbose: argv.verbose, + }); + + if (result.length === 0) { + logger.output(white(bold(`\n ā˜‘ļø Achieved Level: ${cyan(achievedLevel)}\n`))); + + logger.output( + green( + `āœ… No issues found for ${blue( + formatPath(path) + )}. 
Your API meets all scorecard requirements.\n` + ) + ); + return; + } + + if (targetLevel && !targetLevelAchieved) { + logger.error( + `\nāŒ Your API specification does not satisfy the target scorecard level "${targetLevel}".\n` + ); + } + + if (argv.format === 'json') { + printScorecardResultsAsJson(result, achievedLevel, targetLevelAchieved, version); + } else { + printScorecardResults(result, achievedLevel, targetLevelAchieved); + } + + const elapsed = getExecutionTime(startedAt); + logger.info( + `šŸ“Š Scorecard results for ${blue(formatPath(path))} at ${blue(path || 'stdout')} ${green( + elapsed + )}.\n` + ); + + if (targetLevel && !targetLevelAchieved) { + throw new AbortFlowError('Target scorecard level not achieved.'); + } else if (achievedLevel !== 'Non Conformant') { + return; + } + + throw new AbortFlowError('Scorecard validation failed.'); +} + +function isNonInteractiveEnvironment(): boolean { + if (process.env.CI || !process.stdin.isTTY) { + return true; + } + + return false; +} diff --git a/packages/cli/src/commands/scorecard-classic/remote/fetch-scorecard.ts b/packages/cli/src/commands/scorecard-classic/remote/fetch-scorecard.ts new file mode 100644 index 0000000000..3849ee7654 --- /dev/null +++ b/packages/cli/src/commands/scorecard-classic/remote/fetch-scorecard.ts @@ -0,0 +1,181 @@ +import { logger } from '@redocly/openapi-core'; +import { exitWithError } from '../../../utils/error.js'; + +import type { RemoteScorecardAndPlugins, Project } from '../types.js'; + +export type FetchRemoteScorecardAndPluginsParams = { + projectUrl: string; + auth: string; + isApiKey?: boolean; + verbose?: boolean; +}; + +export async function fetchRemoteScorecardAndPlugins({ + projectUrl, + auth, + isApiKey = false, + verbose = false, +}: FetchRemoteScorecardAndPluginsParams): Promise { + if (verbose) { + logger.info(`Starting fetch for remote scorecard configuration...\n`); + } + + const parsedProjectUrl = parseProjectUrl(projectUrl); + + if (!parsedProjectUrl) { + exitWithError(`Invalid project URL format: ${projectUrl}`); + } + + const { residency, orgSlug, projectSlug } = parsedProjectUrl; + + try { + const project = await fetchProjectConfigBySlugs({ + residency, + orgSlug, + projectSlug, + auth, + isApiKey, + verbose, + }); + const scorecard = project?.config.scorecardClassic || project?.config.scorecard; + + if (!scorecard) { + throw new Error('No scorecard configuration found.'); + } + + if (verbose) { + logger.info(`Successfully fetched scorecard configuration.\n`); + logger.info(`Scorecard levels found: ${scorecard.levels?.length || 0}\n`); + } + + const plugins = project.config.pluginsUrl + ? 
await fetchPlugins(project.config.pluginsUrl, verbose) + : undefined; + + if (verbose) { + if (plugins) { + logger.info(`Successfully fetched plugins from ${project.config.pluginsUrl}\n`); + } else if (project.config.pluginsUrl) { + logger.info(`No plugins were loaded from ${project.config.pluginsUrl}\n`); + } else { + logger.info(`No custom plugins configured for this scorecard.\n`); + } + } + + return { + scorecard: scorecard!, + plugins, + }; + } catch (error) { + if (verbose) { + logger.error(`āŒ Failed to fetch remote scorecard configuration.\n`); + logger.error(`Error details: ${error.message}\n`); + if (error.stack) { + logger.error(`Stack trace:\n${error.stack}\n`); + } + } + exitWithError(error.message); + } +} + +function parseProjectUrl( + projectUrl: string +): { residency: string; orgSlug: string; projectSlug: string } | undefined { + const url = new URL(projectUrl); + const match = url.pathname.match(/\/org\/(?[^/]+)\/project\/(?[^/]+)/); + + if (!match?.groups) { + return; + } + + const { orgSlug, projectSlug } = match.groups; + + return { + residency: url.origin, + orgSlug, + projectSlug, + }; +} + +type FetchProjectConfigBySlugsParams = { + residency: string; + orgSlug: string; + projectSlug: string; + auth: string; + isApiKey: boolean; + verbose?: boolean; +}; + +async function fetchProjectConfigBySlugs({ + residency, + orgSlug, + projectSlug, + auth, + isApiKey, + verbose = false, +}: FetchProjectConfigBySlugsParams): Promise { + const authHeaders = createAuthHeaders(auth, isApiKey); + const projectUrl = new URL(`${residency}/api/orgs/${orgSlug}/projects/${projectSlug}`); + + const projectResponse = await fetch(projectUrl, { headers: authHeaders }); + + if (verbose) { + logger.info(`Project fetch response status: ${projectResponse.status}\n`); + } + + if (projectResponse.status === 401 || projectResponse.status === 403) { + if (verbose) { + logger.error(`Authentication failed with status ${projectResponse.status}.\n`); + logger.error(`Check that your credentials are valid and have the necessary permissions.\n`); + } + throw new Error( + `Unauthorized access to project: ${projectSlug}. Please check your credentials.` + ); + } + + if (projectResponse.status !== 200) { + throw new Error(`Failed to fetch project: ${projectSlug}. 
+  }
+
+  if (verbose) {
+    logger.info(`Successfully received project configuration.\n`);
+  }
+
+  return projectResponse.json();
+}
+
+async function fetchPlugins(pluginsUrl: string, verbose = false): Promise<string | undefined> {
+  if (verbose) {
+    logger.info(`Fetching plugins from: ${pluginsUrl}\n`);
+  }
+
+  try {
+    const pluginsResponse = await fetch(pluginsUrl);
+
+    if (verbose) {
+      logger.info(`Plugins fetch response status: ${pluginsResponse.status}\n`);
+    }
+
+    if (pluginsResponse.status !== 200) {
+      if (verbose) {
+        logger.error(`Failed to fetch plugins\n`);
+      }
+      return;
+    }
+
+    return pluginsResponse.text();
+  } catch (error) {
+    if (verbose) {
+      logger.error(`Error fetching plugins: ${error.message}\n`);
+    }
+    return;
+  }
+}
+
+function createAuthHeaders(auth: string, isApiKey: boolean): Record<string, string> {
+  if (isApiKey) {
+    return { Authorization: `Bearer ${auth}` };
+  }
+
+  return { Cookie: `accessToken=${auth}` };
+}
diff --git a/packages/cli/src/commands/scorecard-classic/types.ts b/packages/cli/src/commands/scorecard-classic/types.ts
new file mode 100644
index 0000000000..721b1063a1
--- /dev/null
+++ b/packages/cli/src/commands/scorecard-classic/types.ts
@@ -0,0 +1,23 @@
+import type { NormalizedProblem, OutputFormat, ResolvedConfig } from '@redocly/openapi-core';
+
+export type ScorecardClassicArgv = {
+  api: string;
+  config: string;
+  'project-url'?: string;
+  format: OutputFormat;
+  'target-level'?: string;
+  verbose?: boolean;
+};
+
+export type ScorecardProblem = NormalizedProblem & { scorecardLevel?: string };
+
+export type RemoteScorecardAndPlugins = {
+  scorecard: ResolvedConfig['scorecard'];
+  plugins: string | undefined;
+};
+
+export type Project = {
+  id: `prj_${string}`;
+  slug: string;
+  config: ResolvedConfig & { pluginsUrl?: string; scorecardClassic?: ResolvedConfig['scorecard'] };
+};
diff --git a/packages/cli/src/commands/scorecard-classic/validation/plugin-evaluator.ts b/packages/cli/src/commands/scorecard-classic/validation/plugin-evaluator.ts
new file mode 100644
index 0000000000..744e83e8e7
--- /dev/null
+++ b/packages/cli/src/commands/scorecard-classic/validation/plugin-evaluator.ts
@@ -0,0 +1,68 @@
+import { logger, pluralize } from '@redocly/openapi-core';
+
+import type { Plugin } from '@redocly/openapi-core';
+
+type PluginFunction = () => Plugin;
+
+type PluginsModule = {
+  default: PluginFunction[];
+};
+
+export async function evaluatePluginsFromCode(
+  pluginsCode?: string,
+  verbose = false
+): Promise<Plugin[]> {
+  if (!pluginsCode) {
+    if (verbose) {
+      logger.info(`No plugins code provided to evaluate.\n`);
+    }
+    return [];
+  }
+
+  if (verbose) {
+    logger.info(`Starting plugin evaluation...\n`);
+  }
+
+  try {
+    const dirname = import.meta.url;
+    const pluginsCodeWithDirname = pluginsCode.replaceAll('__redocly_dirname', `"${dirname}"`);
+
+    if (verbose) {
+      logger.info(`Encoding plugins code to base64 data URI...\n`);
+    }
+
+    const base64 = btoa(pluginsCodeWithDirname);
+    const dataUri = `data:text/javascript;base64,${base64}`;
+
+    if (verbose) {
+      logger.info(`Importing plugins module dynamically...\n`);
+    }
+
+    const module: PluginsModule = await import(dataUri);
+    const evaluatedPlugins = module.default.map((pluginFunction) => pluginFunction());
+
+    if (verbose) {
+      logger.info(
+        `Successfully evaluated ${evaluatedPlugins.length} ${pluralize(
+          'plugin',
+          evaluatedPlugins.length
+        )}.\n`
+      );
+      evaluatedPlugins.forEach((plugin, index) => {
+        logger.info(`  Plugin ${index + 1}: ${plugin.id || 'unnamed'}\n`);
+      });
+    }
+
+    return evaluatedPlugins;
+  } catch (error) {
+    if (verbose) {
+      logger.error(`āŒ Failed to evaluate plugins.\n`);
+      logger.error(`Error details: ${error.message}\n`);
+      if (error.stack) {
+        logger.error(`Stack trace:\n${error.stack}\n`);
+      }
+    }
+    logger.warn(`Could not evaluate scorecard plugins. Continuing without custom plugins.\n`);
+    return [];
+  }
+}
diff --git a/packages/cli/src/commands/scorecard-classic/validation/validate-scorecard.ts b/packages/cli/src/commands/scorecard-classic/validation/validate-scorecard.ts
new file mode 100644
index 0000000000..031d1e7a46
--- /dev/null
+++ b/packages/cli/src/commands/scorecard-classic/validation/validate-scorecard.ts
@@ -0,0 +1,132 @@
+import { logger, createConfig, lintDocument, pluralize } from '@redocly/openapi-core';
+import { evaluatePluginsFromCode } from './plugin-evaluator.js';
+import { exitWithError } from '../../../utils/error.js';
+
+import type { ScorecardConfig } from '@redocly/config';
+import type { Document, RawUniversalConfig, Plugin, BaseResolver } from '@redocly/openapi-core';
+import type { ScorecardProblem } from '../types.js';
+
+export type ScorecardValidationResult = {
+  problems: ScorecardProblem[];
+  achievedLevel: string;
+  targetLevelAchieved: boolean;
+};
+
+export type ValidateScorecardParams = {
+  document: Document;
+  externalRefResolver: BaseResolver;
+  scorecardConfig: ScorecardConfig;
+  configPath?: string;
+  pluginsCodeOrPlugins?: string | Plugin[];
+  targetLevel?: string;
+  verbose?: boolean;
+};
+
+export async function validateScorecard({
+  document,
+  externalRefResolver,
+  scorecardConfig,
+  configPath,
+  pluginsCodeOrPlugins,
+  targetLevel,
+  verbose = false,
+}: ValidateScorecardParams): Promise<ScorecardValidationResult> {
+  const problems: ScorecardProblem[] = [];
+  const levelResults: Map<string, ScorecardProblem[]> = new Map();
+
+  if (targetLevel && !scorecardConfig.levels?.some((level) => level.name === targetLevel)) {
+    exitWithError(
+      `Target level "${targetLevel}" not found in the scorecard configuration levels.\n`
+    );
+  }
+
+  for (const level of scorecardConfig?.levels || []) {
+    if (verbose) {
+      logger.info(`\nValidating level: "${level.name}"\n`);
+    }
+
+    const plugins =
+      typeof pluginsCodeOrPlugins === 'string'
+        ? await evaluatePluginsFromCode(pluginsCodeOrPlugins, verbose)
+        : pluginsCodeOrPlugins;
+
+    if (verbose && plugins && plugins.length > 0) {
+      logger.info(
+        `Using ${plugins.length} ${pluralize('plugin', plugins.length)} for this level.\n`
+      );
+    }
+
+    const config = await createConfig({ ...level, plugins } as RawUniversalConfig, {
+      configPath,
+    });
+
+    if (verbose) {
+      logger.info(`Linting document against level rules...\n`);
+    }
+
+    const levelProblems = await lintDocument({
+      document,
+      externalRefResolver,
+      config,
+    });
+
+    const filteredProblems = levelProblems
+      .filter(({ ignored }) => !ignored)
+      .map((problem) => ({
+        ...problem,
+        scorecardLevel: level.name,
+      }));
+
+    levelResults.set(level.name, filteredProblems);
+
+    if (verbose) {
+      logger.info(
+        `Found ${filteredProblems.length} ${pluralize(
+          'problem',
+          filteredProblems.length
+        )} for level "${level.name}".\n`
+      );
+    }
+
+    problems.push(...filteredProblems);
+  }
+
+  const achievedLevel = determineAchievedLevel(
+    levelResults,
+    scorecardConfig.levels || [],
+    targetLevel
+  );
+
+  const targetLevelAchieved = targetLevel ? achievedLevel === targetLevel : true;
+
+  return {
+    problems,
+    achievedLevel,
+    targetLevelAchieved,
+  };
+}
+
+function determineAchievedLevel(
+  levelResults: Map<string, ScorecardProblem[]>,
+  levels: Array<{ name: string }>,
+  targetLevel?: string
+): string {
+  let lastPassedLevel: string | null = null;
+
+  for (const level of levels) {
+    const levelProblems = levelResults.get(level.name) || [];
+    const hasErrors = levelProblems.some((p) => p.severity === 'error');
+
+    if (hasErrors) {
+      return lastPassedLevel || 'Non Conformant';
+    }
+
+    lastPassedLevel = level.name;
+
+    if (targetLevel && level.name === targetLevel) {
+      return level.name;
+    }
+  }
+
+  return lastPassedLevel || 'Non Conformant';
+}
diff --git a/packages/cli/src/index.ts b/packages/cli/src/index.ts
index 7f03fba463..5b5d65d19c 100644
--- a/packages/cli/src/index.ts
+++ b/packages/cli/src/index.ts
@@ -30,10 +30,12 @@ import { version } from './utils/package.js';
 import { validatePositiveNumber } from './utils/validate-positive-number.js';
 import { validateMountPath } from './utils/validate-mount-path.js';
 import { validateMtlsCommandOption } from './commands/respect/mtls/validate-mtls-command-option.js';
+import { handleScorecardClassic } from './commands/scorecard-classic/index.js';
 
 import type { Arguments } from 'yargs';
 import type { OutputFormat, RuleSeverity } from '@redocly/openapi-core';
 import type { BuildDocsArgv } from './commands/build-docs/types.js';
+import type { ScorecardClassicArgv } from './commands/scorecard-classic/types.js';
 import type { EjectArgv } from './commands/eject.js';
 
 dotenv.config({ path: path.resolve(process.cwd(), './.env') });
@@ -604,7 +606,7 @@ yargs(hideBin(process.argv))
       })
       .check((argv: any) => {
         if (argv.theme && !argv.theme?.openapi)
-          throw Error('Invalid option: theme.openapi not set.');
+          throw new Error('Invalid option: theme.openapi not set.');
         return true;
       }),
     async (argv) => {
@@ -796,6 +798,39 @@ yargs(hideBin(process.argv))
      commandWrapper(handleGenerateArazzo)(argv as Arguments);
    }
  )
+  .command(
+    'scorecard-classic [api]',
+    'Run quality scorecards with multiple rule levels to validate and maintain API description standards.',
+    (yargs) => {
+      return yargs.positional('api', { type: 'string' }).option({
+        config: {
+          describe: 'Path to the config file.',
+          type: 'string',
+        },
+        'project-url': {
+          describe: 'URL to the project scorecard configuration.',
+          type: 'string',
+        },
+        format: {
+          description: 'Use a specific output format.',
+          choices: ['stylish', 'json'],
+          default: 'stylish',
+        },
+        'target-level': {
+          describe: 'Target level for the scorecard.',
+          type: 'string',
+        },
+        verbose: {
+          alias: 'v',
+          describe: 'Apply verbose mode.',
+          type: 'boolean',
+        },
+      });
+    },
+    async (argv) => {
+      commandWrapper(handleScorecardClassic)(argv as Arguments<ScorecardClassicArgv>);
+    }
+  )
   .completion('completion', 'Generate autocomplete script for `redocly` command.')
   .demandCommand(1)
   .middleware([notifyUpdateCliVersion])
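
Reviewer note: the sketch below is illustrative only and not part of the patch. It shows how the new `fetch-scorecard.ts` and `validate-scorecard.ts` modules are intended to compose; the project URL, the access token, and the already-parsed `document` are assumed inputs here, whereas the real `handleScorecardClassic` handler derives them from `argv` and the Redocly config and then prints results via `printScorecardResults`/`printScorecardResultsAsJson`.

```ts
// Illustrative sketch (not part of the diff): composing the scorecard modules directly.
import { BaseResolver } from '@redocly/openapi-core';
import type { Document } from '@redocly/openapi-core';
import { fetchRemoteScorecardAndPlugins } from './remote/fetch-scorecard.js';
import { validateScorecard } from './validation/validate-scorecard.js';

export async function runScorecardSketch(document: Document, accessToken: string) {
  // 1. Pull the scorecard levels (and an optional plugins bundle) from the Redocly project.
  //    The URL and token are placeholder values for this sketch.
  const { scorecard, plugins } = await fetchRemoteScorecardAndPlugins({
    projectUrl: 'https://app.cloud.redocly.com/org/my-org/project/my-project',
    auth: accessToken,
    isApiKey: true,
  });

  // 2. Lint the document once per level; if `plugins` is a code string, it is
  //    evaluated on demand by evaluatePluginsFromCode inside validateScorecard.
  const { achievedLevel, targetLevelAchieved, problems } = await validateScorecard({
    document,
    externalRefResolver: new BaseResolver(),
    scorecardConfig: scorecard!, // non-null: fetchRemoteScorecardAndPlugins throws when no scorecard is configured
    pluginsCodeOrPlugins: plugins,
    targetLevel: 'Gold',
  });

  return { achievedLevel, targetLevelAchieved, problemCount: problems.length };
}
```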