diff --git a/.github/scripts/package.json b/.github/scripts/package.json new file mode 100644 index 000000000..a2e909367 --- /dev/null +++ b/.github/scripts/package.json @@ -0,0 +1,17 @@ +{ + "name": "validator-runner-tests", + "private": true, + "type": "commonjs", + "scripts": { + "test": "jest --runInBand" + }, + "devDependencies": { + "jest": "^29.7.0" + }, + "dependencies": { + "@microsoft.azure/openapi-validator-rulesets": "^2.1.7", + "@stoplight/spectral-core": "^1.18.3", + "@stoplight/json-ref-resolver": "^3.1.6", + "js-yaml": "^4.1.0" + } +} \ No newline at end of file diff --git a/.github/scripts/run-selected-rules.js b/.github/scripts/run-selected-rules.js new file mode 100644 index 000000000..26faffd94 --- /dev/null +++ b/.github/scripts/run-selected-rules.js @@ -0,0 +1,299 @@ +"use strict" + +/* +Lightweight Spectral rules runner +--------------------------------- +Purpose: +- Execute selected linter rules against a set of specs. + +- Parses RULE_NAMES, SPEC_ROOT, FAIL_ON_ERRORS, MAX_FILES, OUTPUT_FILE from env. +- Loads the Spectral ARM ruleset from @microsoft.azure/openapi-validator-rulesets. +- Disables all rules except the requested ones. +- Walks SPEC_ROOT and only includes JSON specs under resource-manager/stable for selected RPs (network, compute, monitor, sql, hdinsight, resource, storage). EXCLUDE_DIRS is ignored for now. +- Runs Spectral and prints findings in a compact format. + +Outputs: +- Console lines formatted as: "SEV | CODE | file[@path] | message". +- Optional OUTPUT_FILE with the same console lines plus a final summary line. 
+*/ +const fs = require('fs') +const path = require('path') +// ============ +// Env parsing +// ============ + +// Inputs +const RULE_NAMES = (process.env.RULE_NAMES || '').split(',').map(s => s.trim()).filter(Boolean) +const SPEC_ROOT = process.env.SPEC_ROOT || '' +const FAIL_ON_ERRORS = /^true$/i.test(process.env.FAIL_ON_ERRORS || 'false') +const OUTPUT_FILE = process.env.OUTPUT_FILE || '' +const MAX_FILES = parseInt(process.env.MAX_FILES || '20000', 10) +// Consider common test/debug environments to avoid hard exits during tests +const IN_TEST = !!(process.env.JEST_WORKER_ID || process.env.npm_lifecycle_event === 'test' || process.env.VSCODE_PID) + +// Quick validation of required inputs +if (!SPEC_ROOT) { + console.error('SPEC_ROOT not provided') + process.exit(1) +} + +// ===================== +// Early-exit: no rules +// ===================== +function writeNoRulesSummary() { + if (!OUTPUT_FILE) return + try { + const dir = path.dirname(OUTPUT_FILE) + fs.mkdirSync(dir, { recursive: true }) + const lines = [ + 'INFO | Runner | rules | No rules specified; nothing to run.', + 'INFO | Runner | summary | Files scanned: 0, Errors: 0, Warnings: 0', + ] + fs.writeFileSync(OUTPUT_FILE, lines.join('\n') + '\n', 'utf8') + } catch {} +} + +// Early no-rules handling (write file + exit 0) before loading heavy deps +if (RULE_NAMES.length === 0) { + writeNoRulesSummary() + process.exit(0) +} + +// ==================================== +// Early-exit: all rules are unknown +// ==================================== +function earlyExitUnknownRules(unknownList) { + if (!OUTPUT_FILE) return false + try { + const dir = path.dirname(OUTPUT_FILE) + fs.mkdirSync(dir, { recursive: true }) + const lines = [ + `WARN | Runner | rules | Unknown rule names: ${unknownList.join(', ')}`, + 'INFO | Runner | summary | Files scanned: 0, Errors: 0, Warnings: 0', + ] + fs.writeFileSync(OUTPUT_FILE, lines.join('\n') + '\n', 'utf8') + } catch {} + return true +} + +// We need known rule names; 
// We need known rule names; gather them cheaply by requiring only the
// exported ruleset object (no Spectral instantiation yet).
try {
  const { spectralRulesets } = require('@microsoft.azure/openapi-validator-rulesets')
  const tentative = spectralRulesets?.azARM?.rules ? Object.keys(spectralRulesets.azARM.rules) : []
  const unknown = RULE_NAMES.filter(n => !tentative.includes(n))
  if (tentative.length > 0 && unknown.length === RULE_NAMES.length) {
    // BUG FIX: only exit early when the warning was actually persisted to
    // OUTPUT_FILE (earlyExitUnknownRules returns true). Previously this
    // exited 0 unconditionally, so with no OUTPUT_FILE the unknown-rules
    // warning was silently dropped. Falling through lets the main run log it.
    if (earlyExitUnknownRules(unknown)) {
      process.exit(0)
    }
  }
} catch {}

// ========================================
// Lazy load heavy deps and init Spectral
// ========================================
let Spectral = null
let Ruleset = null
let Resolver = null
let pathToFileURL = null
let spectralRulesets = null
let deleteRulesPropertiesInPayloadNotValidForSpectralRules = null
let disableRulesInRuleset = null
let spectral = null
let ruleset = null
let allRuleNames = []
let SPECTRAL_OK = true
let spectralInitError = null
let resolverInstance = null

try {
  ;({ Spectral, Ruleset } = require('@stoplight/spectral-core'))
  ;({ Resolver } = require('@stoplight/json-ref-resolver'))
  ;({ pathToFileURL } = require('url'))
  ;({ spectralRulesets, deleteRulesPropertiesInPayloadNotValidForSpectralRules, disableRulesInRuleset } = require('@microsoft.azure/openapi-validator-rulesets'))

  const rulesetPayload = spectralRulesets.azARM
  // Spectral Ruleset constructor requires disallowed props removed
  deleteRulesPropertiesInPayloadNotValidForSpectralRules(rulesetPayload)
  ruleset = new Ruleset(rulesetPayload)

  // Disable all rules, then keep only requested ones enabled
  allRuleNames = Object.keys(ruleset.rules)
  const namesToDisable = allRuleNames.filter(n => !RULE_NAMES.includes(n))
  disableRulesInRuleset(ruleset, namesToDisable)

  spectral = new Spectral()
  // Create a resolver instance and attach it depending on Spectral version
  resolverInstance = new Resolver()
  if (typeof spectral.setResolver === 'function') {
    try { spectral.setResolver(resolverInstance) } catch {}
  }
  spectral.setRuleset(ruleset)
} catch (e) {
  // Initialization failures are reported later by run(); keep the error.
  SPECTRAL_OK = false
  spectralInitError = e
}

// =====================
// Helpers (FS & paths)
// =====================

// Selected resource providers and path filter (resource-manager + stable)
const SELECTED_RPS = ['network', 'compute', 'monitor', 'sql', 'hdinsight', 'resource', 'storage']

// True iff filePath is a .json spec under specification/<rp>/resource-manager/stable
// for one of the selected RPs. Comparison is case-insensitive and tolerant of
// Windows path separators.
function isAllowedSpecFile(filePath) {
  const p = filePath.replace(/\\/g, '/').toLowerCase()
  if (!p.endsWith('.json')) return false
  if (!p.includes('/resource-manager/')) return false
  if (!p.includes('/stable/')) return false
  return SELECTED_RPS.some(rp => p.includes(`/specification/${rp}/`))
}

// Depth-first walk yielding allowed spec files under dir.
function* walk(dir) {
  let entries
  try {
    entries = fs.readdirSync(dir, { withFileTypes: true })
  } catch (e) {
    // ROBUSTNESS: an unreadable directory (permissions, broken symlink, ...)
    // previously aborted the entire scan; skip it and warn instead, matching
    // the per-file "Skipping unreadable file" policy.
    console.warn(`Skipping unreadable directory (${dir}): ${e.message}`)
    return
  }
  for (const entry of entries) {
    const p = path.join(dir, entry.name)
    if (entry.isDirectory()) {
      // Descend into all directories
      yield* walk(p)
    } else if (entry.isFile() && isAllowedSpecFile(p)) {
      yield p
    }
  }
}

// Best-effort mkdir -p for the parent of filePath.
function ensureDirExists(filePath) {
  try {
    const dir = path.dirname(filePath)
    fs.mkdirSync(dir, { recursive: true })
  } catch {}
}

// Best-effort write of all output lines to OUTPUT_FILE (no-op when unset).
function writeOutputFile(lines) {
  if (!OUTPUT_FILE) return
  try {
    ensureDirExists(OUTPUT_FILE)
    fs.writeFileSync(OUTPUT_FILE, lines.join('\n') + '\n', 'utf8')
  } catch (e) {
    console.warn(`Failed to write OUTPUT_FILE (${OUTPUT_FILE}): ${e.message}`)
  }
}

// =====================
// Helpers (I/O & parse)
// =====================

function getSeverityLabel(severity) {
  // Spectral: 0=error, 1=warn, 2=info
  return severity === 0 ? 'error' : severity === 1 ? 'warn' : 'info'
}
// Read and JSON-parse a spec file.
// Returns { ok: true, doc } on success, or { ok: false, errorMsg } when the
// file cannot be read or does not contain valid JSON. Never throws.
function readSpecFile(file) {
  let raw
  try {
    raw = fs.readFileSync(file, 'utf8')
  } catch (readErr) {
    return { ok: false, errorMsg: `Skipping unreadable file: ${readErr.message}` }
  }
  try {
    return { ok: true, doc: JSON.parse(raw) }
  } catch (parseErr) {
    return { ok: false, errorMsg: `Skipping unreadable file: ${parseErr.message}` }
  }
}
// ==============
// Main routine
// ==============
// Walks the spec set (or lints the single SPEC_ROOT file), runs Spectral on
// each JSON spec, prints findings as "SEV | CODE | file[@path] | message",
// and writes an optional OUTPUT_FILE copy ending with a summary line.
// Exit codes: 0 normally; 2 when FAIL_ON_ERRORS and errors were found
// (suppressed under IN_TEST); 1 when SPEC_ROOT is missing (non-test only).
async function run() {
  let errorCount = 0
  let warnCount = 0
  let filesScanned = 0
  const lines = []
  // Echo to console and accumulate for OUTPUT_FILE.
  const logLine = (s) => {
    console.log(s)
    lines.push(s)
  }

  // If Spectral failed to initialize, surface a clear warning to output
  if (!SPECTRAL_OK) {
    const msg = spectralInitError?.message || 'Unknown error'
    logLine(`WARN | Runner | spectral | Initialization failed: ${msg}`)
  }

  // Warn on unknown rule names
  const unknown = RULE_NAMES.filter(n => !allRuleNames.includes(n))
  if (unknown.length > 0) {
    logLine(`WARN | Runner | rules | Unknown rule names: ${unknown.join(', ')}`)
  }

  // Determine if SPEC_ROOT is a file or directory
  let rootStat = null
  try {
    rootStat = fs.statSync(SPEC_ROOT)
  } catch (e) {
    logLine(`WARN | Runner | input | SPEC_ROOT not found or unreadable: ${e.message}`)
    // Summary and graceful exit in tests
    lines.push(`INFO | Runner | summary | Files scanned: 0, Errors: 0, Warnings: 0`)
    writeOutputFile(lines)
    if (!IN_TEST) process.exit(1)
    return
  }

  const isSingleFile = rootStat.isFile()

  if (isSingleFile && !isAllowedSpecFile(SPEC_ROOT)) {
    logLine(`WARN | Runner | input | SPEC_ROOT is not an allowed spec (must be JSON in resource-manager/stable for selected RPs): ${SPEC_ROOT}`)
    lines.push(`INFO | Runner | summary | Files scanned: 0, Errors: 0, Warnings: 0`)
    writeOutputFile(lines)
    return
  }

  const iterator = isSingleFile ? [SPEC_ROOT] : walk(SPEC_ROOT)
  for (const file of iterator) {
    if (filesScanned >= MAX_FILES) {
      logLine(`WARN | Runner | summary | Reached MAX_FILES=${MAX_FILES}, stopping scan early`)
      break
    }
    filesScanned++
    const parsed = readSpecFile(file)
    if (!parsed.ok) {
      logLine(`WARN | Runner | ${file} | ${parsed.errorMsg}`)
      // DEAD CODE FIX: was `if (parsed.skipped) continue; else continue` —
      // readSpecFile never sets `skipped` and both branches were identical.
      continue
    }
    const doc = parsed.doc

    if (SPECTRAL_OK && spectral && pathToFileURL) {
      try {
        const resolveOpts = { documentUri: pathToFileURL(file).toString() }
        // For Spectral versions without setResolver, pass resolver inline
        if (resolverInstance && typeof spectral.setResolver !== 'function') {
          resolveOpts.resolver = resolverInstance
        }
        const results = await spectral.run(doc, { resolve: resolveOpts })
        const filtered = results.filter(r => RULE_NAMES.includes(r.code))
        for (const r of filtered) {
          const loc = r.path?.join('.') || ''
          const sev = getSeverityLabel(r.severity)
          logLine(`${sev.toUpperCase()} | ${r.code} | ${file}${loc ? ' @ ' + loc : ''} | ${r.message}`)
          if (r.severity === 0) errorCount++
          else if (r.severity === 1) warnCount++
        }
      } catch (e) {
        logLine(`WARN | Runner | ${file} | Spectral failed: ${e.message}`)
      }
    }
  }
  // Summary goes to OUTPUT_FILE only (not echoed), matching original behavior.
  lines.push(`INFO | Runner | summary | Files scanned: ${filesScanned}, Errors: ${errorCount}, Warnings: ${warnCount}`)
  writeOutputFile(lines)
  if (!IN_TEST && FAIL_ON_ERRORS && errorCount > 0) {
    console.error(`Found ${errorCount} error(s)`)
    process.exit(2)
  }
}

run().catch(e => {
  console.error(e)
  process.exit(1)
})
true, force: true }) + }) + + test('no rules -> no-op with summary and 0 exit', () => { + const outFile = path.join(artifactsDir, 'no-rules.txt') + const res = run({ SPEC_ROOT: specsDir, RULE_NAMES: '', OUTPUT_FILE: outFile }) + expect(res.status).toBe(0) + const out = fs.readFileSync(outFile, 'utf8') + expect(out).toMatch(/No rules specified; nothing to run/i) + expect(out).toMatch(/Files scanned: 0/i) + }) + + test('unknown rule -> warning is logged', () => { + const outFile = path.join(artifactsDir, 'unknown-rule.txt') + const res = run({ SPEC_ROOT: specsDir, RULE_NAMES: 'DoesNotExist', OUTPUT_FILE: outFile }) + expect(res.status).toBe(0) + const out = fs.readFileSync(outFile, 'utf8') + expect(out).toMatch(/Unknown rule names: DoesNotExist/) + }) + + test('MAX_FILES limits scanning', () => { + // Create many dummy files under allowed path so the runner includes them + for (let i = 0; i < 5; i++) { + writeFile(path.join(allowedDir, `dummy${i}.json`), '{}') + } + const outFile = path.join(artifactsDir, 'maxfiles.txt') + const res = run({ SPEC_ROOT: specsDir, RULE_NAMES: 'PostResponseCodes', OUTPUT_FILE: outFile, MAX_FILES: '2' }) + expect(res.status).toBe(0) + const out = fs.readFileSync(outFile, 'utf8') + expect(out).toMatch(/Reached MAX_FILES=2/) + }) + + test('LRO POST triggers PostResponseCodes no error', () => { + // Copy fixture into allowed path + const fixture = path.join(srcDir, 'lro-post-demo.json') + const target = path.join(allowedDir, 'lro-post-demo.json') + writeFile(target, fs.readFileSync(fixture, 'utf8')) + const outFile = path.join(artifactsDir, 'lro-post.txt') + const res = run({ SPEC_ROOT: target, RULE_NAMES: 'PostResponseCodes', OUTPUT_FILE: outFile, FAIL_ON_ERRORS: 'true' }) + expect(res.status).toBe(0) + const out = fs.readFileSync(outFile, 'utf8') + expect(out).toMatch(/Files scanned:\s*\d+/i) + }) + + test('violating LRO POST triggers PostResponseCodes error', () => { + // Copy violating spec into allowed path and run against that file + 
const fixture = path.join(srcDir, 'bad-lro-post.json') + const badSpec = path.join(allowedDir, 'bad-lro-post.json') + writeFile(badSpec, fs.readFileSync(fixture, 'utf8')) + const outFile = path.join(artifactsDir, 'lro-post-bad.txt') + const res = run({ SPEC_ROOT: badSpec, RULE_NAMES: 'PostResponseCodes', OUTPUT_FILE: outFile, FAIL_ON_ERRORS: 'true' }) + expect(res.status).toBe(0) + const out = fs.readFileSync(outFile, 'utf8') + expect(out).toMatch(/ERROR\s*\|\s*PostResponseCodes/i) + expect(out).toMatch(/Files scanned:\s*\d+/i) + }) + + test('run multiple rules against multiple specs and flag errors', () => { + const outFile = path.join(artifactsDir, 'multi-rules.txt') + // Copy fixtures into allowed path + const badLro = path.join(srcDir, 'bad-lro-post.json') + const badDelete = path.join(srcDir, 'bad-delete-with-body.json') + writeFile(path.join(allowedDir, 'bad-lro-post.json'), fs.readFileSync(badLro, 'utf8')) + writeFile(path.join(allowedDir, 'bad-delete-with-body.json'), fs.readFileSync(badDelete, 'utf8')) + const res = run({ + SPEC_ROOT: allowedDir, + RULE_NAMES: 'PostResponseCodes,DeleteMustNotHaveRequestBody', + OUTPUT_FILE: outFile, + FAIL_ON_ERRORS: 'true' + }) + expect(res.status).toBe(0) + const out = fs.readFileSync(outFile, 'utf8') + expect(out).toMatch(/ERROR\s*\|\s*PostResponseCodes/i) + expect(out).toMatch(/ERROR\s*\|\s*DeleteMustNotHaveRequestBody/i) + expect(out).toMatch(/Files scanned:\s*\d+/i) + }) +}) diff --git a/.github/scripts/tests/specs/bad-delete-with-body.json b/.github/scripts/tests/specs/bad-delete-with-body.json new file mode 100644 index 000000000..2ec792051 --- /dev/null +++ b/.github/scripts/tests/specs/bad-delete-with-body.json @@ -0,0 +1,67 @@ +{ + "swagger": "2.0", + "info": { + "title": "Bad Delete With Body", + "version": "2021-01-01" + }, + "host": "management.azure.com", + "schemes": [ + "https" + ], + "basePath": "/", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Example/widgets/{widgetName}": { + "delete": { + "operationId": "Widgets_Delete", + "parameters": [ + { + "name": "subscriptionId", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "resourceGroupName", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "widgetName", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "api-version", + "in": "query", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object" + } + } + ], + "responses": { + "200": { + "description": "OK" + }, + "default": { + "description": "Error" + } + } + } + } + } +} \ No newline at end of file diff --git a/.github/scripts/tests/specs/bad-lro-post.json b/.github/scripts/tests/specs/bad-lro-post.json new file mode 100644 index 000000000..d3faad16f --- /dev/null +++ b/.github/scripts/tests/specs/bad-lro-post.json @@ -0,0 +1,35 @@ +{ + "swagger": "2.0", + "info": { + "title": "Bad LRO POST", + "version": "1.0.0" + }, + "host": "management.azure.com", + "schemes": [ + "https" + ], + "paths": { + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Demo/things/{thingName}/action": { + "post": { + "operationId": "Things_Create_Bad", + "x-ms-long-running-operation": true, + "parameters": [ + { + "name": "api-version", + "in": "query", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "OK" + }, + "default": { + "description": "Error" + } + } + } + } + } +} \ No newline at end of file diff --git a/.github/scripts/tests/specs/lro-post-demo.json b/.github/scripts/tests/specs/lro-post-demo.json new file mode 100644 index 000000000..0cfe5f6f9 --- /dev/null +++ b/.github/scripts/tests/specs/lro-post-demo.json @@ -0,0 +1,59 @@ +{ + "swagger": "2.0", + "info": { + "title": "Demo LRO POST", + "version": 
"1.0" + }, + "host": "management.azure.com", + "schemes": [ + "https" + ], + "paths": { + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Demo/things/{thingName}": { + "post": { + "operationId": "Things_Create_Demo", + "x-ms-long-running-operation": true, + "parameters": [ + { + "name": "subscriptionId", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "resourceGroupName", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "thingName", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "api-version", + "in": "query", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "created", + "schema": { + "$ref": "#/definitions/FooResource" + } + }, + "202": { + "description": "accepted" + }, + "default": { + "description": "Error" + } + } + } + } + } +} \ No newline at end of file diff --git a/.github/workflows/staging-lint-checks.yaml b/.github/workflows/staging-lint-checks.yaml new file mode 100644 index 000000000..988e58af9 --- /dev/null +++ b/.github/workflows/staging-lint-checks.yaml @@ -0,0 +1,83 @@ +name: Staging Lint Checks + +on: + pull_request: + types: [opened, edited, synchronize, labeled, unlabeled] + +env: + SPEC_REPO: Azure/azure-rest-api-specs + SPEC_CHECKOUT_PATH: specs + EXCLUDE_DIRS: '[]' # List of folder substrings to exclude (JSON array), e.g. 
["node_modules",".git","specification/common-types"] + FAIL_ON_ERRORS: "false" # Set to "true" to fail the job when errors are found + +jobs: + run-selected-rules: + runs-on: ubuntu-latest + steps: + - name: Checkout validator repository + uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 18 + + - name: Determine rule names (labels first, fallback to body) + uses: actions/github-script@v7 + with: + script: | + const labels = (context.payload.pull_request?.labels || []).map(l => l.name || ""); + const body = context.payload.pull_request?.body || ""; + + // From labels: expect labels like test- + const fromLabels = labels + .filter(n => /^test-/i.test(n)) + .map(n => n.replace(/^test-/i, "").trim()) + .filter(Boolean); + + // From body: parse a line like "rules: A, B" (case-insensitive) + const ruleLine = (body.match(/^\s*rules?\s*:(.*)$/gim) || []) + .map(l => l.split(":")[1] || "") + .join(","); + + const fromRuleLine = ruleLine + .split(/[\n,]/) + .map(s => s.trim()) + .filter(Boolean); + + // Prefer label selection when available; otherwise use body + const chosen = (fromLabels.length > 0) ? 
fromLabels : fromRuleLine; + const all = Array.from(new Set(chosen)); + const value = all.join(','); + core.exportVariable('RULE_NAMES', value); + core.info(`Selected rules: ${value || ""}`); + + - name: Checkout specs repository + uses: actions/checkout@v4 + with: + repository: ${{ env.SPEC_REPO }} + path: ${{ env.SPEC_CHECKOUT_PATH }} + + - name: Install runner dependencies + run: | + npm --version + npm init -y + npm i @microsoft.azure/openapi-validator-rulesets @stoplight/spectral-core @stoplight/json-ref-resolver js-yaml + + - name: Run selected rules on specs + env: + RULE_NAMES: ${{ env.RULE_NAMES }} + SPEC_ROOT: ${{ github.workspace }}/${{ env.SPEC_CHECKOUT_PATH }} + EXCLUDE_DIRS: ${{ env.EXCLUDE_DIRS }} + FAIL_ON_ERRORS: ${{ env.FAIL_ON_ERRORS }} + OUTPUT_FILE: ${{ github.workspace }}/artifacts/linter-findings.txt + run: | + node .github/scripts/run-selected-rules.js + + - name: Upload findings artifact + if: always() + uses: actions/upload-artifact@v4 + with: + name: linter-findings + path: artifacts/linter-findings.txt + if-no-files-found: warn diff --git a/.vscode/settings.json b/.vscode/settings.json index 10eda7a8b..55da57dd7 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,4 +1,8 @@ { + "jest.rootPath": ".github/scripts", + "jest.jestCommandLine": "npm test --", + "jest.enableInlineErrorMessages": true, + "jest.autoRun": "off", "[typescript]": { "editor.formatOnSave": true, "editor.formatOnPaste": true,