This feature only applies to web-based content, such as websites,
- Confluence, YouTube, and GitHub files.
+ Confluence, YouTube, GitHub, and Jira files.
diff --git a/server/endpoints/extensions/index.js b/server/endpoints/extensions/index.js
index 0a4acea6cc0..8cf91d78dd9 100644
--- a/server/endpoints/extensions/index.js
+++ b/server/endpoints/extensions/index.js
@@ -170,6 +170,50 @@ function extensionEndpoints(app) {
}
}
);
+
+ app.post(
+ "/ext/obsidian/vault",
+ [validatedRequest, flexUserRoleValid([ROLES.admin, ROLES.manager])],
+ async (request, response) => {
+ try {
+ const responseFromProcessor =
+ await new CollectorApi().forwardExtensionRequest({
+ endpoint: "/ext/obsidian/vault",
+ method: "POST",
+ body: request.body,
+ });
+ await Telemetry.sendTelemetry("extension_invoked", {
+ type: "obsidian_vault",
+ });
+ response.status(200).json(responseFromProcessor);
+ } catch (e) {
+ console.error(e);
+ response.sendStatus(500).end();
+ }
+ }
+ );
+
+ app.post(
+ "/ext/jira",
+ [validatedRequest, flexUserRoleValid([ROLES.admin, ROLES.manager])],
+ async (request, response) => {
+ try {
+ const responseFromProcessor =
+ await new CollectorApi().forwardExtensionRequest({
+ endpoint: "/ext/jira",
+ method: "POST",
+ body: request.body,
+ });
+ await Telemetry.sendTelemetry("extension_invoked", {
+ type: "jira",
+ });
+ response.status(200).json(responseFromProcessor);
+ } catch (e) {
+ console.error(e);
+ response.sendStatus(500).end();
+ }
+ }
+ );
}
module.exports = { extensionEndpoints };
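For reviewers, here is a minimal sketch of exercising the new route. The host, port, `/api` mount point, and API key are assumptions about a typical deployment; the body fields are drawn from the Jira loader tests later in this series, not a documented contract:

```js
// Hypothetical call to the new Jira extension endpoint (Node 18+ global fetch).
// The server forwards the body to the collector's /ext/jira handler and
// records an "extension_invoked" telemetry event.
(async () => {
  const res = await fetch("http://localhost:3001/api/ext/jira", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: "Bearer <anythingllm-api-key>", // checked by validatedRequest
    },
    body: JSON.stringify({
      baseUrl: "https://example.atlassian.net", // Jira instance URL
      projectKey: "TEST", // project to import issues from
      username: "user@example.com", // or pass personalAccessToken instead
      accessToken: "<jira-api-token>",
    }),
  });
  console.log(await res.json());
})();
```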
diff --git a/server/jobs/sync-watched-documents.js b/server/jobs/sync-watched-documents.js
index 0b3a72d1d3d..7975b58b6db 100644
--- a/server/jobs/sync-watched-documents.js
+++ b/server/jobs/sync-watched-documents.js
@@ -1,152 +1,213 @@
-const { Document } = require('../models/documents.js');
-const { DocumentSyncQueue } = require('../models/documentSyncQueue.js');
-const { CollectorApi } = require('../utils/collectorApi');
+const { Document } = require("../models/documents.js");
+const { DocumentSyncQueue } = require("../models/documentSyncQueue.js");
+const { CollectorApi } = require("../utils/collectorApi");
const { fileData } = require("../utils/files");
-const { log, conclude, updateSourceDocument } = require('./helpers/index.js');
-const { getVectorDbClass } = require('../utils/helpers/index.js');
-const { DocumentSyncRun } = require('../models/documentSyncRun.js');
+const { log, conclude, updateSourceDocument } = require("./helpers/index.js");
+const { getVectorDbClass } = require("../utils/helpers/index.js");
+const { DocumentSyncRun } = require("../models/documentSyncRun.js");
(async () => {
try {
const queuesToProcess = await DocumentSyncQueue.staleDocumentQueues();
if (queuesToProcess.length === 0) {
- log('No outstanding documents to sync. Exiting.');
+ log("No outstanding documents to sync. Exiting.");
return;
}
const collector = new CollectorApi();
if (!(await collector.online())) {
- log('Could not reach collector API. Exiting.');
+ log("Could not reach collector API. Exiting.");
return;
}
- log(`${queuesToProcess.length} watched documents have been found to be stale and will be updated now.`)
+ log(
+ `${queuesToProcess.length} watched documents have been found to be stale and will be updated now.`
+ );
for (const queue of queuesToProcess) {
let newContent = null;
const document = queue.workspaceDoc;
const workspace = document.workspace;
- const { metadata, type, source } = Document.parseDocumentTypeAndSource(document);
+ const { metadata, type, source } =
+ Document.parseDocumentTypeAndSource(document);
if (!metadata || !DocumentSyncQueue.validFileTypes.includes(type)) {
// Document is either broken, invalid, or not supported so drop it from future queues.
- log(`Document ${document.filename} has no metadata, is broken, or invalid and has been removed from all future runs.`)
+ log(
+        `Document ${document.filename} has no metadata, is broken, or is invalid and has been removed from all future runs.`
+ );
await DocumentSyncQueue.unwatch(document);
continue;
}
- if (['link', 'youtube'].includes(type)) {
+ if (["link", "youtube"].includes(type)) {
const response = await collector.forwardExtensionRequest({
endpoint: "/ext/resync-source-document",
method: "POST",
body: JSON.stringify({
type,
- options: { link: source }
- })
+ options: { link: source },
+ }),
});
newContent = response?.content;
}
- if (['confluence', 'github', 'gitlab', 'drupalwiki'].includes(type)) {
+ if (
+ ["confluence", "github", "gitlab", "drupalwiki", "jira"].includes(type)
+ ) {
const response = await collector.forwardExtensionRequest({
endpoint: "/ext/resync-source-document",
method: "POST",
body: JSON.stringify({
type,
- options: { chunkSource: metadata.chunkSource }
- })
+ options: { chunkSource: metadata.chunkSource },
+ }),
});
newContent = response?.content;
}
if (!newContent) {
// Check if the last "x" runs were all failures (not exits!). If so - remove the job entirely since it is broken.
- const failedRunCount = (await DocumentSyncRun.where({ queueId: queue.id }, DocumentSyncQueue.maxRepeatFailures, { createdAt: 'desc' })).filter((run) => run.status === DocumentSyncRun.statuses.failed).length;
+ const failedRunCount = (
+ await DocumentSyncRun.where(
+ { queueId: queue.id },
+ DocumentSyncQueue.maxRepeatFailures,
+ { createdAt: "desc" }
+ )
+ ).filter(
+ (run) => run.status === DocumentSyncRun.statuses.failed
+ ).length;
if (failedRunCount >= DocumentSyncQueue.maxRepeatFailures) {
- log(`Document ${document.filename} has failed to refresh ${failedRunCount} times continuously and will now be removed from the watched document set.`)
+ log(
+ `Document ${document.filename} has failed to refresh ${failedRunCount} times continuously and will now be removed from the watched document set.`
+ );
await DocumentSyncQueue.unwatch(document);
continue;
}
- log(`Failed to get a new content response from collector for source ${source}. Skipping, but will retry next worker interval. Attempt ${failedRunCount === 0 ? 1 : failedRunCount}/${DocumentSyncQueue.maxRepeatFailures}`);
- await DocumentSyncQueue.saveRun(queue.id, DocumentSyncRun.statuses.failed, { filename: document.filename, workspacesModified: [], reason: 'No content found.' })
+ log(
+ `Failed to get a new content response from collector for source ${source}. Skipping, but will retry next worker interval. Attempt ${failedRunCount === 0 ? 1 : failedRunCount}/${DocumentSyncQueue.maxRepeatFailures}`
+ );
+ await DocumentSyncQueue.saveRun(
+ queue.id,
+ DocumentSyncRun.statuses.failed,
+ {
+ filename: document.filename,
+ workspacesModified: [],
+ reason: "No content found.",
+ }
+ );
continue;
}
- const currentDocumentData = await fileData(document.docpath)
+ const currentDocumentData = await fileData(document.docpath);
if (currentDocumentData.pageContent === newContent) {
- const nextSync = DocumentSyncQueue.calcNextSync(queue)
- log(`Source ${source} is unchanged and will be skipped. Next sync will be ${nextSync.toLocaleString()}.`);
- await DocumentSyncQueue._update(
+ const nextSync = DocumentSyncQueue.calcNextSync(queue);
+ log(
+ `Source ${source} is unchanged and will be skipped. Next sync will be ${nextSync.toLocaleString()}.`
+ );
+ await DocumentSyncQueue._update(queue.id, {
+ lastSyncedAt: new Date().toISOString(),
+ nextSyncAt: nextSync.toISOString(),
+ });
+ await DocumentSyncQueue.saveRun(
queue.id,
+ DocumentSyncRun.statuses.exited,
{
- lastSyncedAt: new Date().toISOString(),
- nextSyncAt: nextSync.toISOString(),
+ filename: document.filename,
+ workspacesModified: [],
+ reason: "Content unchanged.",
}
);
- await DocumentSyncQueue.saveRun(queue.id, DocumentSyncRun.statuses.exited, { filename: document.filename, workspacesModified: [], reason: 'Content unchanged.' })
continue;
}
// update the defined document and workspace vectorDB with the latest information
// it will skip cache and create a new vectorCache file.
const vectorDatabase = getVectorDbClass();
- await vectorDatabase.deleteDocumentFromNamespace(workspace.slug, document.docId);
- await vectorDatabase.addDocumentToNamespace(
+ await vectorDatabase.deleteDocumentFromNamespace(
workspace.slug,
- { ...currentDocumentData, pageContent: newContent, docId: document.docId },
- document.docpath,
- true
+ document.docId
);
- updateSourceDocument(
- document.docpath,
+ await vectorDatabase.addDocumentToNamespace(
+ workspace.slug,
{
...currentDocumentData,
pageContent: newContent,
docId: document.docId,
- published: (new Date).toLocaleString(),
- // Todo: Update word count and token_estimate?
- }
- )
- log(`Workspace "${workspace.name}" vectors of ${source} updated. Document and vector cache updated.`)
-
+ },
+ document.docpath,
+ true
+ );
+ updateSourceDocument(document.docpath, {
+ ...currentDocumentData,
+ pageContent: newContent,
+ docId: document.docId,
+ published: new Date().toLocaleString(),
+ // Todo: Update word count and token_estimate?
+ });
+ log(
+ `Workspace "${workspace.name}" vectors of ${source} updated. Document and vector cache updated.`
+ );
// Now we can bloom the results to all matching documents in all other workspaces
const workspacesModified = [workspace.slug];
- const moreReferences = await Document.where({
- id: { not: document.id },
- filename: document.filename
- }, null, null, { workspace: true });
+ const moreReferences = await Document.where(
+ {
+ id: { not: document.id },
+ filename: document.filename,
+ },
+ null,
+ null,
+ { workspace: true }
+ );
if (moreReferences.length !== 0) {
- log(`${source} is referenced in ${moreReferences.length} other workspaces. Updating those workspaces as well...`)
+ log(
+ `${source} is referenced in ${moreReferences.length} other workspaces. Updating those workspaces as well...`
+ );
for (const additionalDocumentRef of moreReferences) {
const additionalWorkspace = additionalDocumentRef.workspace;
workspacesModified.push(additionalWorkspace.slug);
- await vectorDatabase.deleteDocumentFromNamespace(additionalWorkspace.slug, additionalDocumentRef.docId);
+ await vectorDatabase.deleteDocumentFromNamespace(
+ additionalWorkspace.slug,
+ additionalDocumentRef.docId
+ );
await vectorDatabase.addDocumentToNamespace(
additionalWorkspace.slug,
- { ...currentDocumentData, pageContent: newContent, docId: additionalDocumentRef.docId },
- additionalDocumentRef.docpath,
+ {
+ ...currentDocumentData,
+ pageContent: newContent,
+ docId: additionalDocumentRef.docId,
+ },
+ additionalDocumentRef.docpath
+ );
+ log(
+          `Workspace "${additionalWorkspace.name}" vectors for ${source} were also updated with the new content from cache.`
);
- log(`Workspace "${additionalWorkspace.name}" vectors for ${source} was also updated with the new content from cache.`)
}
}
const nextRefresh = DocumentSyncQueue.calcNextSync(queue);
- log(`${source} has been refreshed in all workspaces it is currently referenced in. Next refresh will be ${nextRefresh.toLocaleString()}.`)
- await DocumentSyncQueue._update(
+ log(
+ `${source} has been refreshed in all workspaces it is currently referenced in. Next refresh will be ${nextRefresh.toLocaleString()}.`
+ );
+ await DocumentSyncQueue._update(queue.id, {
+ lastSyncedAt: new Date().toISOString(),
+ nextSyncAt: nextRefresh.toISOString(),
+ });
+ await DocumentSyncQueue.saveRun(
queue.id,
+ DocumentSyncRun.statuses.success,
{
- lastSyncedAt: new Date().toISOString(),
- nextSyncAt: nextRefresh.toISOString(),
+ filename: document.filename,
+ workspacesModified,
}
);
- await DocumentSyncQueue.saveRun(queue.id, DocumentSyncRun.statuses.success, { filename: document.filename, workspacesModified })
}
} catch (e) {
- console.error(e)
- log(`errored with ${e.message}`)
+ console.error(e);
+ log(`errored with ${e.message}`);
} finally {
conclude();
}
diff --git a/server/models/documentSyncQueue.js b/server/models/documentSyncQueue.js
index b4e9790ce1c..ceb1ef87d77 100644
--- a/server/models/documentSyncQueue.js
+++ b/server/models/documentSyncQueue.js
@@ -4,7 +4,7 @@ const { SystemSettings } = require("./systemSettings");
const { Telemetry } = require("./telemetry");
/**
- * @typedef {('link'|'youtube'|'confluence'|'github'|'gitlab')} validFileType
+ * @typedef {('link'|'youtube'|'confluence'|'github'|'gitlab'|'jira')} validFileType
*/
const DocumentSyncQueue = {
@@ -17,6 +17,7 @@ const DocumentSyncQueue = {
"github",
"gitlab",
"drupalwiki",
+ "jira",
],
   defaultStaleAfter: 604800000, // 7 days in ms
maxRepeatFailures: 5, // How many times a run can fail in a row before pruning.
@@ -62,6 +63,7 @@ const DocumentSyncQueue = {
if (chunkSource.startsWith("github://")) return true; // If is a GitHub file reference
if (chunkSource.startsWith("gitlab://")) return true; // If is a GitLab file reference
if (chunkSource.startsWith("drupalwiki://")) return true; // If is a DrupalWiki document link
+ if (chunkSource.startsWith("jira://")) return true; // If is a Jira document link
return false;
},
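For illustration, the widened prefix check makes Jira sources watchable. A condensed sketch of the logic above — only the prefixes visible in this hunk are listed, and the chunkSource values are hypothetical:

```js
// Condensed sketch of the startsWith checks above (the real method also
// handles the other connector prefixes shown earlier in the file).
const isWatchable = (chunkSource) =>
  ["github://", "gitlab://", "drupalwiki://", "jira://"].some((prefix) =>
    chunkSource.startsWith(prefix)
  );

console.log(isWatchable("jira://https://example.atlassian.net/browse/TEST-1")); // true
console.log(isWatchable("pdf://invoices/2025.pdf")); // false
```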
diff --git a/server/models/documents.js b/server/models/documents.js
index a283311537e..6968c5c77bc 100644
--- a/server/models/documents.js
+++ b/server/models/documents.js
@@ -243,7 +243,7 @@ const Document = {
// Some data sources have encoded params in them we don't want to log - so strip those details.
_stripSource: function (sourceString, type) {
- if (["confluence", "github"].includes(type)) {
+ if (["confluence", "github", "jira"].includes(type)) {
const _src = new URL(sourceString);
_src.search = ""; // remove all search params that are encoded for resync.
return _src.toString();
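The effect of widening `_stripSource` to Jira is easy to see in isolation; a small sketch with a hypothetical source string:

```js
// What _stripSource now does for Jira sources: drop query params (which can
// carry encoded resync details) before the source string is logged.
const _src = new URL("https://example.atlassian.net/browse/TEST-1?accessToken=abc");
_src.search = ""; // remove all search params that are encoded for resync
console.log(_src.toString()); // -> https://example.atlassian.net/browse/TEST-1
```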
From 97bedecf4c86fda5a2473e24fe5c1ce030fb1528 Mon Sep 17 00:00:00 2001
From: peyt
Date: Mon, 29 Sep 2025 09:30:57 +0200
Subject: [PATCH 2/3] feat: add Jira data connector tests
- Add basic tests for the Jira data connector
- Update Jira loader index
Closes #4014
---
 .../Jira/JiraLoader/jira-issue-loader.test.js | 92 ++++++++++++++
 .../extensions/Jira/jira-service.test.js | 118 ++++++++++++++++++
.../utils/extensions/Jira/JiraLoader/index.js | 4 -
3 files changed, 210 insertions(+), 4 deletions(-)
 create mode 100644 collector/__tests__/utils/extensions/Jira/JiraLoader/jira-issue-loader.test.js
 create mode 100644 collector/__tests__/utils/extensions/Jira/jira-service.test.js
diff --git a/collector/__tests__/utils/extensions/Jira/JiraLoader/jira-issue-loader.test.js b/collector/__tests__/utils/extensions/Jira/JiraLoader/jira-issue-loader.test.js
new file mode 100644
index 00000000000..644ce7c48fb
--- /dev/null
+++ b/collector/__tests__/utils/extensions/Jira/JiraLoader/jira-issue-loader.test.js
@@ -0,0 +1,92 @@
+const { JiraIssueLoader } = require("../../../../../utils/extensions/Jira/JiraLoader");
+
+describe("JiraIssueLoader", () => {
+ const baseUrl = "https://example.atlassian.net";
+ const projectKey = "TEST";
+ const username = "user";
+ const accessToken = "token";
+ const personalAccessToken = "pat";
+
+ let loader;
+
+ beforeEach(() => {
+ loader = new JiraIssueLoader({ baseUrl, projectKey, username, accessToken });
+ });
+
+ test("generates Basic auth header with username and token", () => {
+ const expected = `Basic ${Buffer.from(`${username}:${accessToken}`).toString("base64")}`;
+ expect(loader.authorizationHeader).toBe(expected);
+ });
+
+ test("generates Bearer auth header with personal access token", () => {
+ const patLoader = new JiraIssueLoader({ baseUrl, projectKey, personalAccessToken });
+ expect(patLoader.authorizationHeader).toBe(`Bearer ${personalAccessToken}`);
+ });
+
+ test("createDocumentFromIssue extracts code blocks correctly", () => {
+ const issue = {
+ id: "1",
+ key: "TEST-1",
+ fields: {
+ summary: "Test Issue",
+ description: {
+ storage: {
+            // NOTE: assumed Confluence-style storage markup for a "js" code
+            // macro, since this loader was adapted from the Confluence loader.
+            value: `<ac:structured-macro ac:name="code"><ac:parameter ac:name="language">js</ac:parameter><ac:plain-text-body><![CDATA[console.log("Hello World");]]></ac:plain-text-body></ac:structured-macro>`,
+ }
+ },
+ status: { name: "Open" },
+ issuetype: { name: "Task" },
+ creator: { displayName: "Alice" },
+ created: "2025-01-01T00:00:00.000Z",
+ },
+ };
+
+ const doc = loader.createDocumentFromIssue(issue);
+
+ expect(doc.pageContent).toContain("```js\nconsole.log(\"Hello World\");\n```");
+ expect(doc.metadata.id).toBe("1");
+ expect(doc.metadata.key).toBe("TEST-1");
+ expect(doc.metadata.status).toBe("Open");
+ expect(doc.metadata.title).toBe("Test Issue");
+ expect(doc.metadata.created_by).toBe("Alice");
+ expect(doc.metadata.url).toBe(`${baseUrl}/jira/browse/TEST-1`);
+ });
+
+ test("load returns empty array on fetch failure", async () => {
+ // Suppress console.error output
+ const consoleSpy = jest.spyOn(console, "error").mockImplementation(() => {});
+
+ // Mock fetchJiraData to throw an error
+ jest.spyOn(loader, "fetchJiraData").mockImplementation(async () => {
+ throw new Error("Network error");
+ });
+
+ const result = await loader.load();
+ expect(result).toEqual([]);
+ expect(consoleSpy).toHaveBeenCalledWith("Error:", expect.any(Error));
+
+ consoleSpy.mockRestore();
+ });
+
+ test("fetchAllIssuesInProject handles paginated results", async () => {
+ const totalIssues = 4;
+
+ // Mock fetchJiraData to simulate paginated API
+ jest.spyOn(loader, "fetchJiraData").mockImplementation(async (url) => {
+ const urlObj = new URL(url);
+ const startAt = parseInt(urlObj.searchParams.get("startAt") || "0", 10);
+ const limit = parseInt(urlObj.searchParams.get("maxResults") || "25", 10);
+
+ const issues = [];
+ for (let i = startAt + 1; i <= Math.min(startAt + limit, totalIssues); i++) {
+ issues.push({ id: i, key: `TEST-${i}`, fields: {} });
+ }
+
+ return { issues, total: totalIssues };
+ });
+
+ const issues = await loader.fetchAllIssuesInProject();
+ expect(issues).toHaveLength(totalIssues);
+ expect(issues.map(i => i.key)).toEqual(["TEST-1", "TEST-2", "TEST-3", "TEST-4"]);
+ });
+});
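The pagination test implies a fetch loop along these lines — a hedged reconstruction, not the loader's actual implementation; the URL uses Jira's standard REST search endpoint, which accepts `startAt`/`maxResults` and returns `{ issues, total }`:

```js
// Hypothetical sketch of the paginated fetch that fetchAllIssuesInProject
// performs: page through the search API until `total` issues are collected.
async function fetchAllIssuesInProject(loader, maxResults = 25) {
  const collected = [];
  let startAt = 0;
  while (true) {
    const url =
      `${loader.baseUrl}/rest/api/2/search` +
      `?jql=project=${loader.projectKey}&startAt=${startAt}&maxResults=${maxResults}`;
    const { issues = [], total = 0 } = await loader.fetchJiraData(url);
    collected.push(...issues);
    if (issues.length === 0 || collected.length >= total) break;
    startAt += issues.length; // advance past the page we just consumed
  }
  return collected;
}
```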
diff --git a/collector/__tests__/utils/extensions/Jira/jira-service.test.js b/collector/__tests__/utils/extensions/Jira/jira-service.test.js
new file mode 100644
index 00000000000..815f9d9d4f6
--- /dev/null
+++ b/collector/__tests__/utils/extensions/Jira/jira-service.test.js
@@ -0,0 +1,118 @@
+process.env.NODE_ENV = "development";
+process.env.STORAGE_DIR = require("os").tmpdir(); // platform-neutral temp dir (fs is mocked below)
+
+const fs = require("fs");
+const path = require("path");
+const { v4 } = require("uuid");
+const { loadJira, fetchJiraIssue } = require("../../../../utils/extensions/Jira");
+const { JiraIssueLoader } = require("../../../../utils/extensions/Jira/JiraLoader");
+const { writeToServerDocuments, sanitizeFileName } = require("../../../../utils/files");
+const { tokenizeString } = require("../../../../utils/tokenizer");
+
+jest.mock("../../../../utils/extensions/Jira/JiraLoader");
+jest.mock("../../../../utils/files");
+jest.mock("../../../../utils/tokenizer");
+jest.mock("fs");
+
+describe("Jira Service", () => {
+ const mockResponse = { locals: { encryptionWorker: { encrypt: jest.fn((s) => `encrypted:${s}`) } } };
+
+ beforeEach(() => {
+ jest.clearAllMocks();
+ });
+
+ describe("loadJira", () => {
+ test("fails when no credentials provided", async () => {
+ const result = await loadJira({ baseUrl: "https://example.atlassian.net", projectKey: "TEST" }, mockResponse);
+ expect(result.success).toBe(false);
+ expect(result.reason).toContain("You need either a personal access token");
+ });
+
+ test("fails when invalid baseUrl", async () => {
+ const result = await loadJira({ baseUrl: "invalid-url", projectKey: "TEST", personalAccessToken: "pat" }, mockResponse);
+ expect(result.success).toBe(false);
+ expect(result.reason).toContain("Provided base URL is not a valid URL");
+ });
+
+ test("saves documents correctly when Jira returns issues", async () => {
+ const mockDocs = [
+ {
+ pageContent: "Test content",
+ metadata: { title: "Issue 1", url: "https://example.atlassian.net/browse/TEST-1", source: "Test source" },
+ },
+ ];
+
+ JiraIssueLoader.mockImplementation(() => ({
+ load: jest.fn().mockResolvedValue(mockDocs),
+ }));
+
+ fs.existsSync.mockReturnValue(false);
+ fs.mkdirSync.mockImplementation(() => {});
+
+ sanitizeFileName.mockImplementation((s) => s);
+ tokenizeString.mockReturnValue(5);
+ writeToServerDocuments.mockImplementation(() => {});
+
+ const result = await loadJira(
+ { baseUrl: "https://example.atlassian.net", projectKey: "TEST", personalAccessToken: "pat" },
+ mockResponse
+ );
+
+ expect(result.success).toBe(true);
+ expect(writeToServerDocuments).toHaveBeenCalled();
+ expect(tokenizeString).toHaveBeenCalledWith("Test content");
+ expect(mockResponse.locals.encryptionWorker.encrypt).toHaveBeenCalled();
+ });
+ });
+
+ describe("fetchJiraIssue", () => {
+ test("fails when required params are missing", async () => {
+ const result = await fetchJiraIssue({ baseUrl: null, pageUrl: "url", projectKey: "TEST", username: "user", accessToken: "token" });
+ expect(result.success).toBe(false);
+ expect(result.reason).toContain("You need either a username and access token");
+ });
+
+ test("returns content when Jira issue found", async () => {
+ const mockDocs = [
+ { pageContent: "Issue content", metadata: { url: "url" } },
+ ];
+
+ JiraIssueLoader.mockImplementation(() => ({
+ load: jest.fn().mockResolvedValue(mockDocs),
+ }));
+
+ const result = await fetchJiraIssue({
+ baseUrl: "https://example.atlassian.net",
+ pageUrl: "url",
+ projectKey: "TEST",
+ username: "user",
+ accessToken: "token",
+ });
+
+ expect(result.success).toBe(true);
+ expect(result.content).toBe("Issue content");
+ });
+
+ test("returns failure when issue not found", async () => {
+ const mockDocs = [
+ { pageContent: "Other content", metadata: { url: "other-url" } },
+ ];
+
+ JiraIssueLoader.mockImplementation(() => ({
+ load: jest.fn().mockResolvedValue(mockDocs),
+ }));
+
+ const result = await fetchJiraIssue({
+ baseUrl: "https://example.atlassian.net",
+ pageUrl: "url",
+ projectKey: "TEST",
+ username: "user",
+ accessToken: "token",
+ });
+
+ expect(result.success).toBe(false);
+ expect(result.content).toBeNull();
+ expect(result.reason).toContain("Target page could not be found");
+ });
+ });
+});
diff --git a/collector/utils/extensions/Jira/JiraLoader/index.js b/collector/utils/extensions/Jira/JiraLoader/index.js
index 4761bc4a190..1a447b93efb 100644
--- a/collector/utils/extensions/Jira/JiraLoader/index.js
+++ b/collector/utils/extensions/Jira/JiraLoader/index.js
@@ -1,7 +1,3 @@
-/*
- * This is a custom implementation of the Confluence langchain loader. There was an issue where
- * code blocks were not being extracted. This is a temporary fix until this issue is resolved.*/
-
const { htmlToText } = require("html-to-text");
class JiraIssueLoader {
From 9fd4d56ed17ba9930afcce294a29af57aa0037ff Mon Sep 17 00:00:00 2001
From: peyt
Date: Fri, 3 Oct 2025 17:47:58 +0200
Subject: [PATCH 3/3] feat: PR draft changes
- Set untranslated Spanish Jira strings to null so the English fallback is used
Closes #4014
---
frontend/src/locales/es/common.js | 42 +++++++++++++++----------------
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/frontend/src/locales/es/common.js b/frontend/src/locales/es/common.js
index 4bff28de401..469a84be8fa 100644
--- a/frontend/src/locales/es/common.js
+++ b/frontend/src/locales/es/common.js
@@ -820,27 +820,27 @@ const TRANSLATIONS = {
"Una vez completado, el contenido de la página estará disponible para incrustar en los espacios de trabajo en el selector de documentos.",
},
jira: {
- name: "Jira",
- description: "",
- deployment_type: "",
- deployment_type_explained: "",
- base_url: "",
- base_url_explained: "",
- project_key: "",
- project_key_explained: "",
- username: "",
- username_explained: "",
- auth_type: "",
- auth_type_explained: "",
- auth_type_username: "",
- auth_type_personal: "",
- token: "",
- token_explained_start: "",
- token_explained_link: "",
- token_desc: "",
- pat_token: "",
- pat_token_explained: "",
- task_explained: "",
+ name: null,
+ description: null,
+ deployment_type: null,
+ deployment_type_explained: null,
+ base_url: null,
+ base_url_explained: null,
+ project_key: null,
+ project_key_explained: null,
+ username: null,
+ username_explained: null,
+ auth_type: null,
+ auth_type_explained: null,
+ auth_type_username: null,
+ auth_type_personal: null,
+ token: null,
+ token_explained_start: null,
+ token_explained_link: null,
+ token_desc: null,
+ pat_token: null,
+ pat_token_explained: null,
+ task_explained: null,
},
manage: {
documents: "Documentos",