diff --git a/.asciidoctor/docinfo-footer.html b/.asciidoctor/docinfo-footer.html new file mode 100644 index 0000000000..a43ff6f707 --- /dev/null +++ b/.asciidoctor/docinfo-footer.html @@ -0,0 +1,9 @@ + diff --git a/.asciidoctor/docinfo.html b/.asciidoctor/docinfo.html new file mode 100644 index 0000000000..88ca25519c --- /dev/null +++ b/.asciidoctor/docinfo.html @@ -0,0 +1,9 @@ +
+ +
diff --git a/.asciidoctor/docs.css b/.asciidoctor/docs.css new file mode 100644 index 0000000000..a3e929f89e --- /dev/null +++ b/.asciidoctor/docs.css @@ -0,0 +1,1575 @@ +/* Red Hat Fonts */ +@font-face { + font-family: 'Red Hat Display'; + src: url("https://redhatofficial.github.io/RedHatFont/RedHatDisplay/RedHatDisplayVF.woff2") format('woff2-variations'); + font-weight: 300 900; + font-style: normal; + font-display: fallback; +} +@font-face { + font-family: 'Red Hat Display'; + src: url("https://redhatofficial.github.io/RedHatFont/RedHatDisplay/RedHatDisplayVF-Italic.woff2") format('woff2-variations'); + font-weight: 300 900; + font-style: italic; + font-display: fallback; +} +@font-face { + font-family: 'Red Hat Text'; + src: url("https://redhatofficial.github.io/RedHatFont/RedHatText/RedHatTextVF.woff2") format('woff2-variations'); + font-weight: 300 700; + font-style: normal; + font-display: fallback; +} +@font-face { + font-family: 'Red Hat Text'; + src: url("https://redhatofficial.github.io/RedHatFont/RedHatText/RedHatTextVF-Italic.woff2") format('woff2-variations'); + font-weight: 300 700; + font-style: italic; + font-display: fallback; +} +@font-face { + font-family: 'Red Hat Mono'; + src: url("https://redhatofficial.github.io/RedHatFont/RedHatMono/RedHatMonoVF.woff2") format('woff2-variations'); + font-weight: 300 700; + font-style: normal; + font-display: fallback; +} +@font-face { + font-family: 'Red Hat Mono'; + src: url("https://redhatofficial.github.io/RedHatFont/RedHatMono/RedHatMonoVF-Italic.woff2") format('woff2-variations'); + font-weight: 300 700; + font-style: italic; + font-display: fallback; +} + +/* Positioning toc and content */ +@media screen and (min-width: 768px) { + body.toc2 { + padding-left: 400px; + padding-right: 0; + } +} +@media screen and (min-width: 768px) { + #toc.toc2 { + margin-top: 0 !important; + position: fixed; + width: 319px; + left: 0; + top: 0; + border-right: 1px solid #e7e7e9; + border-top-width: 0 !important; + 
border-bottom-width: 0 !important; + z-index: 1000; + padding: 1.25em 1em; + height: 100%; + overflow: auto; + } +} + +/* Style borrowed from docs.redhat.com and adapted to the HTML structure */ + +#toc.toc2 { + background:#f2f2f2; + justify-content:space-between; + margin:1rem 0 2rem; + padding:1rem +} +#toctitle { + font-size:1.25rem; + font-weight:400; + line-height:1.6667; + margin-top:0; + text-transform:none +} +#toc li { + margin-bottom:.25em; + padding-left:0; + list-style:none; +} +#toc a, +#toc a:visited { + color:black; + display:inline-block; + text-decoration:none; +} + +.sectlevel1 { + padding-left:0; +} + +.sectlevel2, +.sectlevel3, +.sectlevel4, +.sectlevel5 { + padding-left:1em; +} + +h3 { + color:#464646; + font-family:var(--rh-font-family-heading,"Red Hat Display",Helvetica,Arial,sans-serif); + font-size:var(--rh-font-size-body-text-lg,1.125rem) +} + +li { + display:list-item; + margin:0; + padding:0; + text-align:-webkit-match-parent +} + +*, +:after, +:before, +:after, +:before { + box-sizing:border-box +} +img, +object, +svg, +img, +object, +svg { + display:inline-block; + max-width:100%; + vertical-align:middle +} +hr { + border:0; + border-top:.0625rem solid #d2d2d2; + clear:both; + margin:1rem 0 +} +a { + color:#06c; + text-decoration:underline +} +a:focus, +a:hover { + color:#036 +} +p { + margin:1.49963rem 0 +} +li>p { + margin:0 +} +h1, +h2, +h3, +h4, +h5, +h6 { + font-family:RedHatDisplay,Red Hat Display,Helvetica Neue,Arial,sans-serif; + font-weight:400; + margin:0 0 .625rem; + color:#464646; +} +h1 { + font-size:2rem; + margin:2rem 0 +} +h2 { + font-size:2rem; + margin:2rem 0 +} +h3 { + font-size:1.65rem; + font-weight:400 +} +h4 { + font-size:1.5rem +} +h5 { + font-size:1.25rem + font-weight:400 +} +h6 { + font-size:1.125rem; + font-weight:500; + line-height:1.6667 +} +ol ::marker, +ul ::marker { + font:inherit +} +li { + margin:0 0 .5em; + padding:0 +} +li>p { + margin:.5rem 0 +} +li>ol, +li>ul { + margin:0 +} +dl dd { + 
margin:.5rem 0 .5rem 1rem +} +dl dd>p { + margin:.5rem 0 +} +.informaltable, +.table-contents, +.table-wrapper { + max-height:var(--rh-table--maxHeight); + overflow:auto +} +table { + border:0; + font-size:1rem; + line-height:1.6667; + /* commented out as it breaks on Safari on Mac // table-layout:fixed */ +} +table caption { + color:#585858; + margin-bottom:.5rem; + margin-top:.5rem; + text-align:left +} +table td, +table th { + border:0; + border-bottom:.0625rem solid #d2d2d2; + border-bottom:.0625rem solid var(--pfe-table--Border,#d2d2d2); + padding:.5em 1rem +} +table td.halign-left, +table th.halign-left { + text-align:left +} +table td.halign-center, +table th.halign-center, +table td.halign-center, +table th.halign-center { + text-align:center +} +table td.halign-right, +table th.halign-right { + text-align:right +} +table td.valign-top, +table th.valign-top { + vertical-align:top +} +table td.valign-middle, +table th.valign-middle { + vertical-align:middle +} +table td.valign-bottom, +table th.valign-bottom { + vertical-align:bottom +} +table thead td, +table thead th { + background:#f5f5f5; + font-weight:600 +} +rh-table table, +rh-table.rh-table--expanded-vertically { + max-height:-moz-max-content; + max-height:max-content +} +pre.nowrap { + overflow:auto; + overflow-wrap:normal; + white-space:pre; + word-break:normal +} +.listingblock pre { + background:transparent +} + +[class] pre code { + background:inherit; + color:inherit; + font-family:inherit; + font-size:inherit; + font-weight:inherit; + line-height:inherit; + padding:0 +} +.keycap, +kbd { + background-color:#eee; + background-image:linear-gradient(180deg,#ddd,#eee,#fff); + border-radius:.1875rem; + box-shadow:0 -.0625rem #fff,0 .0625rem 0 .1875rem #aaa; + font-family:RedHatMono,Red Hat Mono,Consolas,monospace; + font-size:90%; + font-weight:400; + margin:0 .25rem; + padding:.125rem .375rem +} +.keycap strong, +.keycap strong { + font-weight:inherit +} +kbd.keyseq, +kbd.keyseq { + 
background:transparent; + border:0; + box-shadow:none; + padding:0 +} +kbd.keyseq kbd, +kbd.keyseq kbd { + display:inline-block; + margin:0 .375rem +} +kbd.keyseq kbd:first-child, +kbd.keyseq kbd:first-child { + margin-left:0 +} +b.button { + font-size:90%; + font-weight:700; + padding:.1875rem +} +b.button:before { + content:"[" +} +b.button:after { + content:"]" +} +html { + font-family:sans-serif; + -ms-text-size-adjust:100%; + -webkit-text-size-adjust:100% +} +body { + margin:0 +} +audio, +canvas, +progress, +video { + display:inline-block; + vertical-align:baseline +} +audio:not([controls]) { + display:none; + height:0 +} +[hidden], +template { + display:none +} +a { + background:transparent +} +a:active, +a:hover { + outline:0 +} +abbr[title] { + border-bottom:.0625rem dotted +} +dfn { + font-style:italic +} + +mark { + background:#ff0; + color:#000 +} +small { + font-size:80% +} +sub, +sup { + font-size:75%; + line-height:0; + position:relative; + vertical-align:baseline +} +sup { + top:-.5em +} +sub { + bottom:-.25em +} +img { + border:0 +} +svg:not(:root) { + overflow:hidden +} +figure { + margin:1em 2.5rem +} +hr { + box-sizing:content-box; + height:0 +} +code, +kbd, +pre, +samp { + font-family:monospace,monospace; + font-size:1em +} +button, +optgroup, +select, +textarea, +.rhdocsinput { + color:inherit; + font:inherit; + margin:0 +} +button { + overflow:visible +} +button, +select { + text-transform:none +} +button, +html input[type=button], +input[type=reset], +input[type=submit] { + -moz-appearance:button; + appearance:button; + -webkit-appearance:button; + cursor:pointer +} +button[disabled], +html input[disabled] { + cursor:default +} +button::-moz-focus-inner, +input::-moz-focus-inner { + border:0; + padding:0 +} +input { + line-height:normal +} +input[type=checkbox], +input[type=radio] { + box-sizing:border-box; + padding:0 +} +input[type=number]::-webkit-inner-spin-button, +input[type=number]::-webkit-outer-spin-button { + height:auto +} 
+input[type=search] { + -moz-appearance:textfield; + appearance:textfield; + -webkit-appearance:textfield; + box-sizing:content-box +} +input[type=search]::-webkit-search-cancel-button, +input[type=search]::-webkit-search-decoration { + -webkit-appearance:none +} +fieldset { + border:.0625rem solid silver; + margin:0 .125rem; + padding:.35em .625em .75em +} +legend { + border:0; + padding:0 +} +textarea { + overflow:auto +} +optgroup { + font-weight:700 +} +table { + border-collapse:collapse; + border-spacing:0 +} +td, +th { + padding:0 +} + +.admonitionblock>div:nth-child(2), +.caution>div:nth-child(2), +.important>div:nth-child(2), +.note>div:nth-child(2), +.tip>div:nth-child(2), +.warning>div:nth-child(2) { + margin:.5rem 0 +} +.admonitionblock>div:nth-child(2)>:first-child, +.caution>div:nth-child(2)>:first-child, +.important>div:nth-child(2)>:first-child, +.note>div:nth-child(2)>:first-child, +.tip>div:nth-child(2)>:first-child, +.warning>div:nth-child(2)>:first-child { + margin-top:0 +} +.admonitionblock>div:nth-child(2)>:last-child, +.caution>div:nth-child(2)>:last-child, +.important>div:nth-child(2)>:last-child, +.note>div:nth-child(2)>:last-child, +.tip>div:nth-child(2)>:last-child, +.warning>div:nth-child(2)>:last-child { + margin-bottom:0 +} +.listingblock+.listingblock, +pre+pre, +pre[class]+pre[class] { + margin-top:2rem +} +.listingblock { + background:#f8f8f8; + overflow:visible; + position:relative; + transform:translate(0); + z-index:0; + border: 1px solid grey; + border-radius:.1rem; +} +.listingblock:before { + background-repeat:no-repeat; + background-size:6.25rem 100%; + bottom:var(--scrollbar__height,1px); + content:""; + display:block; + height:7.125rem; + max-height:100%; + max-height:calc(100% - var(--scrollbar__height, 2px)); + position:absolute; + right:var(--scrollbar__width,6px); + top:.0625rem; + width:4.0625rem; + z-index:1 +} + +.colist>ol { + counter-reset:colist; + list-style:none; + margin:1rem 0 2rem; + padding:0 +} 
+.colist>ol>li { + counter-increment:colist; + margin:.5rem 0; + padding-left:1.75rem; + position:relative; +} +.colist>ol>li:before { + content:counter(colist); + left:0; + position:absolute; + top:.1875rem +} +.colist>ol>li:before, +.conum { + background:#06c; + border-radius:50%; + color:#fff; + display:inline-block; + font-family:RedHatText,Red Hat Text,Helvetica Neue,Arial,sans-serif; + font-style:normal; + font-weight:600; + height:1.25rem; + line-height:1.35rem; + padding:0; + text-align:center; + top:-.125em; + vertical-align:middle; + width:1.25rem +} +.colist div.paragraph { + margin:0 +} + +rh-table { + display:block +} +::-webkit-scrollbar, +:host ::-webkit-scrollbar { + height:.625rem; + width:.625rem +} +::-webkit-scrollbar, +::-webkit-scrollbar-track, +:host ::-webkit-scrollbar, +:host ::-webkit-scrollbar-track { + background-color:#d6d6d6 +} +::-webkit-scrollbar-thumb, +:host ::-webkit-scrollbar-thumb { + background-color:#8e8e8e +} +*, +:host * { + scrollbar-color:#8e8e8e #d6d6d6 +} +p:empty, +p:empty { + display:none +} +.book { + font-size: var(--rh-font-size-body-text-lg,1.125rem); + font-weight: var(--rh-font-weight-body-text-regular,400); + line-height: 1.6667; + padding-left: 6rem; + padding-right: 6rem; + padding-top: var(--rh-space-3xl,4rem); + max-width: 1400px; +} + +.book[class] h1 code, +.book[class] h2 code, +.book[class] h3 code, +.book[class] h4 code, +.book[class] h5 code, +.book[class] h6 code, +[class] h1 code, +[class] h2 code, +[class] h3 code, +[class] h4 code, +[class] h5 code, +[class] h6 code { + background:transparent; + border:0; + color:inherit; + font:inherit; + margin:0; + padding:0 +} +details[class] { + list-style:none; + margin:1rem 0 3rem; + padding:0 +} + +.preamble { + margin:0 0 2rem +} +.sect1 { + margin:2rem 0 1rem +} +:host .sect1, +cp-documentation .sect1 { + margin:0 0 2rem; + padding:.0625rem 0 0 +} +:host(.cp-documentation--has-external-header) .sect1:first-child>h2:first-child, 
+:host(.cp-documentation--has-external-header) .sect1:first-child>h3:first-child { + margin-top:0 +} +.listingblock, +.literalblock { + margin:1rem 0 +} +.quoteblock, +.verseblock { + border-left:.25rem solid #d2d2d2; + margin:1rem 0; + padding:1rem 1rem 1rem 2rem +} +.quoteblock.pullleft, +.verseblock.pullleft { + float:left; + margin-right:3rem; + width:25rem +} +@media (min-width:768px) { + .quoteblock.pullleft, + .verseblock.pullleft { + margin-left:-1rem + } +} +.quoteblock.pullright, +.verseblock.pullright { + float:right; + margin-left:3rem; + width:25rem +} +@media (min-width:768) { + .quoteblock.pullright, + .verseblock.pullright { + margin-right:-2rem + } +} +@media (min-width:1100px) { + .quoteblock.pullright, + .verseblock.pullright { + margin-right:-10rem + } +} +.quoteblock>:first-child, +.verseblock>:first-child { + margin-top:0 +} +.quoteblock .content, +.verseblock .content { + font-family:RedHatText,Red Hat Text,Helvetica Neue,Arial,sans-serif; + font-size:1.25rem; + line-height:1.6667 +} +.quoteblock .attribution, +.verseblock .attribution { + font-size:.875rem; + font-style:italic; + font-weight:600; + line-height:1.6667; + text-transform:uppercase +} +.quoteblock .attribution .citetitle, +.verseblock .attribution .citetitle { + color:#585858 +} +.quoteblock .attribution cite, +.verseblock .attribution cite { + font-size:1em +} +.quoteblock blockquote { + font-style:italic; + margin:0; + padding:0 +} +.quoteblock blockquote .content>:first-child { + margin-top:0 +} +.quoteblock blockquote .content>:first-child:before { + color:#e00; + content:"“"; + display:block; + float:left; + font-size:2.75rem; + font-style:normal; + line-height:1.125em; + margin-right:.5rem +} +.quoteblock blockquote .content>:first-child .content>:first-child:before { + content:none +} +.imageblock { + margin:1rem 0 +} +.imageblock.pullleft { + float:left; + margin-right:3rem; + width:25rem +} +@media (min-width:768px) { + .imageblock.pullleft { + margin-left:-1rem + } +} 
+.imageblock.pullright { + float:right; + margin-left:3rem; + width:25rem +} +@media (min-width:768) { + .imageblock.pullright { + margin-right:-2rem + } +} +@media (min-width:1100px) { + .imageblock.pullright { + margin-right:-10rem + } +} +.imageblock.interrupter { + margin:2rem 0 +} +@media (min-width:768px) { + .imageblock.interrupter { + margin-left:-1rem; + margin-right:-2rem + } + .imageblock.interrupter .caption { + margin-left:1rem; + margin-right:2rem + } +} +@media (min-width:1100px) { + .imageblock.interrupter { + margin-right:-10rem + } + .imageblock.interrupter .caption { + margin-right:10rem + } +} +.imageblock.interrupter img { + max-width:100% +} +.imageblock .caption { + color:#585858; + display:block; + font-size:.875rem; + line-height:1.6667; + margin:.5rem 0 0 +} +.rhdocs-footnotes { + border-top:.0625rem solid #d2d2d2; + margin:3rem 0 1rem; + padding:1rem 0 0 +} +.rhdocs-footnotes>ol { + margin:0; + padding:0 0 0 1.5rem +} +@supports (counter-reset:footnotenum) { + .rhdocs-footnotes>ol { + counter-reset:footnotenum; + list-style:none; + padding:0 + } + .rhdocs-footnotes>ol>li { + counter-increment:footnotenum + } + .rhdocs-footnotes>ol>li:before { + color:#585858; + content:"[" counter(footnotenum) "]"; + display:inline-block; + margin-right:.25rem + } +} +#footer { + background:#ededed; + color:#151515; + font-size:.875rem; + line-height:1.6667; + margin:3rem 0 0; + padding:1rem +} +.center { + margin-left:auto; + margin-right:auto +} +.stretch { + width:100% +} + +pre, +pre[class] { + margin:0; + padding:1.25em 1em; + position:relative +} +code[class*=language-], +pre[class*=language-] { + color:#151515; + -moz-tab-size:4; + -o-tab-size:4; + tab-size:4 +} +code.language-none, +code.language-text, +code.language-txt, +pre.language-none, +pre.language-text, +pre.language-txt { + color:#151515 +} +code[class*=language-] ::-moz-selection, +code[class*=language-]::-moz-selection, +pre[class*=language-] ::-moz-selection, 
+pre[class*=language-]::-moz-selection { + background:#cceae7; + color:#263238 +} +code[class*=language-] ::selection, +code[class*=language-]::selection, +pre[class*=language-] ::selection, +pre[class*=language-]::selection { + background:#cceae7; + color:#263238 +} +:not(pre)>code[class*=language-] { + border-radius:.2em; + padding:.1em; + white-space:normal +} +.token.atrule { + color:#40199a +} +.token.attr-name { + color:#06c +} +.token.attr-value, +.token.attribute { + color:#b300b3 +} +.token.boolean { + color:#40199a +} +.token.builtin, +.token.cdata, +.token.char, +.token.class, +.token.class-name { + color:#06c +} +.token.comment { + color:#6a6e73 +} +.token.constant { + color:#40199a +} +.token.deleted { + color:#c9190b +} +.token.doctype { + color:#6a6e73 +} +.token.entity { + color:#c9190b +} +.token.function { + color:#40199a +} +.token.hexcode { + color:#b300b3 +} +.token.id, +.token.important { + color:#40199a; + font-weight:700 +} +.token.inserted { + color:#06c +} +.token.keyword { + color:#40199a +} +.token.number { + color:#b300b3 +} +.token.operator { + color:#06c +} +.token.prolog { + color:#6a6e73 +} +.token.property { + color:#06c +} +.token.pseudo-class, +.token.pseudo-element { + color:#b300b3 +} +.token.punctuation, +.token.regex { + color:#06c +} +.token.selector { + color:#c9190b +} +.token.string { + color:#b300b3 +} +.token.symbol { + color:#40199a +} +.token.unit { + color:#b300b3 +} +.token.url, +.token.variable { + color:#c9190b +} + +@media print { + .field code, + .field pre, + code[class*=language-], + pre, + pre[class*=language-] { + white-space:pre-wrap!important; + word-wrap:break-word!important; + overflow-wrap:break-word!important; + word-break:break-word!important + } +} + +.book>.titlepage:not(:last-child), +.chapter, +section[id] { + padding-bottom:3.75rem +} +.book>.titlepage .chapter:last-child, +.book>.titlepage section[id]:last-child, +.chapter .chapter:last-child, +.chapter section[id]:last-child, +section[id] 
.chapter:last-child, +section[id] section[id]:last-child { + margin-bottom:-3.75rem +} +.listingblock+section[id], +pre+section[id] { + padding-top:3.75rem +} +.cta-link { + font-size:inherit +} +a { + word-wrap:break-word; + overflow-wrap:break-word +} +.caution, +.important, +.note, +.tip, +.warning { + padding:.8888888889em; + position:relative +} +.caution { + background-color:#fff4cc; + border-top:2px solid #dca614 +} +.important { + background-color:#fff4cc; + border-top:2px solid #dca614 +} +.note { + background-color:#e0f0ff; + border-top:2px solid #4394e5 +} +.tip { + background-color:#e0f0ff; + border-top:2px solid #4394e5 +} +.warning { + background-color:#fff4cc; + border-top:2px solid #dca614 +} +.book>.titlepage, +.chapter, +section[id] { + padding-bottom:var(--rh-space-4xl,64px) +} + +.titlepage .svg-img[data*="title_logo.svg"] { + margin:1.5rem 0; + width:15rem +} +.paragraph { + margin:1.49963rem 0 +} +.paragraph[class] { + margin-bottom:1.49963rem +} +dd { + margin-bottom:2.5rem +} + +.example { + border-left:.3125rem solid #ccc; + margin-bottom:2rem; + padding:1rem 0 1rem 1rem +} + +.annotator-outer[class][class] { + display:none; + flex-direction:column; + flex-grow:1; + height:auto; + margin:0; + position:static; + width:auto +} +@media (min-width:1400px) { + .annotator-outer[class][class] { + display:flex + } +} + +.producttitle { + color:#000; + font-size:1.25rem; + text-transform:uppercase +} +.title { + font-size:1rem; + font-style:normal; + font-weight:700; + line-height:1.6667; + margin:1.25rem 0 0; + text-transform:none +} +.paragraph>.title[class]+.content>:first-child, +.paragraph>.title[class]+p, +p.title[class]+.content>:first-child, +p.title[class]+p { + margin-top:0 +} +[class] pre .caution, +[class] pre .important, +[class] pre .note, +[class] pre .tip, +[class] pre .warning { + background:transparent; + border:0; + color:inherit; + font:inherit; + margin:0; + padding:0 +} +[class] pre .caution:after, +[class] pre 
.important:after, +[class] pre .note:after, +[class] pre .tip:after, +[class] pre .warning:after { + content:none +} +[class] code.email { + background-color:transparent; + font:inherit; + padding:0 +} +[class] .author { + margin-bottom:1.5rem +} +[class] .author .author { + margin-bottom:0 +} +table { + margin:2rem 0 +} +[class] table { + width:auto +} +table .table-contents table { + max-width:100%; + overflow:auto +} +rh-table table { + margin:0; + max-width:9999em; + overflow:visible +} +td, +th { + border-left:0; + padding:.5em 1rem; + transition:background .25s ease-out +} +td.content--md[class][class], +th.content--md[class][class] { + min-width:13em +} +td.content--lg[class][class], +th.content--lg[class][class] { + min-width:20em +} +thead th { + padding-top:1.5em +} +caption { + color:currentColor; + color:var(--pfe-table__caption--Color,currentColor); + font-weight:700; + margin-bottom:.5rem; + margin-top:.5rem; + text-align:center +} +.revhistory table td, +.revhistory table th { + border-color:transparent +} +.revhistory table td { + padding:.625rem .875rem +} +.revhistory table.simplelist { + margin:0 +} +@media print { + #masthead { + display:none!important + } +} +.rh-table--is-full-screen #to-top { + display:none +} +body { + --rh-table--maxHeight: calc(100vh - 6.25rem) ; + color:#151515; + background-color:white; + font-family:var(--rh-font-family-body-text,RedHatText,"Red Hat Text","Noto Sans Arabic","Noto Sans Hebrew","Noto Sans JP","Noto Sans KR","Noto Sans Malayalam","Noto Sans SC","Noto Sans TC","Noto Sans Thai",Helvetica,Arial,sans-serif); + font-size:var(--rh-body-copy-lage,1.125rem); + line-height:1.6667; + -moz-tab-size:4; + -o-tab-size:4; + tab-size:4 +} +rh-codeblock::slotted(#content) { + border-radius:.25rem; + padding:var (--rh-space-lg,16px) +} +rh-codeblock .screen { + display:grid; + grid-template-columns:1fr 4.375rem +} +rh-codeblock[class][class][class][class][class] { + max-width:99999em +} +pre { + border:0; + 
max-height:-moz-max-content; + max-height:max-content +} +pre, +pre[class] { + margin:0; + padding:1.25em 1em; + position:relative +} +rh-code-block>div.codeblock__inner-wrapper>pre, +rh-code-block>div.codeblock__inner-wrapper>pre[class] { + margin:0; + padding:0; + position:relative +} +code[class*=language-], +pre[class*=language-] { + color:#151515; + -moz-tab-size:4; + -o-tab-size:4; + tab-size:4 +} +code.literal { + background:#eee; + border-radius:.25rem; + color:#000; + font-size:.875rem; + line-height:1.6667; + overflow-wrap:break-word; + padding:.125em .5em; + word-break:break-word +} +code.literal, +kbd, +span.keycap { + font-family:RedHatMono,Red Hat Mono,Consolas,monospace +} +kbd, +span.keycap { + background-color:#eee; + background-image:linear-gradient(180deg,#ddd,#eee,#fff); + border-radius:.1875rem; + box-shadow:0 -.0625rem #fff,0 .0625rem 0 .1875rem #aaa; + font-size:90%; + font-weight:400; + margin:0 .25rem; + padding:.125rem .375rem +} +ol, +ul { + margin:1rem 0; + padding:0 0 0 1.5rem +} +._additional-resources[class][class], +._additional-resources[class][class][id]:last-child { + background:#fff; + border:.0625rem solid #d2d2d2; + border-radius:.1875rem; + margin:2em 0 4em; + padding:2rem 2rem 1rem +} +._additional-resources[class][class] ul { + border:0; + list-style:none; + margin:0; + padding:0; + position:relative +} +._additional-resources[class][class] li { + border-bottom:.0625rem solid #d2d2d2; + box-sizing:content-box; + margin:0; + padding:1rem 1.5rem 1rem 0; + -moz-column-break-inside:avoid; + break-inside:avoid +} +._additional-resources[class][class] li:last-child { + border:0 +} +section.section#additional_resource .additional-resources__heading, +section.section#additional_resource .heading, +section.section#additional_resource h1, +section.section#additional_resource h2, +section.section#additional_resource h3, +section.section#additional_resource h4, +section.section#additional_resource h5, 
+section.section#additional_resource h6, +section.section#additional_resource p.title { + display:block; + font-family:RedHatDisplay,Red Hat Display,Helvetica Neue,Arial,sans-serif; + font-size:1.125rem; + font-weight:700; + line-height:1.5rem; + margin:0 0 .5rem; + padding:0; + text-transform:uppercase +} +section.section:first-of-type { + margin-top:var(--rh-space-4xl,64px) +} +section.section p { + margin-bottom:var(--rh-space-lg,16px); + margin-top:0 +} + + +dl { + display:block; + margin-block-end:1em; + margin-block-start:1em; + margin-inline-end:0; + margin-inline-start:0 +} +.paragraph { + margin:1.49963rem 0 +} +img, +object, +svg { + display:inline-block; + max-width:100%; + vertical-align:middle +} +.titlepage .svg-img[data*="title_logo.svg"] { + margin:1.5rem 0; + width:15rem +} +.book[class] .author { + margin-bottom:1.5rem +} +.book[class] .author .author { + margin-bottom:0 +} +.paragraph>.title[class], +p.title[class] { + font-size:1rem; + font-style:normal; + font-weight:700; + line-height:1.6667; + margin:1.25rem 0 0 +} +.example { + border-left:.3125rem solid #ccc; + margin-bottom:2rem; + padding:1rem 0 1rem 1rem +} +code { + background:#eee; + color:#000; + font-family:RedHatMono,Red Hat Mono,Consolas,monospace; + font-size:.875rem; + line-height:1.6667; + overflow-wrap:break-word; + padding:.125em .5em; + word-break:break-word +} +.paragraph[class] { + margin-bottom:1.49963rem +} +.book[class] code.email { + background-color:transparent; + font:inherit; + padding:0 +} + +.producttitle { + color:#000; + font-size:1.25rem; + text-transform:uppercase +} +dl { + margin:1rem 0 +} +dl dt { + font-weight:600; + margin:.5rem 0 +} +ol ol { + list-style:lower-roman +} +.codeblock--processed pf-clipboard-copy::part(input), +.codeblock--processed pf-clipboard-copy::part(span) { + display:none +} +.token.tag { + color:#c9190b +} + +.admonitionblock { + margin-bottom:var(--rh-space-lg,1rem) +} +.guibutton, +.guimenu, +.guimenuitem { + font-weight:700 +} 
+.guibutton { + font-size:90%; + padding:.1875rem +} +.guibutton:before { + content:"[" +} +.guibutton:after { + content:"]" +} +.book, +{ + --rh-table--maxHeight: calc(100vh - 6.25rem) ; + color:#151515; + font-family:RedHatText,Red Hat Text,Helvetica Neue,Arial,sans-serif; + font-size:1.125rem; + line-height:1.6667; + -moz-tab-size:4; + -o-tab-size:4; + tab-size:4 +} +pre[hidden] { + display:none +} +.listingblock { + background:var(--rh-color-surface-lighter,#f2f2f2); + margin:1rem 0; + overflow:visible; + position:relative; + transform:translate(0); + z-index:0 +} +nopre { + display:block; + font-size:.8125rem; + line-height:1.42857; + margin:0 0 .625rem; + word-break:break-all; + word-wrap:break-word; + background-color:var(--rh-color-surface-lighter,#f2f2f2); + border:.0625rem solid #ccc; + border-radius:.25rem; + color:#333 +} +.book pre, +pre { + background:var(--rh-color-surface-lighter,#f2f2f2); + color:#151515; + font-family:RedHatMono,Red Hat Mono,Consolas,monospace; + font-size:.875rem; + line-height:1.6667; + overflow-wrap:normal; + white-space:pre; + word-break:normal +} +pre[class] { + line-height:1.6667; + overflow-x:auto +} +*, +:after, +:before { + box-sizing:border-box +} +:root { + --rh-space-xs:4px; + --rh-space-sm:6px; + --rh-space-md:8px; + --rh-space-lg:16px; + --rh-space-xl:24px; + --rh-space-2xl:32px; + --rh-space-3xl:48px; + --rh-space-4xl:64px; + --rh-space-5xl:80px; + --rh-space-6xl:96px; + --rh-space-7xl:128px; + --rh-font-size-body-text-xs:.75rem; + --rh-font-size-body-text-sm:.875rem; + --rh-font-size-body-text-md:1rem; + --rh-font-size-body-text-lg:1.125rem; + --rh-font-size-body-text-xl:1.25rem; + --rh-font-size-body-text-2xl:1.5rem; + --rh-font-size-heading-xs:1.25rem; + --rh-font-size-heading-sm:1.5rem; + --rh-font-size-heading-md:1.75rem; + --rh-font-size-heading-lg:2.25rem; + --rh-font-size-heading-xl:2.5rem; + --rh-font-size-heading-2xl:3rem; + --pfe-navigation--logo--maxWidth:200px; + --pfe-navigation__logo--height:40px; + 
--pfe-navigation--fade-transition-delay:.5s; + --pfe-navigation__nav-bar--highlight-color:var(--rh-color-brand-red-on-dark,#e00); + --pf-global--icon--FontSize--sm:.75rem +} +body, +html { + font-family:Red Hat Text,sans-serif; + font-size:var(--rh-font-size-body-text-md,1rem); + line-height:var(--rh-line-height-body-text,1.5); +} + +main { + line-height:30px +} +section { + padding-bottom:3rem; + padding-top:3rem +} +img { + max-width:100% +} +a { + color:var(--rh-color-interactive-blue-darker,#06c); + text-decoration:underline +} +a:hover { + color:var(--rh-color-interactive-blue-darkest,#004080) +} + +@media (min-width:576px) { + .book { + max-width:540px + } +} +@media (min-width:768px) { + .book { + max-width:720px + } +} +@media (min-width:992px) { + .book { + max-width:1250px + } +} +section { + padding:0 +} +.list-unstyled { + list-style:none; + padding-left:0 +} +.doc-image-link { + display:inline-block; + text-decoration:none +} +.admonitionblock{ + margin-bottom:var(--rh-space-lg,1rem) + font-size:var(--rh-font-size-body-text-md,1rem) +} +.admonitionblock table { + margin:0; + border:none; + width:fit-content +} +.admonitionblock td { + margin:0; + border:none; + display:block +} +.admonitionblock .title { + margin:0; + color:#002952; + font-weight:700 +} + diff --git a/.asciidoctorconfig b/.asciidoctorconfig new file mode 100644 index 0000000000..1f0bc8b86e --- /dev/null +++ b/.asciidoctorconfig @@ -0,0 +1,19 @@ +// .asciidoctorconfig +// Specifies Asciidoctor configuration to preview files in the editor +// See: https://intellij-asciidoc-plugin.ahus1.de/docs/users-guide/features/advanced/asciidoctorconfig-file.html + +// Set up attributes +include::{asciidoctorconfigdir}/artifacts/attributes.adoc[] + +// Set up preview style +:chapter-signifier: Chapter +:docinfo: shared +:docinfodir: {asciidoctorconfigdir}/.asciidoctor +:doctype: book +:sectnumlevels: 5 +:sectnums: +:source-highlighter: coderay +:stylesdir: {asciidoctorconfigdir}/.asciidoctor 
+:stylesheet: docs.css +:toc: left +:toclevels: 5 diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000000..e826aa95e4 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,14 @@ +# .editorconfig +# Specifies editor configuration +# See: https://editorconfig.org/ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 2 +indent_style = space +insert_final_newline = true +max_line_length = 120 +tab_width = 2 +trim_trailing_whitespace = true diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..a7b0a0f45d --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,28 @@ + + + + +**IMPORTANT: Do Not Merge - To be merged by Docs Team Only** + +**Version(s):** + + +**Issue:** + + +**Link to docs preview:** + + +**Reviews:** +- [ ] SME: @ mention assignee +- [ ] QE: @ mention assignee +- [ ] Docs review: @ mention assignee +- [ ] Additional review: @mention assignee (by writer) + + + + +**Additional information:** + + + diff --git a/.github/workflows/build-asciidoc.yml b/.github/workflows/build-asciidoc.yml new file mode 100644 index 0000000000..8b81b84e5b --- /dev/null +++ b/.github/workflows/build-asciidoc.yml @@ -0,0 +1,90 @@ +# Copyright (c) 2023 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +name: GitHub Pages + +on: + push: + branches: + - main + - rhdh-1.** + - 1.**.x + - release-1.** + +jobs: + adoc_build: + name: Asciidoctor Build For GH Pages + runs-on: ubuntu-latest + permissions: + contents: write + concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Setup environment + run: | + # update + sudo apt-get update -y || true + # install + sudo apt-get -y -q install asciidoctor && asciidoctor --version + echo "GIT_BRANCH=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_ENV + + - name: Build guides and indexes + run: | + echo "Building branch ${{ env.GIT_BRANCH }}" + build/scripts/build.sh -b ${{ env.GIT_BRANCH }} + + # repo must be public for this to work + - name: Deploy + uses: peaceiris/actions-gh-pages@v4 + # if: github.ref == 'refs/heads/main' + with: + github_token: ${{ secrets.RHDH_BOT_TOKEN }} + publish_branch: gh-pages + keep_files: true + publish_dir: ./titles-generated + + - name: Cleanup merged PR branches + run: | + PULL_URL="https://api.github.com/repos/redhat-developer/red-hat-developers-documentation-rhdh/pulls" + GITHUB_TOKEN="${{ secrets.RHDH_BOT_TOKEN }}" + git config user.name "rhdh-bot service account" + git config user.email "rhdh-bot@redhat.com" + + git checkout gh-pages; git pull || true + dirs=$(find . -maxdepth 1 -name "pr-*" -type d | sed -r -e "s|^\./pr-||") + refs=$(cat pulls.html | grep pr- | sed -r -e "s|.+.html>pr-([0-9]+).+|\1|") + for d in $(echo -e "$dirs\n$refs" | sort -uV); do + PR="${d}" + echo -n "Check merge status of PR $PR ... 
" + PR_JSON=$(curl -sSL -H "Accept: application/vnd.github+json" -H "Authorization: Bearer $GITHUB_TOKEN" "$PULL_URL/$PR") + if [[ $(echo "$PR_JSON" | grep merged\") == *"merged\": true"* ]]; then + echo "merged, can delete from pulls.html and remove folder $d" + git rm -fr --quiet "pr-${d}" || rm -fr "pr-${d}" + sed -r -e "/pr-$PR\/index.html>pr-$PRpr-$PR> $GITHUB_ENV + + - name: Build guides and indexes + run: | + echo "Building PR ${{ github.event.pull_request.number }}" + build/scripts/build.sh -b "pr-${{ github.event.number }}" + + - name: Pull from origin before pushing (if possible) + run: | + /usr/bin/git pull origin gh-pages || true + + # repo must be public for this to work + - name: Deploy + uses: peaceiris/actions-gh-pages@v4 + # if: github.ref == 'refs/heads/main' + with: + github_token: ${{ secrets.RHDH_BOT_TOKEN }} + publish_branch: gh-pages + keep_files: true + publish_dir: ./titles-generated + + - name: PR comment with doc preview, replacing existing comments with a new one each time + shell: bash + env: + GH_TOKEN: ${{ secrets.RHDH_BOT_TOKEN }} + run: | + PR_NUM="${{ github.event.number }}" + ORG_REPO="${{ github.repository_owner }}/${{ github.event.repository.name }}" + gh repo set-default "${ORG_REPO}" + # for a given PR, check for existing comments from rhdh-bot; select the last one (if more than one) + if [[ $(gh api "repos/${ORG_REPO}/issues/${PR_NUM}/comments" -q 'map(select(.user.login=="rhdh-bot"))|last|.id') ]]; then + # edit that comment: + gh pr comment ${PR_NUM} -R "${ORG_REPO}" --edit-last --body "Updated preview: https://redhat-developer.github.io/red-hat-developers-documentation-rhdh/pr-${PR_NUM}/ @ $(date "+%x %X")" + else + # or create a new one: + gh pr comment ${PR_NUM} -R "${ORG_REPO}" --body "Preview: https://redhat-developer.github.io/red-hat-developers-documentation-rhdh/pr-${PR_NUM}/" + fi diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..483e61755e --- /dev/null +++ b/.gitignore @@ -0,0 +1,9 @@ 
+.DS_Store +titles/*/build +index.html +titles-generated/ +.vale.ini +.vale-styles/RedHat +.vscode +.cache/ +.vscode/ diff --git a/.htmltest.yml b/.htmltest.yml new file mode 100644 index 0000000000..9d386e9175 --- /dev/null +++ b/.htmltest.yml @@ -0,0 +1,13 @@ +--- +# .htmltest.yml +# Defines htmltest configuration +# See: https://github.com/wjdp/htmltest +# +DirectoryPath: titles-generated/main +CheckDoctype: false +ExternalTimeout: 30 +OutputDir: .cache/htmltest +IgnoreDirectoryMissingTrailingSlash: true +IgnoreSSLVerify: true +IgnoreURLs: # List URLS that are not published, false positives, websites refusing crawlers + - https://docs.github.com/ diff --git a/.prettierrc.yml b/.prettierrc.yml new file mode 100644 index 0000000000..1f786b9955 --- /dev/null +++ b/.prettierrc.yml @@ -0,0 +1,6 @@ +semi: false +singleQuote: true +tabWidth: 2 +trailingComma: es5 +options: + editorconfig: true diff --git a/.sync/sync-manual-trigger.png b/.sync/sync-manual-trigger.png new file mode 100644 index 0000000000..90654613c9 Binary files /dev/null and b/.sync/sync-manual-trigger.png differ diff --git a/.vale-styles/DeveloperHub/Attributes.yml b/.vale-styles/DeveloperHub/Attributes.yml new file mode 100644 index 0000000000..90120f7ff0 --- /dev/null +++ b/.vale-styles/DeveloperHub/Attributes.yml @@ -0,0 +1,15 @@ +--- +extends: substitution +ignorecase: false +level: error +message: Use the AsciiDoc attribute '{%s}' rather than '%s'. +nonword: true +scope: raw +swap: + '{product} Hub': product + product Hub: product + Backstage: product + Developer Hub: product-short + Hub Hub: product + Red Hat Developer Hub: product + Red Hat {product}: product diff --git a/.vale-styles/DeveloperHub/tests/fail.adoc b/.vale-styles/DeveloperHub/tests/fail.adoc new file mode 100644 index 0000000000..f76d3e1579 --- /dev/null +++ b/.vale-styles/DeveloperHub/tests/fail.adoc @@ -0,0 +1,7 @@ +Configure {product} Hub. +Install product Hub. +Configure Backstage. +Install Developer Hub. 
+Configure Red Hat Developer Hub Hub. +Install Red Hat Developer Hub. +Configure Red Hat {product}. diff --git a/.vale-styles/DeveloperHub/tests/pass.adoc b/.vale-styles/DeveloperHub/tests/pass.adoc new file mode 100644 index 0000000000..6548cbda9a --- /dev/null +++ b/.vale-styles/DeveloperHub/tests/pass.adoc @@ -0,0 +1,4 @@ +{product} +{product-short} +backstage +developer hub diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000000..9e26dfeeb6 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000..8415ace990 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2022 [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.adoc b/README.adoc new file mode 100644 index 0000000000..746ac63c80 --- /dev/null +++ b/README.adoc @@ -0,0 +1,28 @@ +# Red Hat Developer Hub documentation + +This repo is the upstream mirror of https://gitlab.cee.redhat.com/red-hat-developers-documentation/rhdh + +Contribute to this repo and merged changes will be synced to gitlab for build with Pantheon. + +## Building locally + +. Install `asciidoctor` - see https://docs.asciidoctor.org/asciidoctor/latest/install/linux-packaging/ +. Run `build/scripts/build.sh` to generate html with images in titles-generated/ folders + +## Previews + +Commits to this repo can be seen as GH Page content here: + +https://redhat-developer.github.io/red-hat-developers-documentation-rhdh/ + +Pull requests will have a link to the generated HTML attached a comment. + +## Contributing + +Submit a pull request against this repo. + + +## Plugins Reference Guide +For Plugin configuration guide, see upstream content in https://github.com/janus-idp/backstage-plugins/tree/main/plugins + +TODO: transform backstage-plugins markdown content into adoc, hosted in this repo. See https://issues.redhat.com/browse/RHIDP-451 for details. 
diff --git a/README.sync.adoc b/README.sync.adoc new file mode 100644 index 0000000000..fc9bd56d55 --- /dev/null +++ b/README.sync.adoc @@ -0,0 +1,41 @@ +# Syncing between GitHub and GitLab + + +## Upstream + +The upstream repo for contributions to the *Red Hat Developer Hub product documentation* is: + +* https://github.com/redhat-developer/red-hat-developers-documentation-rhdh + + +## Downstream + +The downstream repo (with pantheon integration) is: + +https://gitlab.cee.redhat.com/red-hat-developers-documentation/rhdh + +Please make changes to the GitHub repo. Changes made to GitLab will be overwritten. + + +## Sync configuration + +Sync is set up for any and all https://github.com/redhat-developer/red-hat-developers-documentation-rhdh/settings/branches[PROTECTED branches] in the upstream GitHub repo. + +### Repo mirror configuration + +Should you need to recreate the mirror setup, here are the steps: + +1. delete and create a new mirror rule from the `Mirroring repositories` section under https://gitlab.cee.redhat.com/red-hat-developers-documentation/rhdh/-/settings/repository[Settings > Repository]. + +* use the format https://bot-user@github.com/redhat-developer/rhdh-documentation-sync.git (NOTE: use the *original* repo name for redhat-developer/red-hat-developers-documentation-rhdh or sync will fail) + +* for the password, use the bot-user's PAT (see gitlab repo rhidp/productization/secrets for the latest or to create a new one when the current one expires) + +* select the option to only mirror https://github.com/redhat-developer/red-hat-developers-documentation-rhdh/settings/branches[PROTECTED branches] from the upstream repo + +2. apply new Personal Access Token under https://gitlab.cee.redhat.com/red-hat-developers-documentation/rhdh/-/settings/integrations/github/edit[Settings > Integrations > Github]. + +3. 
You can manually trigger a sync from the `Mirroring repositories` section under https://gitlab.cee.redhat.com/red-hat-developers-documentation/rhdh/-/settings/repository[Settings > Repository]. + +image::.sync/sync-manual-trigger.png[Repository Settings > expand Mirroring repositories > click circular arrows button] + diff --git a/artifacts/attributes.adoc b/artifacts/attributes.adoc new file mode 100644 index 0000000000..aa462f3134 --- /dev/null +++ b/artifacts/attributes.adoc @@ -0,0 +1,105 @@ +:red-hat-developers-documentation: +:imagesdir: +:idseparator: - + +// Company names +:company-name: Red Hat + +// Products +:product-author: CCS +:product: Red Hat Developer Hub +:product-short: Developer Hub +:product-very-short: RHDH +:product-version: 1.3 +:product-bundle-version: 1.3.0 +:product-chart-version: 1.3.0 +:product-backstage-version: 1.29.2 +:rhdeveloper-name: Red Hat Developer +:rhel: Red Hat Enterprise Linux +:odf-name: OpenShift Data Foundation + +:my-product-url: https://____ + +// Red Hat Platforms +:ocp-brand-name: Red Hat OpenShift Container Platform +:ocp-short: OpenShift Container Platform +:ocp-very-short: RHOCP +:osd-brand-name: Red Hat OpenShift Dedicated +:osd-short: OpenShift Dedicated +// minimum and current latest supported versions +:ocp-version-min: 4.14 +:ocp-version: 4.16 +// First mention of OpenShift CLI or `oc` in a module +:openshift-cli: pass:quotes[OpenShift CLI (`oc`)] +:rhsso-brand-name: Red Hat Single-Sign On +:rhsso: RHSSO + +// Partner Platforms +:aws-brand-name: Amazon Web Services +:aws-short: AWS +:azure-brand-name: Microsoft Azure +:azure-short: Azure +:eks-brand-name: Amazon Elastic Kubernetes Service +:eks-name: Elastic Kubernetes Service +:eks-short: EKS +:aks-brand-name: Microsoft Azure Kubernetes Service +:aks-name: Azure Kubernetes Service +:aks-short: AKS +:gke-brand-name: Google Kubernetes Engine +:gke-short: GKE +:gcp-brand-name: Google Cloud Platform +:gcp-short: GCP + +// Release Notes +:rn-product-title: 
Release notes for Red Hat Developer Hub + +// Red Hat Developer Hub administration guide +:ag-product-title: Administration guide for Red Hat Developer Hub + +// Red Hat Developer Hub getting started guide +:gs-product-title: Getting started with Red Hat Developer Hub + +// Backstage Plugins for Red Hat Developer Hub +//:bs-product-title: Backstage Plugins for Red Hat Developer Hub + +// User Guide +:ug-product-title: Red Hat Developer Hub User Guide + +// Links +:LinkAdminGuide: https://access.redhat.com/documentation/en-us/red_hat_developer_hub/{product-version}/html-single/administration_guide_for_red_hat_developer_hub/index +:NameOfAdminGuide: Administration guide for {product} + +:LinkGettingStartedGuide: https://access.redhat.com/documentation/en-us/red_hat_developer_hub/{product-version}/html-single/getting_started_with_red_hat_developer_hub/index + +:LinkPluginsGuide: https://access.redhat.com/documentation/en-us/red_hat_developer_hub/{product-version}/html-single/configuring_plugins_in_red_hat_developer_hub/index +:NameOfPluginsGuide: Configuring plugins in {product} + +:release-notes-url: https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}/html-single/release_notes/index +:release-notes-title: Release notes + +:installing-and-viewing-dynamic-plugins-url: https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}/html-single/installing_and_viewing_dynamic_plugins/index +:installing-and-viewing-dynamic-plugins-title: Installing and viewing dynamic plugins + +:authentication-book-url: https://docs.redhat.com/documentation/en-us/red_hat_developer_hub/{product-version}/html-single/authentication/index +:authentication-book-title: Authentication + +:authorization-book-url: https://docs.redhat.com/documentation/en-us/red_hat_developer_hub/{product-version}/html-single/authorization/index +:authorization-book-title: Authorization + +:installing-on-ocp-book-url: 
https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}/html-single/installing_red_hat_developer_hub_on_openshift_container_platform/index +:installing-on-ocp-book-title: Installing {product} on {ocp-short} + +:installing-on-eks-book-url: https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}/html-single/installing_red_hat_developer_hub_on_amazon_elastic_kubernetes_service/index +:installing-on-eks-book-title: Installing {product} on {eks-brand-name} + +:installing-on-aks-book-url: https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}/html-single/installing_red_hat_developer_hub_on_microsoft_azure_kubernetes_service/index +:installing-on-aks-book-title: Installing {product} on {aks-brand-name} + +:installing-in-air-gap-book-url: https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}/html-single/installing_red_hat_developer_hub_in_an_air-gapped_environment/index +:installing-in-air-gap-book-title: Installing {product} in an air-gapped environment + +:upgrading-book-url: https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}/html-single/upgrading_red_hat_developer_hub/index +:upgrading-book-title: Upgrading {product} + +:plugins-configure-book-url: https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}/html-single/configuring_dynamic_plugins/index +:plugins-configure-book-title: Configuring dynamic plugins diff --git a/artifacts/rhdh-plugins-reference/aap-backend/aap-backend-install-dynamic-plugin.adoc b/artifacts/rhdh-plugins-reference/aap-backend/aap-backend-install-dynamic-plugin.adoc new file mode 100644 index 0000000000..45352c66b8 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/aap-backend/aap-backend-install-dynamic-plugin.adoc @@ -0,0 +1,22 @@ +.Installation +The AAP backend plugin is pre-loaded in {product-short} with basic configuration properties. 
To enable it, set the `disabled` property to `false` as follows: + +[source,yaml] +---- +global: + dynamic: + includes: + - dynamic-plugins.default.yaml + plugins: + - package: ./dynamic-plugins/dist/janus-idp-backstage-plugin-aap-backend-dynamic + disabled: false +---- + +.Basic configuration +To enable the AAP plugin, you must set the following environment variables: + +* `AAP_BASE_URL` + +* `AAP_AUTH_TOKEN` + +.Advanced configuration \ No newline at end of file diff --git a/artifacts/rhdh-plugins-reference/aap-backend/aap-backend-plugin-readme.adoc b/artifacts/rhdh-plugins-reference/aap-backend/aap-backend-plugin-readme.adoc new file mode 100644 index 0000000000..bc06dc0cf1 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/aap-backend/aap-backend-plugin-readme.adoc @@ -0,0 +1,116 @@ += Installation and configuration of Ansible Automation Platform + +The Ansible Automation Platform (AAP) plugin synchronizes the accessible templates including job templates and workflow job templates from AAP into your {product-short} catalog. + +[IMPORTANT] +==== +The Ansible Automation Platform plugin is a Technology Preview feature only. + +Technology Preview features are not supported with Red Hat production service level agreements (SLAs), might not be functionally complete, and Red Hat does not recommend using them for production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. + +For more information on Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Scope]. + +Additional detail on how Red Hat provides support for bundled community dynamic plugins is available on the https://access.redhat.com/policy/developerhub-support-policy[Red Hat Developer Support Policy] page. 
+==== + +== For administrators + +=== Installing and configuring the AAP Backend plugin + +The AAP backend plugin allows you to configure one or multiple providers using your `app-config.yaml` configuration file in {product-short}. + +.Prerequisites +* Your {product-short} application is installed and running. +* You have created an account in Ansible Automation Platform. + +.Installation +The AAP backend plugin is pre-loaded in {product-short} with basic configuration properties. To enable it, set the `disabled` property to `false` as follows: + +[source,yaml] +---- +global: + dynamic: + includes: + - dynamic-plugins.default.yaml + plugins: + - package: ./dynamic-plugins/dist/janus-idp-backstage-plugin-aap-backend-dynamic + disabled: false +---- + +.Basic configuration +To enable the AAP plugin, you must set the following environment variables: + +* `AAP_BASE_URL`: Base URL of the service + +* `AAP_AUTH_TOKEN`: Authentication token for the service + +.Advanced configuration + +. You can use the `aap` marker to configure the `app-config.yaml` file of {product-short} as follows: ++ +-- +[source,yaml] +---- + catalog: + providers: + aap: + dev: + baseUrl: ${AAP_BASE_URL} + authorization: 'Bearer ${AAP_AUTH_TOKEN}' + owner: + system: + schedule: # optional; same options as in TaskScheduleDefinition + # supports cron, ISO duration, "human duration" as used in code + frequency: { minutes: 1 } + # supports ISO duration, "human duration" as used in code + timeout: { minutes: 1 } +---- +-- + +=== Log lines for AAP Backend plugin troubleshoot + +When you start your {product-short} application, you can see the following log lines: + +[source,log] +---- +[1] 2023-02-13T15:26:09.356Z catalog info Discovered ResourceEntity API type=plugin target=AapResourceEntityProvider:dev +[1] 2023-02-13T15:26:09.423Z catalog info Discovered ResourceEntity Red Hat Event (DEV, v1.2.0) type=plugin target=AapResourceEntityProvider:dev +[1] 2023-02-13T15:26:09.620Z catalog info Discovered 
ResourceEntity Red Hat Event (TEST, v1.1.1) type=plugin target=AapResourceEntityProvider:dev +[1] 2023-02-13T15:26:09.819Z catalog info Discovered ResourceEntity Red Hat Event (PROD, v1.1.1) type=plugin target=AapResourceEntityProvider:dev +[1] 2023-02-13T15:26:09.819Z catalog info Applying the mutation with 3 entities type=plugin target=AapResourceEntityProvider:dev +---- + +== For users + +=== Accessing templates from AAP in {product-short} + +When you have configured the AAP backend plugin successfully, it synchronizes the templates including job templates and workflow job templates from AAP and displays them on the {product-short} Catalog page as Resources. + +.Prerequisites + +* Your {product-short} application is installed and running. +* You have installed the AAP backend plugin. For installation and configuration instructions, see <>. + +.Procedure + +. Open your {product-short} application and Go to the *Catalog* page. +. Select *Resource* from the *Kind* drop-down and *job template* or *workflow job template* from the *Type* drop-down on the left side of the page. ++ +-- +image::rhdh-plugins-reference/aap-backend-plugin-user1.png[aap-backend-plugin-backstage] + +A list of all the available templates from AAP appears on the page. +-- + +. Select a template from the list. ++ +-- +The *OVERVIEW* tab appears containing different cards, such as: + +* *About*: Provides detailed information about the template. +* *Relations*: Displays the visual representation of the template and associated aspects. +* *Links*: Contains links to the AAP dashboard and the details page of the template. +* *Has subcomponents*: Displays a list of associated subcomponents. 
+ +image::rhdh-plugins-reference/aap-backend-plugin-user2.png[aap-backend-plugin-backstage-details] +-- diff --git a/artifacts/rhdh-plugins-reference/acr/acr-install-dynamic-plugin.adoc b/artifacts/rhdh-plugins-reference/acr/acr-install-dynamic-plugin.adoc new file mode 100644 index 0000000000..0a07ccf2ba --- /dev/null +++ b/artifacts/rhdh-plugins-reference/acr/acr-install-dynamic-plugin.adoc @@ -0,0 +1,15 @@ +.Installation +The ACR plugin is pre-loaded in {product-short} with basic configuration properties. To enable it, set the `disabled` property to `false` as follows: + +[source,yaml] +---- +global: + dynamic: + includes: + - dynamic-plugins.default.yaml + plugins: + - package: ./dynamic-plugins/dist/janus-idp-backstage-plugin-acr + disabled: false +---- + +.Configuration \ No newline at end of file diff --git a/artifacts/rhdh-plugins-reference/acr/acr-plugin-readme.adoc b/artifacts/rhdh-plugins-reference/acr/acr-plugin-readme.adoc new file mode 100644 index 0000000000..2f0f68c778 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/acr/acr-plugin-readme.adoc @@ -0,0 +1,153 @@ += Azure Container Registry + +The Azure Container Registry (ACR) plugin displays information about your container images available in the Azure Container Registry. + +[IMPORTANT] +==== +The Azure Container Registry plugin is a Technology Preview feature only. + +Technology Preview features are not supported with Red Hat production service level agreements (SLAs), might not be functionally complete, and Red Hat does not recommend using them for production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. + +For more information on Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Scope]. 
+ +Additional detail on how Red Hat provides support for bundled community dynamic plugins is available on the https://access.redhat.com/policy/developerhub-support-policy[Red Hat Developer Support Policy] page. +==== + +== For administrators + +=== Installing and configuring the ACR plugin +ifeval::[{doc-show-dynamic-content} == true] +include::../../../artifacts/rhdh-plugins-reference/acr/acr-install-dynamic-plugin.adoc[leveloffset=+2] +endif::[] + +ifeval::[{doc-show-dynamic-content} == false] +The Red Hat Plug-ins for Backstage (RHPIB) packages are hosted in a separate NPM registry, which is maintained by Red Hat. To use these packages, you must adjust your NPM configuration to pull the `@redhat` scoped packages: + +[source] +---- +# update your .npmrc or .yarnrc file +yarn config set "@redhat:registry" https://npm.registry.redhat.com +# then pull a package +yarn add @redhat/backstage-plugin-quay +---- + +For more information, see link:https://docs.npmjs.com/cli/v9/configuring-npm/npmrc[npm docs]. + +Creating a `.npmrc` file ensures that all the packages are scoped under `@redhat` and are fetched from link:https://npm.registry.redhat.com/[Red Hat's NPM registry], while the rest dependencies remain sourced from other link:registry.npmjs.org[registry]. + +.Procedure + +. Run the following command to install the ACR plugin: ++ +-- +[source] +---- +yarn workspace app add @redhat/backstage-plugin-acr +---- +-- +endif::[] + +. Set the proxy to the desired ACR server in the `app-config.yaml` file as follows: ++ +-- +[source,yaml] +---- + # app-config.yaml + proxy: + '/acr/api': + target: 'https://mycontainerregistry.azurecr.io/acr/v1/' + changeOrigin: true + headers: + # If you use Bearer Token for authorization, please replace the 'Basic' with 'Bearer' in the following line. + Authorization: 'Basic ${ACR_AUTH_TOKEN}' + # Change to "false" in case of using self hosted artifactory instance with a self-signed certificate + secure: true +---- +-- + +. 
Set the authorization using one of the following options: ++ +-- +* Basic authorization: +** Navigate to the ACR portal and go to the *Access Keys* tab. +** Retrieve the username and password of the Admin user and use the https://www.debugbear.com/basic-auth-header-generator[Basic Auth Header Generator tool] or run `echo printf ':' | base64` in a terminal to convert the credentials to a basic token. +** Set the generated token as `ACR_AUTH_TOKEN` in environment variables. + +* OAuth2: + +** Generate bearer access token using the process described in Authenticate with an Azure Container Registry. ++ +You can generate a bearer token using your basic authorization token, for example: ++ +[source,curl] +---- +curl --location 'https://.azurecr.io/oauth2/token?scope=repository%3A*%3A*&service=.azurecr.io' \ --header 'Authorization: Basic ' +---- + +** Set the generated token as `ACR_AUTH_TOKEN` in environment variables. Also ensure that you replace the `Basic` with `Bearer` in the `app-config.yaml` file. +-- + +. Enable an additional tab on the entity view page using the `packages/app/src/components/catalog/EntityPage.tsx` file as follows: ++ +-- +[source] +---- +/* highlight-add-start */ +import { AcrPage, isAcrAvailable } from '@redhat/backstage-plugin-acr'; + +/* highlight-add-end */ + +const serviceEntityPage = ( + + // ... + {/* highlight-add-start */} + + + + Boolean(isAcrAvailable(e))}> + + + + + + + + {/* highlight-add-end */} + +); +---- +-- + +. Annotate your entity using the following annotations: ++ +-- +[source,yaml] +---- +metadata: + annotations: + 'azure-container-registry/repository-name': `', +---- +-- + +== For users + +=== Using the ACR plugin in {product-short} + +ACR is a front-end plugin that enables you to view information about the container images from your Azure Container Registry in {product-short}. + +.Prerequisites + +* Your {product-short} application is installed and running. +* You have installed the ACR plugin. 
For installation instructions, see <>. + +.Procedure + +. Open your {product-short} application and select a component from the *Catalog* page. + +. Go to the *ACR* tab. ++ +-- +image::acr-plugin-user1.png[acr-tab] + +The *ACR* tab in the {product-short} UI contains a list of container images and related information, such as *TAG*, *CREATED*, *LAST MODIFIED*, and *MANIFEST*. +-- diff --git a/artifacts/rhdh-plugins-reference/argocd/argocd-plugin-admin.adoc b/artifacts/rhdh-plugins-reference/argocd/argocd-plugin-admin.adoc new file mode 100644 index 0000000000..41e5294fd0 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/argocd/argocd-plugin-admin.adoc @@ -0,0 +1,78 @@ += Enabling the Argo CD plugin + +You can use the Argo CD plugin to visualize the Continuous Delivery (CD) workflows in OpenShift GitOps. This plugin provides a visual overview of the application’s status, deployment details, commit message, author of the commit, container image promoted to environment and deployment history. + +.Prerequisites + +* Add Argo CD instance information to your `app-config.yaml` configmap as shown in the following example: + ++ +[source,yaml] +---- +argocd: + appLocatorMethods: + - type: 'config' + instances: + - name: argoInstance1 + url: https://argoInstance1.com + username: ${ARGOCD_USERNAME} + password: ${ARGOCD_PASSWORD} + - name: argoInstance2 + url: https://argoInstance2.com + username: ${ARGOCD_USERNAME} + password: ${ARGOCD_PASSWORD} +---- + +* Add the following annotation to the entity’s `catalog-info.yaml` file to identify the Argo CD applications. + ++ +[source,yaml] +---- +annotations: + ... + # The label that Argo CD uses to fetch all the applications. The format to be used is label.key=label.value. For example, rht-gitops.com/janus-argocd=quarkus-app. 
+ + argocd/app-selector: '${ARGOCD_LABEL_SELECTOR}' +---- + +* (Optional) Add the following annotation to the entity’s `catalog-info.yaml` file to switch between Argo CD instances as shown in the following example: + ++ +[source,yaml] +---- + annotations: + ... + # The Argo CD instance name used in `app-config.yaml`. + + argocd/instance-name: '${ARGOCD_INSTANCE}' +---- + ++ +[NOTE] +==== +If you do not set this annotation, the Argo CD plugin defaults to the first Argo CD instance configured in `app-config.yaml`. +==== + +.Procedure + +. Add the following to your dynamic-plugins ConfigMap to enable the Argo CD plugin. ++ +[source,yaml] +---- +global: + dynamic: + includes: + - dynamic-plugins.default.yaml + plugins: + - package: ./dynamic-plugins/dist/roadiehq-backstage-plugin-argo-cd-backend-dynamic + disabled: false + - package: ./dynamic-plugins/dist/backstage-community-plugin-redhat-argocd + disabled: false +---- + +[role="_additional-resources"] +.Additional resources + +* The package path, scope, and name of the {company-name} ArgoCD plugin has changed since 1.2. For more information, see link:{release-notes-url}#removed-functionality-rhidp-4293[Breaking Changes] in the _{rn-product-title}_. + +* For more information on installing dynamic plugins, see link:{installing-and-viewing-dynamic-plugins-url}[{installing-and-viewing-dynamic-plugins-title}]. diff --git a/artifacts/rhdh-plugins-reference/argocd/argocd-plugin-readme.adoc b/artifacts/rhdh-plugins-reference/argocd/argocd-plugin-readme.adoc new file mode 100644 index 0000000000..b91a49f436 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/argocd/argocd-plugin-readme.adoc @@ -0,0 +1,36 @@ += Enabling and configuring Argo CD plugin + +You can use the Argo CD plugin to visualize the Continuous Delivery (CD) workflows in OpenShift GitOps. 
This plugin provides a visual overview of the application’s status, deployment details, commit message, author of the commit, container image promoted to environment and deployment history. + +== Using the Argo CD plugin + +.Prerequisites + +* You have enabled the Argo CD plugin in {product} {product-very-short}. + +.Procedures + +. Select the *Catalog* tab and choose the component that you want to use. + +. Select the *CD* tab to view insights into deployments managed by Argo CD. + ++ +image::rhdh-plugins-reference/argocd.png[CD tab Argo CD] + +. Select an appropriate card to view the deployment details (for example, commit message, author name, and deployment history). + ++ +image::rhdh-plugins-reference/sidebar.png[Sidebar] + +.. Click the link icon (image:rhdh-plugins-reference/link.png[Link icon]) to open the deployment details in Argo CD. + +. Select the *Overview* tab and navigate to the Deployment summary section to review the summary of your application's deployment across namespaces. Additionally, select an appropriate Argo CD app to open the deployment details in Argo CD, or select a commit ID from the Revision column to review the changes in GitLab or GitHub. + ++ +image::rhdh-plugins-reference/deployment_summary.png[Deployment summary] + + +[role="_additional-resources"] +.Additional resources + +* For more information on dynamic plugins, see link:https://docs.redhat.com/en/documentation/red_hat_developer_hub/1.2/html/configuring_plugins_in_red_hat_developer_hub/rhdh-installing-dynamic-plugins[Dynamic plugin installation]. diff --git a/artifacts/rhdh-plugins-reference/argocd/argocd-plugin-user.adoc b/artifacts/rhdh-plugins-reference/argocd/argocd-plugin-user.adoc new file mode 100644 index 0000000000..2f707b8760 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/argocd/argocd-plugin-user.adoc @@ -0,0 +1,34 @@ += Using the Argo CD plugin + +You can use the Argo CD plugin to visualize the Continuous Delivery (CD) workflows in OpenShift GitOps. 
This plugin provides a visual overview of the application’s status, deployment details, commit message, author of the commit, container image promoted to environment and deployment history. + +.Prerequisites + +* You have enabled the Argo CD plugin in {product} {product-very-short}. + +.Procedures + +. Select the *Catalog* tab and choose the component that you want to use. + +. Select the *CD* tab to view insights into deployments managed by Argo CD. + ++ +image::rhdh-plugins-reference/argocd.png[CD tab Argo CD] + +. Select an appropriate card to view the deployment details (for example, commit message, author name, and deployment history). + ++ +image::rhdh-plugins-reference/sidebar.png[Sidebar] + +.. Click the link icon (image:rhdh-plugins-reference/link.png[Link icon]) to open the deployment details in Argo CD. + +. Select the *Overview* tab and navigate to the Deployment summary section to review the summary of your application's deployment across namespaces. Additionally, select an appropriate Argo CD app to open the deployment details in Argo CD, or select a commit ID from the Revision column to review the changes in GitLab or GitHub. + ++ +image::rhdh-plugins-reference/deployment_summary.png[Deployment summary] + + +[role="_additional-resources"] +.Additional resources + +* For more information on dynamic plugins, see link:https://docs.redhat.com/en/documentation/red_hat_developer_hub/1.2/html/configuring_plugins_in_red_hat_developer_hub/rhdh-installing-dynamic-plugins[Dynamic plugin installation]. 
diff --git a/artifacts/rhdh-plugins-reference/attributes.adoc b/artifacts/rhdh-plugins-reference/attributes.adoc new file mode 120000 index 0000000000..cf0dc4a954 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/attributes.adoc @@ -0,0 +1 @@ +../../artifacts/attributes.adoc \ No newline at end of file diff --git a/artifacts/rhdh-plugins-reference/images b/artifacts/rhdh-plugins-reference/images new file mode 120000 index 0000000000..4dc4d26928 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/images @@ -0,0 +1 @@ +../../images/rhdh-plugins-reference/ \ No newline at end of file diff --git a/artifacts/rhdh-plugins-reference/jfrog-artifactory/jfrog-install-dynamic-plugin.adoc b/artifacts/rhdh-plugins-reference/jfrog-artifactory/jfrog-install-dynamic-plugin.adoc new file mode 100644 index 0000000000..aba107f535 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/jfrog-artifactory/jfrog-install-dynamic-plugin.adoc @@ -0,0 +1,15 @@ +.Procedure +{product-short} dynamically installs the Jfrog Artifactory plugin. To enable it, set the `disabled` property to `false` as follows: + +[source,yaml] +---- +global: + dynamic: + includes: + - dynamic-plugins.default.yaml + plugins: + - package: ./dynamic-plugins/dist/janus-idp-backstage-plugin-jfrog-artifactory + disabled: false +---- + +.Configuration \ No newline at end of file diff --git a/artifacts/rhdh-plugins-reference/jfrog-artifactory/jfrog-plugin-readme.adoc b/artifacts/rhdh-plugins-reference/jfrog-artifactory/jfrog-plugin-readme.adoc new file mode 100644 index 0000000000..fa24848e18 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/jfrog-artifactory/jfrog-plugin-readme.adoc @@ -0,0 +1,135 @@ += Jfrog Artifactory + +The Jfrog Artifactory plugin displays information about your container images within the Jfrog Artifactory registry. + +[IMPORTANT] +==== +The Jfrog Artifactory plugin is a Technology Preview feature only. 
+ +Technology Preview features are not supported with Red Hat production service level agreements (SLAs), might not be functionally complete, and Red Hat does not recommend using them for production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. + +For more information on Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Scope]. + +Additional detail on how Red Hat provides support for bundled community dynamic plugins is available on the https://access.redhat.com/policy/developerhub-support-policy[Red Hat Developer Support Policy] page. +==== + +The Jfrog Artifactory plugin displays information about your container images within the Jfrog Artifactory registry. + +== For administrators + +=== Installing and configuring the Jfrog Artifactory plugin + +ifeval::[{doc-show-dynamic-content} == true] +include::../../../artifacts/rhdh-plugins-reference/jfrog-artifactory/jfrog-install-dynamic-plugin.adoc[leveloffset=+2] +endif::[] + +ifeval::[{doc-show-dynamic-content} == false] +The Red Hat Plug-ins for Backstage (RHPIB) packages are hosted in a separate NPM registry, which is maintained by Red Hat. To use these packages, you must adjust your NPM configuration to pull the `@redhat` scoped packages: + +[source] +---- +# update your .npmrc or .yarnrc file +yarn config set "@redhat:registry" https://npm.registry.redhat.com +# then pull a package +yarn add @redhat/backstage-plugin-quay +---- + +For more information, see link:https://docs.npmjs.com/cli/v9/configuring-npm/npmrc[npm docs]. + +Creating a `.npmrc` file ensures that all the packages are scoped under `@redhat` and are fetched from link:https://npm.registry.redhat.com/[Red Hat's NPM registry], while the rest dependencies remain sourced from other link:registry.npmjs.org[registry]. + +.Procedure + +. 
Run the following command to install the Jfrog Artifactory plugin: ++ +-- +[source,console] +---- +yarn workspace app add @redhat/backstage-plugin-jfrog-artifactory +---- +-- +endif::[] + +. Set the proxy to the desired Artifactory server in the `app-config.yaml` file as follows: ++ +-- +[source,yaml] +---- +proxy: + '/jfrog-artifactory/api': + target: 'http://:8082' # or https://.jfrog.io + headers: + # Authorization: 'Bearer ' + # Change to "false" in case of using self hosted artifactory instance with a self-signed certificate + secure: true +---- +-- + +ifeval::[{doc-show-dynamic-content} == false] +. Enable the *JFROG ARTIFACTORY* tab on the entity view page in `packages/app/src/components/catalog/EntityPage.tsx` file as follows: ++ +-- +[source] +---- +/* highlight-add-start */ +import { + isJfrogArtifactoryAvailable, + JfrogArtifactoryPage, +} from "@redhat/backstage-plugin-jfrog-artifactory"; + +/* highlight-add-end */ + +const serviceEntityPage = ( + + // ... + {/* highlight-add-start */} + + + + {/* highlight-add-end */} + +); +---- +-- + +. Annotate your entity using the following annotations: ++ +-- +[source,yaml] +---- +metadata: + annotations: + 'jfrog-artifactory/image-name': '' +---- +-- +endif::[] + +== For users + +=== Using the Jfrog Artifactory plugin in {product-short} + +Jfrog Artifactory is a front-end plugin that enables you to view the information about the container images that are available in your Jfrog Artifactory registry. + +.Prerequisites + +* Your {product-short} application is installed and running. +* You have installed the Jfrog Artifactory plugin. For installation and configuration steps, see <>. + +.Procedure + +. Open your {product-short} application and select a component from the *Catalog* page. + +. Go to the *JFROG ARTIFACTORY* tab. 
++ +-- +image::jfrog-plugin-user1.png[jfrog-tab] + +The *JFROG ARTIFACTORY* tab contains a list of container images and related information, such as *VERSION*, *REPOSITORIES*, *MANIFEST*, *MODIFIED*, and *SIZE*. +-- + + + diff --git a/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-admin.adoc b/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-admin.adoc new file mode 100644 index 0000000000..1c6a5c2632 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-admin.adoc @@ -0,0 +1,151 @@ +[id="rhdh-keycloak_{context}"] += Installing and configuring Keycloak + +The Keycloak backend plugin, which integrates Keycloak into {product-short}, has the following capabilities: + +* Synchronization of Keycloak users in a realm. +* Synchronization of Keycloak groups and their users in a realm. + +== Installation + +The Keycloak plugin is pre-loaded in {product-short} with basic configuration properties. To enable it, set the `disabled` property to `false` as follows: + +[source,yaml] +---- +global: + dynamic: + includes: + - dynamic-plugins.default.yaml + plugins: + - package: ./dynamic-plugins/dist/janus-idp-backstage-plugin-keycloak-backend-dynamic + disabled: false +---- + +== Basic configuration +To enable the Keycloak plugin, you must set the following environment variables: + +* `KEYCLOAK_BASE_URL` + +* `KEYCLOAK_LOGIN_REALM` + +* `KEYCLOAK_REALM` + +* `KEYCLOAK_CLIENT_ID` + +* `KEYCLOAK_CLIENT_SECRET` + +== Advanced configuration + +.Schedule configuration +You can configure a schedule in the `app-config.yaml` file, as follows: + +[source,yaml] +---- + catalog: + providers: + keycloakOrg: + default: + # ... 
+ # highlight-add-start + schedule: # optional; same options as in TaskScheduleDefinition + # supports cron, ISO duration, "human duration" as used in code + frequency: { minutes: 1 } + # supports ISO duration, "human duration" as used in code + timeout: { minutes: 1 } + initialDelay: { seconds: 15 } + # highlight-add-end +---- + +[NOTE] +==== +If you have made any changes to the schedule in the `app-config.yaml` file, then restart to apply the changes. +==== + +.Keycloak query parameters + +You can override the default Keycloak query parameters in the `app-config.yaml` file, as follows: + +[source,yaml] +---- + catalog: + providers: + keycloakOrg: + default: + # ... + # highlight-add-start + userQuerySize: 500 # Optional + groupQuerySize: 250 # Optional + # highlight-add-end +---- + +Communication between {product-short} and Keycloak is enabled by using the Keycloak API. Username and password, or client credentials are supported authentication methods. + + +The following table describes the parameters that you can configure to enable the plugin under `catalog.providers.keycloakOrg.` object in the `app-config.yaml` file: + +|=== +| Name | Description | Default Value | Required + +| `baseUrl` +| Location of the Keycloak server, such as `pass:c[https://localhost:8443/auth]`. Note that the newer versions of Keycloak omit the `/auth` context path. 
+| "" +| Yes + +| `realm` +| Realm to synchronize +| `master` +| No + +| `loginRealm` +| Realm used to authenticate +| `master` +| No + +| `username` +| Username to authenticate +| "" +| Yes if using password based authentication + +| `password` +| Password to authenticate +| "" +| Yes if using password based authentication + +| `clientId` +| Client ID to authenticate +| "" +| Yes if using client credentials based authentication + +| `clientSecret` +| Client Secret to authenticate +| "" +| Yes if using client credentials based authentication + +| `userQuerySize` +| Number of users to query at a time +| `100` +| No + +| `groupQuerySize` +| Number of groups to query at a time +| `100` +| No +|=== + +When using client credentials, the access type must be set to `confidential` and service accounts must be enabled. You must also add the following roles from the `realm-management` client role: + +* `query-groups` +* `query-users` +* `view-users` + +== Limitations + +If you have self-signed or corporate certificate issues, you can set the following environment variable before starting {product-short}: + +`NODE_TLS_REJECT_UNAUTHORIZED=0` + + +[NOTE] +==== +The solution of setting the environment variable is not recommended. +==== \ No newline at end of file diff --git a/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-readme.adoc b/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-readme.adoc new file mode 100644 index 0000000000..005e544e72 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-readme.adoc @@ -0,0 +1,180 @@ +[id="rhdh-keycloak_{context}"] += Installation and configuration of Keycloak + +The Keycloak backend plugin, which integrates Keycloak into {product-short}, has the following capabilities: + +* Synchronization of Keycloak users in a realm. +* Synchronization of Keycloak groups and their users in a realm. 
+ +== For administrators + +=== Installation + +The Keycloak plugin is pre-loaded in {product-short} with basic configuration properties. To enable it, set the `disabled` property to `false` as follows: + +[source,yaml] +---- +global: + dynamic: + includes: + - dynamic-plugins.default.yaml + plugins: + - package: ./dynamic-plugins/dist/janus-idp-backstage-plugin-keycloak-backend-dynamic + disabled: false +---- + +=== Basic configuration +To enable the Keycloak plugin, you must set the following environment variables: + +* `KEYCLOAK_BASE_URL` + +* `KEYCLOAK_LOGIN_REALM` + +* `KEYCLOAK_REALM` + +* `KEYCLOAK_CLIENT_ID` + +* `KEYCLOAK_CLIENT_SECRET` + +=== Advanced configuration + +.Schedule configuration +You can configure a schedule in the `app-config.yaml` file, as follows: + +[source,yaml] +---- + catalog: + providers: + keycloakOrg: + default: + # ... + # highlight-add-start + schedule: # optional; same options as in TaskScheduleDefinition + # supports cron, ISO duration, "human duration" as used in code + frequency: { minutes: 1 } + # supports ISO duration, "human duration" as used in code + timeout: { minutes: 1 } + initialDelay: { seconds: 15 } + # highlight-add-end +---- + +[NOTE] +==== +If you have made any changes to the schedule in the `app-config.yaml` file, then restart to apply the changes. +==== + +.Keycloak query parameters + +You can override the default Keycloak query parameters in the `app-config.yaml` file, as follows: + +[source,yaml] +---- + catalog: + providers: + keycloakOrg: + default: + # ... + # highlight-add-start + userQuerySize: 500 # Optional + groupQuerySize: 250 # Optional + # highlight-add-end +---- + +Communication between {product-short} and Keycloak is enabled by using the Keycloak API. Username and password, or client credentials are supported authentication methods. 
The following table describes the parameters that you can configure to enable the plugin under the `catalog.providers.keycloakOrg.<ENVIRONMENT_NAME>` object in the `app-config.yaml` file:
+==== + +After the first import is complete, you can select *User* to list the users from the catalog page: + +image::rhdh-plugins-reference/users.jpg[catalog-list] + +You can see the list of users on the page: + +image::rhdh-plugins-reference/user-list.jpg[user-list] + +When you select a user, you can see the information imported from Keycloak: + +image::rhdh-plugins-reference/user2.jpg[user-profile] + +You can also select a group, view the list, and select or view the information imported from Keycloak for a group: + +image::rhdh-plugins-reference/group1.jpg[group-profile] diff --git a/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-user.adoc b/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-user.adoc new file mode 100644 index 0000000000..cf91a17ba0 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-user.adoc @@ -0,0 +1,32 @@ +[id="rhdh-keycloak_{context}"] += Using Keycloak + +The Keycloak backend plugin, which integrates Keycloak into {product-short}, has the following capabilities: + +* Synchronization of Keycloak users in a realm. +* Synchronization of Keycloak groups and their users in a realm. + +== Importing users and groups in {product-short} using the Keycloak plugin + +After configuring the plugin successfully, the plugin imports the users and groups each time when started. + +[NOTE] +==== +If you set up a schedule, users and groups will also be imported. 
+==== + +After the first import is complete, you can select *User* to list the users from the catalog page: + +image::rhdh-plugins-reference/users.jpg[catalog-list] + +You can see the list of users on the page: + +image::rhdh-plugins-reference/user-list.jpg[user-list] + +When you select a user, you can see the information imported from Keycloak: + +image::rhdh-plugins-reference/user2.jpg[user-profile] + +You can also select a group, view the list, and select or view the information imported from Keycloak for a group: + +image::rhdh-plugins-reference/group1.jpg[group-profile] diff --git a/artifacts/rhdh-plugins-reference/modules b/artifacts/rhdh-plugins-reference/modules new file mode 120000 index 0000000000..8b0e854007 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/modules @@ -0,0 +1 @@ +../../modules \ No newline at end of file diff --git a/artifacts/rhdh-plugins-reference/nexus-repository-manager/nexus-repository-manager-install-dynamic-plugin.adoc b/artifacts/rhdh-plugins-reference/nexus-repository-manager/nexus-repository-manager-install-dynamic-plugin.adoc new file mode 100644 index 0000000000..97af95a3d5 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/nexus-repository-manager/nexus-repository-manager-install-dynamic-plugin.adoc @@ -0,0 +1,14 @@ +The Nexus Repository Manager plugin is pre-loaded in {product-short} with basic configuration properties. 
To enable it, set the `disabled` property to `false` as follows:
To enable it, set the disabled property to `false` as follows: + +[source,yaml] +---- +global: + dynamic: + includes: + - dynamic-plugins.default.yaml + plugins: + - package: ./dynamic-plugins/dist/janus-idp-backstage-plugin-nexus-repository-manager + disabled: false +---- + +== Configuration +. Set the proxy to the desired Nexus Repository Manager server in the `app-config.yaml` file as follows: ++ +[source,yaml] +---- +proxy: + '/nexus-repository-manager': + target: 'https://' + headers: + X-Requested-With: 'XMLHttpRequest' + # Uncomment the following line to access a private Nexus Repository Manager using a token + # Authorization: 'Bearer ' + changeOrigin: true + # Change to "false" in case of using self hosted Nexus Repository Manager instance with a self-signed certificate + secure: true +---- + +. Optional: Change the base URL of Nexus Repository Manager proxy as follows: ++ +[source,yaml] +---- +nexusRepositoryManager: + # default path is `/nexus-repository-manager` + proxyPath: /custom-path +---- + +. Optional: Enable the following experimental annotations: ++ +[source,yaml] +---- +nexusRepositoryManager: + experimentalAnnotations: true +---- + +. Annotate your entity using the following annotations: ++ +[source,yaml] +---- +metadata: + annotations: + # insert the chosen annotations here + # example + nexus-repository-manager/docker.image-name: `/`, +---- + +//Cannot xref across titles. Convert xref to a link. +//For additional information about installing and configuring dynamic plugins, see the xref:rhdh-installing-dynamic-plugins[] section. 
\ No newline at end of file diff --git a/artifacts/rhdh-plugins-reference/nexus-repository-manager/nexus-repository-manager-plugin-readme.adoc b/artifacts/rhdh-plugins-reference/nexus-repository-manager/nexus-repository-manager-plugin-readme.adoc new file mode 100644 index 0000000000..46f67acf1b --- /dev/null +++ b/artifacts/rhdh-plugins-reference/nexus-repository-manager/nexus-repository-manager-plugin-readme.adoc @@ -0,0 +1,102 @@ += Installation and configuration of Nexus Repository Manager + +The Nexus Repository Manager plugin displays the information about your build artifacts in your {product-short} application. The build artifacts are available in the Nexus Repository Manager. + +[IMPORTANT] +==== +The Nexus Repository Manager plugin is a Technology Preview feature only. + +Technology Preview features are not supported with Red Hat production service level agreements (SLAs), might not be functionally complete, and Red Hat does not recommend using them for production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. + +For more information on Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Scope]. + +Additional detail on how Red Hat provides support for bundled community dynamic plugins is available on the https://access.redhat.com/policy/developerhub-support-policy[Red Hat Developer Support Policy] page. +==== + +== For administrators + +=== Installing and configuring the Nexus Repository Manager plugin + +.Installation +The Nexus Repository Manager plugin is pre-loaded in {product-short} with basic configuration properties. 
To enable it, set the disabled property to `false` as follows: + +[source,yaml] +---- +global: + dynamic: + includes: + - dynamic-plugins.default.yaml + plugins: + - package: ./dynamic-plugins/dist/janus-idp-backstage-plugin-nexus-repository-manager + disabled: false +---- + +.Configuration +. Set the proxy to the desired Nexus Repository Manager server in the `app-config.yaml` file as follows: ++ +[source,yaml] +---- +proxy: + '/nexus-repository-manager': + target: 'https://' + headers: + X-Requested-With: 'XMLHttpRequest' + # Uncomment the following line to access a private Nexus Repository Manager using a token + # Authorization: 'Bearer ' + changeOrigin: true + # Change to "false" in case of using self hosted Nexus Repository Manager instance with a self-signed certificate + secure: true +---- + +. Optional: Change the base URL of Nexus Repository Manager proxy as follows: ++ +[source,yaml] +---- +nexusRepositoryManager: + # default path is `/nexus-repository-manager` + proxyPath: /custom-path +---- + +. Optional: Enable the following experimental annotations: ++ +[source,yaml] +---- +nexusRepositoryManager: + experimentalAnnotations: true +---- + +. Annotate your entity using the following annotations: ++ +[source,yaml] +---- +metadata: + annotations: + # insert the chosen annotations here + # example + nexus-repository-manager/docker.image-name: `/`, +---- + +//Cannot xref across titles. Convert xref to a link. +//For additional information about installing and configuring dynamic plugins, see the xref:rhdh-installing-dynamic-plugins[] section. + +== For users + +=== Using the Nexus Repository Manager plugin in {product-short} + +The Nexus Repository Manager is a front-end plugin that enables you to view the information about build artifacts. + +.Prerequisites + +- Your {product-short} application is installed and running. +- You have installed the Nexus Repository Manager plugin. For the installation process, see <>. + +.Procedure + +1. 
Open your {product-short} application and select a component from the *Catalog* page. +2. Go to the *BUILD ARTIFACTS* tab. ++ +-- +The *BUILD ARTIFACTS* tab contains a list of build artifacts and related information, such as *VERSION*, *REPOSITORY*, *REPOSITORY TYPE*, *MANIFEST*, *MODIFIED*, and *SIZE*. + +image::rhdh-plugins-reference/nexus-repository-manager.png[nexus-repository-manager-tab] +-- diff --git a/artifacts/rhdh-plugins-reference/nexus-repository-manager/nexus-repository-manager-plugin-user.adoc b/artifacts/rhdh-plugins-reference/nexus-repository-manager/nexus-repository-manager-plugin-user.adoc new file mode 100644 index 0000000000..6c27f14dc4 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/nexus-repository-manager/nexus-repository-manager-plugin-user.adoc @@ -0,0 +1,33 @@ += Using the Nexus Repository Manager plugin + +The Nexus Repository Manager plugin displays the information about your build artifacts in your {product-short} application. The build artifacts are available in the Nexus Repository Manager. + +[IMPORTANT] +==== +The Nexus Repository Manager plugin is a Technology Preview feature only. + +Technology Preview features are not supported with Red Hat production service level agreements (SLAs), might not be functionally complete, and Red Hat does not recommend using them for production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. + +For more information on Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Scope]. + +Additional detail on how Red Hat provides support for bundled community dynamic plugins is available on the https://access.redhat.com/policy/developerhub-support-policy[Red Hat Developer Support Policy] page. +==== + +The Nexus Repository Manager is a front-end plugin that enables you to view the information about build artifacts. 
+ +.Prerequisites + +- Your {product-short} application is installed and running. +- You have installed the Nexus Repository Manager plugin. +//For the installation process, see xref:installing-configuring-nexus-plugin[Installing and configuring the Nexus Repository Manager plugin]. + +.Procedure + +1. Open your {product-short} application and select a component from the *Catalog* page. +2. Go to the *BUILD ARTIFACTS* tab. ++ +-- +The *BUILD ARTIFACTS* tab contains a list of build artifacts and related information, such as *VERSION*, *REPOSITORY*, *REPOSITORY TYPE*, *MANIFEST*, *MODIFIED*, and *SIZE*. + +image::rhdh-plugins-reference/nexus-repository-manager.png[nexus-repository-manager-tab] +-- diff --git a/artifacts/rhdh-plugins-reference/ocm/ocm-backend-plugin-readme.adoc b/artifacts/rhdh-plugins-reference/ocm/ocm-backend-plugin-readme.adoc new file mode 100644 index 0000000000..74a63c1ade --- /dev/null +++ b/artifacts/rhdh-plugins-reference/ocm/ocm-backend-plugin-readme.adoc @@ -0,0 +1,5 @@ += Open Cluster Management plugin for Backstage + +The Open Cluster Management (OCM) plugin integrates your Backstage instance with OCM. + +For more information about OCM plugin, see the https://github.com/janus-idp/backstage-plugins/tree/main/plugins/ocm[Open Cluster Management plugin documentation] on GitHub. diff --git a/artifacts/rhdh-plugins-reference/ocm/ocm-common-plugin-readme.adoc b/artifacts/rhdh-plugins-reference/ocm/ocm-common-plugin-readme.adoc new file mode 100644 index 0000000000..74a63c1ade --- /dev/null +++ b/artifacts/rhdh-plugins-reference/ocm/ocm-common-plugin-readme.adoc @@ -0,0 +1,5 @@ += Open Cluster Management plugin for Backstage + +The Open Cluster Management (OCM) plugin integrates your Backstage instance with OCM. + +For more information about OCM plugin, see the https://github.com/janus-idp/backstage-plugins/tree/main/plugins/ocm[Open Cluster Management plugin documentation] on GitHub. 
diff --git a/artifacts/rhdh-plugins-reference/ocm/ocm-plugin-readme.adoc b/artifacts/rhdh-plugins-reference/ocm/ocm-plugin-readme.adoc new file mode 100644 index 0000000000..504259a992 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/ocm/ocm-plugin-readme.adoc @@ -0,0 +1,439 @@ += Open Cluster Management plugin for Backstage + +The Open Cluster Management (OCM) plugin integrates your Backstage instance with the `MultiClusterHub` and `MultiCluster` engines of OCM. + +== Capabilities + +The OCM plugin has the following capabilities: + +* All clusters represented as `ManagedCluster` in `MultiClusterHub` or MCE are discovered and imported into the Backstage catalog, such as: + ** Entity is defined as `kind: Resource` with `spec.type` set to `kubernetes-cluster`. + ** Links to the OpenShift Container Platform (OCP) console, OCM console, and OpenShift Cluster Manager are provided in `metadata.links`. +* Shows real-time data from OCM on the Resource entity page, including: + ** Cluster current status (up or down) + ** Cluster nodes status (up or down) + ** Cluster details (console link, OCP, and Kubernetes version) + ** Details about available compute resources on the cluster + +== For administrators + +=== Installation + +The Red Hat Plug-ins for Backstage (RHPIB) packages are hosted in a separate NPM registry, which is maintained by Red Hat. To use these packages, you must adjust your NPM configuration to pull the `@redhat` scoped packages: + +[source] +---- +# update your .npmrc or .yarnrc file +yarn config set "@redhat:registry" https://npm.registry.redhat.com +# then pull a package +yarn add @redhat/backstage-plugin-quay +---- + +For more information, see link:https://docs.npmjs.com/cli/v9/configuring-npm/npmrc[npm docs]. 
+ +Creating a `.npmrc` file ensures that all the packages are scoped under `@redhat` and are fetched from link:https://npm.registry.redhat.com/[Red Hat's NPM registry], while the rest of the dependencies remain sourced from the default link:https://registry.npmjs.org[NPM registry]. + +Using this configuration, you can proceed with the installation of the individual packages. + +The OCM plugin is composed of two packages, including: + +* The `@redhat/backstage-plugin-ocm-backend` package connects the Backstage server to OCM. For the setup process, see the <> section. +* The `@redhat/backstage-plugin-ocm` package, which contains front-end components, requires the `\*-backend` package to be present and properly set up. For detailed instructions on setting up the backend, refer to the <> section. + +[NOTE] +==== +If you are interested in Resource discovery and do not want any of the front-end components, then you can install and configure the `@redhat/backstage-plugin-ocm-backend` package only. +==== + + +==== Prerequisites + +* OCM is deployed and configured on a Kubernetes cluster. +* https://backstage.io/docs/features/kubernetes[Kubernetes plugin for Backstage] is installed. +* A `ClusterRole` is granted to the `ServiceAccount` accessing the hub cluster as follows: ++ +-- +[source,yaml] +---- + kind: ClusterRole + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: backstage-ocm-plugin + rules: + - apiGroups: + - cluster.open-cluster-management.io + resources: + - managedclusters + verbs: + - get + - watch + - list + - apiGroups: + - internal.open-cluster-management.io + resources: + - managedclusterinfos + verbs: + - get + - watch + - list +---- +-- + +==== Setting up the OCM backend package + +. Install the OCM backend plugin using the following command: ++ +-- +[source,console] +---- +yarn workspace backend add @redhat/backstage-plugin-ocm-backend +---- +-- + +. 
Configure the OCM backend plugin using one of the following configurations: +** The OCM configuration provides the information about your hub. To use the OCM configuration, add the following code to your `app-config.yaml` file: ++ +-- +[source,yaml] +---- +`yaml title="app-config.yaml" + catalog: + providers: + ocm: + env: # Key is reflected as provider ID. Defines and claims plugin instance ownership of entities + name: # Name that the hub cluster will assume in Backstage Catalog (in OCM this is always local-cluster which can be confusing) + url: # Url of the hub cluster API endpoint + serviceAccountToken: # Token used for querying data from the hub + skipTLSVerify: # Skip TLS certificate verification, defaults to false + caData: # Base64-encoded CA bundle in PEM format + +---- +-- + +** If the Backstage Kubernetes plugin is installed and configured to connect to the hub cluster, then you can bind the both hub and Kubernetes configuration by providing the name of the hub in the `app-config.yaml` as follows: ++ +-- +[source,yaml] +---- +```yaml title="app-config.yaml" + kubernetes: + serviceLocatorMethod: + type: 'multiTenant' + clusterLocatorMethods: + - type: 'config' + clusters: + # highlight-next-line + - name: + # ... + + catalog: + providers: + ocm: + env: # Key is reflected as provider ID. Defines and claims plugin instance ownership of entities + # highlight-next-line + kubernetesPluginRef: # Match the cluster name in kubernetes plugin config +---- + +Ensure that the Backstage uses a `ServiceAccount` token and the required permissions are granted as mentioned previously. + +This is useful when you already use a Kubernetes plugin in your Backstage instance. Also, the hub cluster must be connected using the `ServiceAccount`. + +For more information about the configuration, see https://backstage.io/docs/features/kubernetes/configuration#configuring-kubernetes-clusters[Backstage Kubernetes plugin] documentation. +-- + +. 
Create a new plugin instance in `packages/backend/src/plugins/ocm.ts` file as follows: ++ +-- +[source] +---- +```ts title="packages/backend/src/plugins/ocm.ts" + import { Router } from 'express'; + + import { createRouter } from '@redhat/backstage-plugin-ocm-backend'; + + import { PluginEnvironment } from '../types'; + + export default async function createPlugin( + env: PluginEnvironment, + ): Promise { + return await createRouter({ + logger: env.logger, + config: env.config, + }); + } +---- +-- + +. Import and plug the new instance into `packages/backend/src/index.ts` file: ++ +-- +[source] +---- +```ts title="packages/backend/src/index.ts" + /* highlight-add-next-line */ + import ocm from './plugins/ocm'; + + async function main() { + // ... + const createEnv = makeCreateEnv(config); + // ... + /* highlight-add-next-line */ + const ocmEnv = useHotMemoize(module, () => createEnv('ocm')); + // ... + const apiRouter = Router(); + // ... + /* highlight-add-next-line */ + apiRouter.use('/ocm', await ocm(ocmEnv)); + // ... + } +``` +---- +-- + +. Import the cluster `Resource` entity provider into the `catalog` plugin in the `packages/backend/src/plugins/catalog.ts` file. The scheduler also needs to be configured. Two configurations are possible here: + .. Configure the scheduler inside the `app-config.yaml`: ++ +-- +[source,yaml] +---- +```yaml title="app-config.yaml" + catalog: + providers: + ocm: + env: + # ... 
+ # highlight-add-start + schedule: # optional; same options as in TaskScheduleDefinition + # supports cron, ISO duration, "human duration" as used in code + frequency: { minutes: 1 } + # supports ISO duration, "human duration" as used in code + timeout: { minutes: 1 } + # highlight-add-end ++ +---- + +and then use the configured scheduler + +[source] +---- +```ts title="packages/backend/src/index.ts" + /* highlight-add-next-line */ + import { ManagedClusterProvider } from '@redhat/backstage-plugin-ocm-backend'; + + export default async function createPlugin( + env: PluginEnvironment, + ): Promise { + const builder = await CatalogBuilder.create(env); + // ... + /* highlight-add-start */ + const ocm = ManagedClusterProvider.fromConfig(env.config, { + logger: env.logger, + scheduler: env.scheduler, + }); + builder.addEntityProvider(ocm); + /* highlight-add-start */ + // ... + } +---- +-- + +.. Add a schedule directly inside the `packages/backend/src/plugins/catalog.ts` file ++ +-- +[source] +---- +```ts title="packages/backend/src/index.ts" + /* highlight-add-next-line */ + import { ManagedClusterProvider } from '@redhat/backstage-plugin-ocm-backend'; + + export default async function createPlugin( + env: PluginEnvironment, + ): Promise { + const builder = await CatalogBuilder.create(env); + // ... + /* highlight-add-start */ + const ocm = ManagedClusterProvider.fromConfig(env.config, { + logger: env.logger, + schedule: env.scheduler.createScheduledTaskRunner({ + frequency: { minutes: 1 }, + timeout: { minutes: 1 }, + }), + }); + builder.addEntityProvider(ocm); + /* highlight-add-start */ + // ... + } +---- +-- + +. Optional: Configure the default owner for the cluster entities in the catalog for a specific environment. 
For example, use the following code to set `foo` as the owner for clusters from `env` in the `app-config.yaml` catalog section: ++ +-- +[source,yaml] +---- +`yaml title="app-config.yaml" + catalog: + providers: + ocm: + env: + # highlight-next-line + owner: user:foo +---- + +For more information about the default owner configuration, see https://backstage.io/docs/features/software-catalog/references/#string-references[upstream string references documentation]. +-- + +==== Setting up the OCM frontend package + +. Install the OCM frontend plugin using the following command: ++ +-- +[source,console] +---- +yarn workspace app add @redhat/backstage-plugin-ocm +---- +-- + +. Select the components that you want to use, such as: + +** `OcmPage`: This is a standalone page or dashboard displaying all clusters as tiles. You can add `OcmPage` to `packages/app/src/App.tsx` file as follows: ++ +-- +[source] +---- +```tsx title="packages/app/src/App.tsx" + /* highlight-add-next-line */ + import { OcmPage } from '@redhat/backstage-plugin-ocm'; + + const routes = ( + + {/* ... */} + {/* highlight-add-next-line */} + } />} /> + + ); +---- + +You can also update navigation in `packages/app/src/components/Root/Root.tsx` as follows: + +[source] +---- +```tsx title="packages/app/src/components/Root/Root.tsx" + /* highlight-add-next-line */ + import StorageIcon from '@material-ui/icons/Storage'; + + export const Root = ({ children }: PropsWithChildren<{}>) => ( + + + }> + {/* ... */} + {/* highlight-add-next-line */} + + + {/* ... */} + + {children} + + ); +---- +-- + +** `ClusterContextProvider`: This component is a React context provided for OCM data, which is related to the current entity. 
The `ClusterContextProvider` component is used to display any data on the React components mentioned in `packages/app/src/components/catalog/EntityPage.tsx`: ++ +-- +[source] +---- +```tsx title="packages/app/src/components/catalog/EntityPage.tsx" + /* highlight-add-start */ + import { + ClusterAvailableResourceCard, + ClusterContextProvider, + ClusterInfoCard, + } from '@redhat/backstage-plugin-ocm'; + + /* highlight-add-end */ + + const isType = (types: string | string[]) => (entity: Entity) => { + if (!entity?.spec?.type) { + return false; + } + return typeof types === 'string' + ? entity?.spec?.type === types + : types.includes(entity.spec.type as string); + }; + + export const resourcePage = ( + + {/* ... */} + {/* highlight-add-start */} + + + + + + + + + + + + + + + + + {/* highlight-add-end */} + + ); + + export const entityPage = ( + + {/* ... */} + {/* highlight-add-next-line */} + + + ); +---- + +In the previous codeblock, you can place the context provider into your `Resource` entity renderer, which is usually available in `packages/app/src/components/catalog/EntityPage.tsx` or in an imported component. + + ** ``: This is an entity component displaying details of a cluster in a table: + ** ``: This is an entity component displaying the available resources on a cluster. For example, see https://open-cluster-management.io/concepts/managedcluster/#cluster-heartbeats-and-status[`.status.capacity`] of the `ManagedCluster` resource. +-- + +== For users + +=== Using the OCM plugin in Backstage + +The OCM plugin integrates your Backstage instance with multi-cluster engines and displays real-time data from OCM. + +==== Prerequisites + +* Your Backstage application is installed and running. +* You have installed the OCM plugin. For the installation process, see <>. + +==== Procedure + +. Open your Backstage application. +. Click the *Clusters* tab from the left-side panel to view the *Managed Clusters* page. 
++ +-- +The *Managed Clusters* page displays the list of clusters with additional information, such as status, infrastructure provider, associated OpenShift version, and available nodes. + +image::ocm-plugin-user1.png[ocm-plugin-ui] + +You can also upgrade the OpenShift version for a cluster using the *Upgrade available* option in the *VERSION* column. +-- + +. Select a cluster from the *Managed Clusters* to view the related cluster information. ++ +-- +You are redirected to the cluster-specific page, which consists of: + + ** *Cluster Information*, such as name, status, accessed Kubernetes version, associated OpenShift ID and version, and accessed platform. + ** *Available* cluster capacity, including CPU cores, memory size, and number of pods. + ** *Related Links*, which enable you to access different consoles directly, such as OpenShift Console, OCM Console, and OpenShift Cluster Manager Console. + ** *Relations* card, which displays the visual representation of the cluster and associated dependencies. + +image::ocm-plugin-user2.png[ocm-plugin-ui] +-- diff --git a/artifacts/rhdh-plugins-reference/quay/quay-install-dynamic-plugin.adoc b/artifacts/rhdh-plugins-reference/quay/quay-install-dynamic-plugin.adoc new file mode 100644 index 0000000000..7c26e4a40f --- /dev/null +++ b/artifacts/rhdh-plugins-reference/quay/quay-install-dynamic-plugin.adoc @@ -0,0 +1,15 @@ +.Installation +The Quay plugin is pre-loaded in {product-short} with basic configuration properties. 
To enable it, set the `disabled` property to `false` as follows: + +[source,yaml] +---- +global: + dynamic: + includes: + - dynamic-plugins.default.yaml + plugins: + - package: ./dynamic-plugins/dist/janus-idp-backstage-plugin-quay + disabled: false +---- + +.Configuration \ No newline at end of file diff --git a/artifacts/rhdh-plugins-reference/quay/quay-plugin-readme.adoc b/artifacts/rhdh-plugins-reference/quay/quay-plugin-readme.adoc new file mode 100644 index 0000000000..c0dd3f9b2a --- /dev/null +++ b/artifacts/rhdh-plugins-reference/quay/quay-plugin-readme.adoc @@ -0,0 +1,127 @@ += Quay + +The Quay plugin displays the information about your container images within the Quay registry in your {product-short} application. + +== For administrators + +[[installation]] +=== Installing and configuring the Quay plugin + +ifeval::[{doc-show-dynamic-content} == true] +include::../../../artifacts/rhdh-plugins-reference/quay/quay-install-dynamic-plugin.adoc[leveloffset=+2] +endif::[] + +ifeval::[{doc-show-dynamic-content} == false] +The Red Hat Plug-ins for Backstage (RHPIB) packages are hosted in a separate NPM registry, which is maintained by Red Hat. To use these packages, you must adjust your NPM configuration to pull the `@redhat` scoped packages: + +[source] +---- +# update your .npmrc or .yarnrc file +yarn config set "@redhat:registry" https://npm.registry.redhat.com +# then pull a package +yarn add @redhat/backstage-plugin-quay +---- + +For more information, see link:https://docs.npmjs.com/cli/v9/configuring-npm/npmrc[npm docs]. + +Creating a `.npmrc` file ensures that all the packages are scoped under `@redhat` and are fetched from link:https://npm.registry.redhat.com/[Red Hat's NPM registry], while the rest of the dependencies remain sourced from the default link:https://registry.npmjs.org[NPM registry]. + +You can now install the Quay plugin using the following command: + +[source,console] +---- +yarn workspace app add @redhat/backstage-plugin-quay +---- +endif::[] + + +. 
Set the proxy to the desired Quay server in the `app-config.yaml` file as follows: ++ +-- +[source,yaml] +---- + ```yaml title="app-config.yaml" + proxy: + '/quay/api': + target: 'https://quay.io' + headers: + X-Requested-With: 'XMLHttpRequest' + # Uncomment the following line to access a private Quay Repository using a token + # Authorization: 'Bearer ' + changeOrigin: true + # Change to "false" in case of using self hosted quay instance with a self-signed certificate + secure: true + + quay: + # The UI url for Quay, used to generate the link to Quay + uiUrl: 'https://quay.io' + ``` +---- +-- + +ifeval::[{doc-show-dynamic-content} == false] +. Enable an additional tab on the entity view page in `packages/app/src/components/catalog/EntityPage.tsx`: ++ +-- +[source] +---- + ```tsx title="packages/app/src/components/catalog/EntityPage.tsx" + /* highlight-add-next-line */ + import { isQuayAvailable, QuayPage } from '@redhat/backstage-plugin-quay'; + + const serviceEntityPage = ( + + {/* ... */} + {/* highlight-add-next-line */} + + + + + ); + ``` +---- +-- +endif::[] + +. Annotate your entity with the following annotations: ++ +-- +[source,yaml] +---- + ```yaml title="catalog-info.yaml" + metadata: + annotations: + 'quay.io/repository-slug': `/', + ``` +---- +-- + +== For users + +=== Using the Quay plugin in {product-short} + +Quay is a front-end plugin that enables you to view the information about the container images. + +.Prerequisites + +* Your {product-short} application is installed and running. +* You have installed the Quay plugin. For installation process, see <>. + +.Procedure + +. Open your {product-short} application and select a component from the *Catalog* page. +. Go to the *Image Registry* tab. ++ +The *Image Registry* tab in the {product-short} UI contains a list of container images and related information, such as *TAG*, *LAST MODIFIED*, *SECURITY SCAN*, *SIZE*, *EXPIRES*, and *MANIFEST*. ++ +image::quay-plugin-backstage1.png[quay-tab] + +. 
If a container image does not pass the security scan, select the security scan value of the image to check the vulnerabilities. ++ +image::quay-plugin-backstage2.png[quay-tab] ++ +The vulnerabilities page displays the associated advisory with a link, severity, package name, and current and fixed versions. ++ +image::quay-plugin-backstage3.png[quay-tab-vulnerabilities] ++ +The advisory link redirects to the Red Hat Security Advisory page that contains detailed information about the advisory, including the solution. diff --git a/artifacts/rhdh-plugins-reference/snip-concious-language.adoc b/artifacts/rhdh-plugins-reference/snip-concious-language.adoc new file mode 120000 index 0000000000..1bd106d7d0 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/snip-concious-language.adoc @@ -0,0 +1 @@ +../../artifacts/snip-conscious-language.adoc \ No newline at end of file diff --git a/artifacts/rhdh-plugins-reference/tekton/tekton-install-dynamic-plugin.adoc b/artifacts/rhdh-plugins-reference/tekton/tekton-install-dynamic-plugin.adoc new file mode 100644 index 0000000000..4b876988ee --- /dev/null +++ b/artifacts/rhdh-plugins-reference/tekton/tekton-install-dynamic-plugin.adoc @@ -0,0 +1,13 @@ +.Installation +{product-short} dynamically installs the Tekton plugin. 
To enable it, set the `disabled` property to `false` as follows: + +[source,yaml] +---- +global: + dynamic: + includes: + - dynamic-plugins.default.yaml + plugins: + - package: ./dynamic-plugins/dist/janus-idp-backstage-plugin-tekton + disabled: false +---- diff --git a/artifacts/rhdh-plugins-reference/tekton/tekton-plugin-admin.adoc b/artifacts/rhdh-plugins-reference/tekton/tekton-plugin-admin.adoc new file mode 100644 index 0000000000..2e5d8bba25 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/tekton/tekton-plugin-admin.adoc @@ -0,0 +1,143 @@ +[[installation-and-configuration-tekton]] += Installing and configuring the Tekton plugin + +You can use the Tekton plugin to visualize the results of CI/CD pipeline runs on your Kubernetes or OpenShift clusters. The plugin allows users to visually see the high-level status of all associated tasks in the pipeline for their applications. + +[[installing-tekton-plugin]] +== Installation + +.Prerequisites +* You have installed and configured the `@backstage/plugin-kubernetes` and `@backstage/plugin-kubernetes-backend` dynamic plugins. +//For more information about installing dynamic plugins, see xref:rhdh-installing-dynamic-plugins[]. +//Cannot xref across titles. Convert xref to a link. + +* You have configured the Kubernetes plugin to connect to the cluster using a `ServiceAccount`. + +* The `ClusterRole` must be granted for custom resources (PipelineRuns and TaskRuns) to the `ServiceAccount` accessing the cluster. ++ +[NOTE] +If you have the RHDH Kubernetes plugin configured, then the `ClusterRole` is already granted. + +* To view the pod logs, you have granted permissions for `pods/log`. + +* You can use the following code to grant the `ClusterRole` for custom resources and pod logs: ++ +-- +[source,yaml] +---- +kubernetes: + ... + customResources: + - group: 'tekton.dev' + apiVersion: 'v1' + plural: 'pipelineruns' + - group: 'tekton.dev' + apiVersion: 'v1' + plural: 'taskruns' + + ... 
+ apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: backstage-read-only + rules: + - apiGroups: + - "" + resources: + - pods/log + verbs: + - get + - list + - watch + ... + - apiGroups: + - tekton.dev + resources: + - pipelineruns + - taskruns + verbs: + - get + - list +---- +-- ++ +You can use the prepared manifest for a read-only `ClusterRole`, which provides access for both Kubernetes plugin and Tekton plugin. + +* Add the following annotation to the entity's `catalog-info.yaml` file to identify whether an entity contains the Kubernetes resources: ++ +-- +[source,yaml] +---- +annotations: + ... + + backstage.io/kubernetes-id: +---- +-- + +* You can also add the `backstage.io/kubernetes-namespace` annotation to identify the Kubernetes resources using the defined namespace. ++ +-- +[source,yaml] +---- +annotations: + ... + + backstage.io/kubernetes-namespace: +---- +-- + +* Add the following annotation to the `catalog-info.yaml` file of the entity to enable the Tekton related features in RHDH. The value of the annotation identifies the name of the RHDH entity: ++ +-- +[source,yaml] +---- +annotations: + ... + + janus-idp.io/tekton : +---- +-- + +* Add a custom label selector, which RHDH uses to find the Kubernetes resources. The label selector takes precedence over the ID annotations. ++ +-- +[source,yaml] +---- +annotations: + ... + + backstage.io/kubernetes-label-selector: 'app=my-app,component=front-end' +---- +-- + +* Add the following label to the resources so that the Kubernetes plugin gets the Kubernetes resources from the requested entity: ++ +-- +[source,yaml] +---- +labels: + ... + + backstage.io/kubernetes-id: +---- +-- ++ +[NOTE] +When you use the label selector, the mentioned labels must be present on the resource. + +.Procedure +* The Tekton plugin is pre-loaded in RHDH with basic configuration properties. 
To enable it, set the `disabled` property to `false` as follows: ++ +-- +[source,yaml] +---- +global: + dynamic: + includes: + - dynamic-plugins.default.yaml + plugins: + - package: ./dynamic-plugins/dist/janus-idp-backstage-plugin-tekton + disabled: false +---- +-- \ No newline at end of file diff --git a/artifacts/rhdh-plugins-reference/tekton/tekton-plugin-readme.adoc b/artifacts/rhdh-plugins-reference/tekton/tekton-plugin-readme.adoc new file mode 100644 index 0000000000..010180df3b --- /dev/null +++ b/artifacts/rhdh-plugins-reference/tekton/tekton-plugin-readme.adoc @@ -0,0 +1,167 @@ +[[installation-and-configuration-tekton]] += Installation and configuration of Tekton + +You can use the Tekton plugin to visualize the results of CI/CD pipeline runs on your Kubernetes or OpenShift clusters. The plugin allows users to visually see the high-level status of all associated tasks in the pipeline for their applications. + +== For administrators + +[[installing-tekton-plugin]] +=== Installation + +.Prerequisites +* You have installed and configured the `@backstage/plugin-kubernetes` and `@backstage/plugin-kubernetes-backend` dynamic plugins. +//For more information about installing dynamic plugins, see xref:rhdh-installing-dynamic-plugins[]. +//Cannot xref across titles. Convert xref to a link. + +* You have configured the Kubernetes plugin to connect to the cluster using a `ServiceAccount`. + +* The `ClusterRole` must be granted for custom resources (PipelineRuns and TaskRuns) to the `ServiceAccount` accessing the cluster. ++ +[NOTE] +If you have the RHDH Kubernetes plugin configured, then the `ClusterRole` is already granted. + +* To view the pod logs, you have granted permissions for `pods/log`. + +* You can use the following code to grant the `ClusterRole` for custom resources and pod logs: ++ +-- +[source,yaml] +---- +kubernetes: + ... 
+ customResources: + - group: 'tekton.dev' + apiVersion: 'v1' + plural: 'pipelineruns' + - group: 'tekton.dev' + apiVersion: 'v1' + + + ... + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: backstage-read-only + rules: + - apiGroups: + - "" + resources: + - pods/log + verbs: + - get + - list + - watch + ... + - apiGroups: + - tekton.dev + resources: + - pipelineruns + - taskruns + verbs: + - get + - list +---- +-- ++ +You can use the prepared manifest for a read-only `ClusterRole`, which provides access for both Kubernetes plugin and Tekton plugin. + +* Add the following annotation to the entity's `catalog-info.yaml` file to identify whether an entity contains the Kubernetes resources: ++ +-- +[source,yaml] +---- +annotations: + ... + + backstage.io/kubernetes-id: +---- +-- + +* You can also add the `backstage.io/kubernetes-namespace` annotation to identify the Kubernetes resources using the defined namespace. ++ +-- +[source,yaml] +---- +annotations: + ... + + backstage.io/kubernetes-namespace: +---- +-- + +* Add the following annotation to the `catalog-info.yaml` file of the entity to enable the Tekton related features in RHDH. The value of the annotation identifies the name of the RHDH entity: ++ +-- +[source,yaml] +---- +annotations: + ... + + janus-idp.io/tekton : +---- +-- + +* Add a custom label selector, which RHDH uses to find the Kubernetes resources. The label selector takes precedence over the ID annotations. ++ +-- +[source,yaml] +---- +annotations: + ... + + backstage.io/kubernetes-label-selector: 'app=my-app,component=front-end' +---- +-- + +* Add the following label to the resources so that the Kubernetes plugin gets the Kubernetes resources from the requested entity: ++ +-- +[source,yaml] +---- +labels: + ... + + backstage.io/kubernetes-id: +---- +-- ++ +[NOTE] +When you use the label selector, the mentioned labels must be present on the resource. 
+
+.Procedure
+* The Tekton plugin is pre-loaded in RHDH with basic configuration properties. To enable it, set the `disabled` property to `false` as follows:
++
+--
+[source,yaml]
+----
+global:
+  dynamic:
+    includes:
+      - dynamic-plugins.default.yaml
+    plugins:
+      - package: ./dynamic-plugins/dist/janus-idp-backstage-plugin-tekton
+        disabled: false
+----
+--
+
+== For users
+
+[[using-tekton-plugin]]
+=== Using the Tekton plugin in RHDH
+You can use the Tekton front-end plugin to view `PipelineRun` resources.
+
+.Prerequisites
+* You have installed the Red Hat Developer Hub (RHDH).
+* You have installed the Tekton plugin. For the installation process, see xref:installation-and-configuration-tekton[Installing and configuring the Tekton plugin].
+
+.Procedure
+. Open your RHDH application and select a component from the *Catalog* page.
+. Go to the *CI* tab.
++
+The *CI* tab displays the list of PipelineRun resources associated with a Kubernetes cluster. The list contains pipeline run details, such as *NAME*, *VULNERABILITIES*, *STATUS*, *TASK STATUS*, *STARTED*, and *DURATION*.
++
+image::rhdh-plugins-reference/tekton-plugin-pipeline.png[ci-cd-tab-tekton]
+
+. Click the expand row button beside the PipelineRun name in the list to view the PipelineRun visualization. The pipeline run resource includes tasks to complete. When you hover the mouse pointer on a task card, you can view the steps to complete that particular task.
++
+image::rhdh-plugins-reference/tekton-plugin-pipeline-expand.png[ci-cd-tab-tekton]
diff --git a/artifacts/rhdh-plugins-reference/tekton/tekton-plugin-user.adoc b/artifacts/rhdh-plugins-reference/tekton/tekton-plugin-user.adoc
new file mode 100644
index 0000000000..ab5094488d
--- /dev/null
+++ b/artifacts/rhdh-plugins-reference/tekton/tekton-plugin-user.adoc
@@ -0,0 +1,22 @@
+[[installation-and-configuration-tekton]]
+= Using the Tekton plugin
+
+You can use the Tekton plugin to visualize the results of CI/CD pipeline runs on your Kubernetes or OpenShift clusters. The plugin allows users to visually see high level status of all associated tasks in the pipeline for their applications.
+
+You can use the Tekton front-end plugin to view `PipelineRun` resources.
+
+.Prerequisites
+* You have installed the Red Hat Developer Hub (RHDH).
+* You have installed the Tekton plugin. For the installation process, see xref:installation-and-configuration-tekton[Installing and configuring the Tekton plugin].
+
+.Procedure
+. Open your RHDH application and select a component from the *Catalog* page.
+. Go to the *CI* tab.
++
+The *CI* tab displays the list of PipelineRun resources associated with a Kubernetes cluster. The list contains pipeline run details, such as *NAME*, *VULNERABILITIES*, *STATUS*, *TASK STATUS*, *STARTED*, and *DURATION*.
++
+image::rhdh-plugins-reference/tekton-plugin-pipeline.png[ci-cd-tab-tekton]
+
+. Click the expand row button beside the PipelineRun name in the list to view the PipelineRun visualization. The pipeline run resource includes tasks to complete. When you hover the mouse pointer on a task card, you can view the steps to complete that particular task.
++ +image::rhdh-plugins-reference/tekton-plugin-pipeline-expand.png[ci-cd-tab-tekton] diff --git a/artifacts/rhdh-plugins-reference/topology/topology-install-dynamic-plugin.adoc b/artifacts/rhdh-plugins-reference/topology/topology-install-dynamic-plugin.adoc new file mode 100644 index 0000000000..243b78b9a5 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/topology/topology-install-dynamic-plugin.adoc @@ -0,0 +1,15 @@ +.Installation +The Topology plugin is pre-loaded in {product-short} with basic configuration properties. To enable it, set the `disabled` property to `false` as follows: + +[source,yaml] +---- +global: + dynamic: + includes: + - dynamic-plugins.default.yaml + plugins: + - package: ./dynamic-plugins/dist/janus-idp-backstage-plugin-topology + disabled: false +---- + +.Configuration \ No newline at end of file diff --git a/artifacts/rhdh-plugins-reference/topology/topology-plugin-admin.adoc b/artifacts/rhdh-plugins-reference/topology/topology-plugin-admin.adoc new file mode 100644 index 0000000000..e69de29bb2 diff --git a/artifacts/rhdh-plugins-reference/topology/topology-plugin-readme.adoc b/artifacts/rhdh-plugins-reference/topology/topology-plugin-readme.adoc new file mode 100644 index 0000000000..57662a8897 --- /dev/null +++ b/artifacts/rhdh-plugins-reference/topology/topology-plugin-readme.adoc @@ -0,0 +1,412 @@ += Topology + +The Topology plugin enables you to visualize the workloads such as Deployment, Job, Daemonset, Statefulset, CronJob, and Pods powering any service on the Kubernetes cluster. + +== For administrators + +=== Installation + +ifeval::[{doc-show-dynamic-content} == false] +The Red Hat Plug-ins for Backstage (RHPIB) packages are hosted in a separate NPM registry, which is maintained by Red Hat. 
To use these packages, you must adjust your NPM configuration to pull the `@redhat` scoped packages:
+
+[source]
+----
+# update your .npmrc or .yarnrc file
+yarn config set "@redhat:registry" https://npm.registry.redhat.com
+# then pull a package
+yarn add @redhat/backstage-plugin-quay
+----
+
+For more information, see link:https://docs.npmjs.com/cli/v9/configuring-npm/npmrc[npm docs].
+
+Creating a `.npmrc` file ensures that all the packages are scoped under `@redhat` and are fetched from link:https://npm.registry.redhat.com/[Red Hat's NPM registry], while the rest of the dependencies remain sourced from the default link:https://registry.npmjs.org/[NPM registry].
+
+Using this configuration, you can proceed with the installation of the individual packages.
+endif::[]
+.Prerequisites
+
+* The Kubernetes plugins including `@backstage/plugin-kubernetes` and `@backstage/plugin-kubernetes-backend` are installed and configured by following the https://backstage.io/docs/features/kubernetes/installation[installation] and https://backstage.io/docs/features/kubernetes/configuration[configuration] guides.
+* The Kubernetes plugin is configured and connects to the cluster using a `ServiceAccount`.
+* The https://backstage.io/docs/features/kubernetes/configuration#role-based-access-control[`ClusterRole`] must be granted to `ServiceAccount` accessing the cluster. If you have the {product-short} Kubernetes plugin configured, then the `ClusterRole` is already granted.
+* The following must be added in the `customResources` component in the https://backstage.io/docs/features/kubernetes/configuration#configuring-kubernetes-clusters[`app-config.yaml`] file to view the OpenShift route:
++
+--
+[source,yaml]
+----
+  kubernetes:
+    ...
+    customResources:
+      - group: 'route.openshift.io'
+        apiVersion: 'v1'
+        plural: 'routes'
+----
+
+Also, ensure that the route is granted a https://backstage.io/docs/features/kubernetes/configuration#role-based-access-control[`ClusterRole`].
You can use the following code to grant the `ClusterRole` to the route : + +[source,yaml] +---- + ... + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: backstage-read-only + rules: + ... + - apiGroups: + - route.openshift.io + resources: + - routes + verbs: + - get + - list + +---- +-- + +* The following permission must be granted to the https://backstage.io/docs/features/kubernetes/configuration#role-based-access-control[`ClusterRole`] to be able to view the pod logs: ++ +-- +[source,yaml] +---- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: backstage-read-only + rules: + ... + - apiGroups: + - '' + resources: + - pods + - pods/log + verbs: + - get + - list + - watch +---- +-- + +* The following code must be added in `customResources` component in the https://backstage.io/docs/features/kubernetes/configuration#configuring-kubernetes-clusters[`app-config.yaml`] file to view the Tekton PipelineRuns list in the side panel and to view the latest PipelineRun status in the Topology node decorator: ++ +-- +[source,yaml] +---- + kubernetes: + ... + customResources: + - group: 'tekton.dev' + apiVersion: 'v1beta1' + plural: 'pipelines' + - group: 'tekton.dev' + apiVersion: 'v1beta1' + plural: 'pipelineruns' + - group: 'tekton.dev' + apiVersion: 'v1beta1' + plural: 'taskruns' +---- + +Also, ensure that the Pipeline, PipelineRun, and TaskRun are granted a https://backstage.io/docs/features/kubernetes/configuration#role-based-access-control[`ClusterRole`]. You can use the following code to grant the `ClusterRole` to Pipeline, PipelineRun, and TaskRun: + +[source,yaml] +---- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: backstage-read-only + rules: + ... 
+ - apiGroups: + - tekton.dev + resources: + - pipelines + - pipelineruns + - taskruns + verbs: + - get + - list +---- +-- + +* The following configuration must be added in`customResources` component in the https://backstage.io/docs/features/kubernetes/configuration#configuring-kubernetes-clusters[`app-config.yaml`] file to view the edit code decorator: ++ +-- +[source,yaml] +---- + kubernetes: + ... + customResources: + - group: 'org.eclipse.che' + apiVersion: 'v2' + plural: 'checlusters' +---- + +Also, ensure that the `CheCluster` is granted a https://backstage.io/docs/features/kubernetes/configuration#role-based-access-control[`ClusterRole`] as shown in the following example code: + +[source,yaml] +---- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: backstage-read-only + rules: + ... + - apiGroups: + - org.eclipse.che + resources: + - checlusters + verbs: + - get + - list +---- +-- + +* The following annotations are added to workload resources in the `deployment.yaml` file to navigate to the GitHub repository of the associated application using the edit code decorator: ++ +-- +[source,yaml] +---- +annotations: + app.openshift.io/vcs-uri: +---- + +You can also add the following annotation to navigate to a specific branch: + +[source,yaml] +---- +annotations: + app.openshift.io/vcs-ref: +---- + +If Red Hat OpenShift Dev Spaces (RHODS) is installed and configured and Git URL annotations are also added in the workload YAML file, then clicking on the edit code decorator redirects you to the RHODS instance. For more information about installing RHODS, see https://access.redhat.com/documentation/en-us/red_hat_openshift_dev_spaces/3.7/html/administration_guide/installing-devspaces[Administration guide] of RHODS. + +When you deploy your application using the OCP git import flow, then you do not need to add the labels as import flow to the workload YAML file. 
Otherwise, you would need to add the labels to the workload YAML file manually.
+
+The labels are not similar to `backstage.io/edit-url` annotations as the added labels point to the source file of catalog entity metadata and are applied to the {product-short} catalog entity metadata YAML file, but not to the Kubernetes resources.
+
+You can also add the `app.openshift.io/edit-url` annotation that you want to access using the decorator.
+
+[TIP]
+====
+You can use the https://raw.githubusercontent.com/janus-idp/backstage-plugins/main/plugins/topology/manifests/clusterrole.yaml[prepared manifest for a read-only `ClusterRole`], which provides access for both Kubernetes and Topology plugin.
+====
+--
+
+* The following annotation is added to the entity's `catalog-info.yaml` file to identify whether an entity contains the Kubernetes resources:
++
+--
+[source,yaml]
+----
+annotations:
+  backstage.io/kubernetes-id: <BACKSTAGE_ENTITY_NAME>
+----
+
+The following label is added to the resources so that the Kubernetes plugin receives the Kubernetes resources from the requested entity:
+
+[source,yaml]
+----
+labels:
+  backstage.io/kubernetes-id: <BACKSTAGE_ENTITY_NAME>
+----
+
+[NOTE]
+====
+
+When using the label selector, the mentioned labels must be present on the resource.
+====
+--
+
+* The `backstage.io/kubernetes-namespace` annotation is added as follows to identify that the Kubernetes resources are using the defined namespace:
++
+--
+[source,yaml]
+----
+annotations:
+  backstage.io/kubernetes-namespace: <RESOURCE_NAMESPACE>
+----
+
+If the `backstage.io/kubernetes-namespace` annotation is added to the `catalog-info.yaml` file, then the RHODS instance is not accessible using the edit code decorator.
+
+To retrieve the instance URL, CheCluster Custom Resource (CR) is required. The instance URL is not retrieved if the namespace annotation value is different from `openshift-devspaces` as CheCluster CR is created in `openshift-devspaces` namespace.
+-- + +* A custom label selector is added, which {product-short} uses to find the Kubernetes resources. The label selector takes precedence over the ID annotations. For example: ++ +-- +[source,yaml] +---- +annotations: + backstage.io/kubernetes-label-selector: 'app=my-app,component=front-end' +---- + +If you have multiple entities while RHODS is configured and want multiple entities to support the edit code decorator that redirects to the RHODS instance, you can add the `backstage.io/kubernetes-label-selector` annotation to the `catalog-info.yaml` file for each entity as follows: + +[source,yaml] +---- +annotations: + backstage.io/kubernetes-label-selector: 'component in (,che)' +---- + +If you are using the previous custom label selector, then make sure that you add the following labels to your resources so that the Kubernetes plugin receives the Kubernetes resources from the requested entity: + +[source,yaml] +---- +labels: + component: che # add this label to your che cluster instance +---- + +[source,yaml] +---- +labels: + component: # add this label to the other resources associated with your entity +---- + +You can also write your own custom query for the label selector with unique labels to differentiate your entities. However, you need to ensure that you add those labels to the resources associated with your entities including your CheCluster instance. 
+-- + +* The following label is added to workload resources in the `deployment.yaml` file to display runtime icon in the topology nodes: ++ +-- +[source,yaml] +---- +labels: + app.openshift.io/runtime: +---- + +Alternatively, you can include the following label to display the runtime icon: + +[source,yaml] +---- +labels: + app.kubernetes.io/name: +---- + +The `` parameter in the previous example label supports the following values: + +* `django` +* `dotnet` +* `drupal` +* `go-gopher` +* `golang` +* `grails` +* `jboss` +* `jruby` +* `js` +* `nginx` +* `nodejs` +* `openjdk` +* `perl` +* `phalcon` +* `php` +* `python` +* `quarkus` +* `rails` +* `redis` +* `rh-spring-boot` +* `rust` +* `java` +* `rh-openjdk` +* `ruby` +* `spring` +* `spring-boot` + +Any other value for `` parameter results in icons not being rendered for the node. +-- + +* The following label is added to display the workload resources such as Deployments and Pods in a visual group: ++ +-- +[source,yaml] +---- + ```yaml title="catalog-info.yaml" + labels: + app.kubernetes.io/part-of: + ``` +---- +-- + +* The following annotation is added to display the workload resources such as Deployments and Pods with a visual connector: ++ +-- +[source] +---- + ```yaml title="catalog-info.yaml" + annotations: + app.openshift.io/connects-to: '[{"apiVersion": ,"kind": ,"name": }]' + ``` +---- + +For more information about the labels and annotations, see https://github.com/redhat-developer/app-labels/blob/master/labels-annotation-for-openshift.adoc[Guidelines for labels and annotations for OpenShift applications]. +-- + +.Procedure +ifeval::[{doc-show-dynamic-content} == true] +include::../../../artifacts/rhdh-plugins-reference/topology/topology-install-dynamic-plugin.adoc[leveloffset=+2] +endif::[] + +ifeval::[{doc-show-dynamic-content} == false] +. Install the Topology plugin using the following command: ++ +-- +[source,console] +---- +yarn workspace app add @redhat/backstage-plugin-topology +---- +-- + +. 
Enable *TOPOLOGY* tab in `packages/app/src/components/catalog/EntityPage.tsx`: ++ +-- +[source] +---- + ```tsx title="packages/app/src/components/catalog/EntityPage.tsx" + /* highlight-add-next-line */ + import { TopologyPage } from '@redhat/backstage-plugin-topology'; + + const serviceEntityPage = ( + + {/* ... */} + {/* highlight-add-start */} + + + + {/* highlight-add-end */} + + ); + ``` +---- +-- +endif::[] + +== For users + +=== Using the Topology plugin in {product-short} + +Topology is a front-end plugin that enables you to view the workloads as nodes that power any service on the Kubernetes cluster. + +.Prerequisites + +* Your {product-short} application is installed and running. +* You have installed the Topology plugin. For the installation process, see <>. + +.Procedure + +. Open your {product-short} application and select a component from the *Catalog* page. +. Go to the *TOPOLOGY* tab and you can view the workloads such as Deployments, Pods as nodes. ++ +image::topology-tab-user1.png[topology-tab] + +. Select a node and a pop-up appears on the right side, which contains two tabs: *Details* and *Resources*. ++ +-- +The *Details* and *Resources* tab contain the associated information and resources of the node. + +image::topology-tab-user2.png[topology-tab-details] +-- + +. Click on the *Open URL* button on the top of a node. ++ +-- +image::topology-tab-user3.png[topology-tab-open-url] + +When you click on the open URL button, it allows you to access the associated *Ingresses* and runs your application in a new tab. 
+-- diff --git a/artifacts/rhdh-plugins-reference/topology/topology-plugin-user.adoc b/artifacts/rhdh-plugins-reference/topology/topology-plugin-user.adoc new file mode 100644 index 0000000000..e69de29bb2 diff --git a/artifacts/snip-conscious-language.adoc b/artifacts/snip-conscious-language.adoc new file mode 100644 index 0000000000..e8515c6a85 --- /dev/null +++ b/artifacts/snip-conscious-language.adoc @@ -0,0 +1,5 @@ +[preface] +[id='snip-conscious-language_{context}'] += Making open source more inclusive + +Red Hat is committed to replacing problematic language in our code, documentation, and web properties. We are beginning with these four terms: master, slave, blacklist, and whitelist. Because of the enormity of this endeavor, these changes will be implemented gradually over several upcoming releases. For more details, see https://www.redhat.com/en/blog/making-open-source-more-inclusive-eradicating-problematic-language[_our CTO Chris Wright's message_]. diff --git a/artifacts/snip-customer-support-info.adoc b/artifacts/snip-customer-support-info.adoc new file mode 100644 index 0000000000..4143e8b937 --- /dev/null +++ b/artifacts/snip-customer-support-info.adoc @@ -0,0 +1,8 @@ +[preface] +[id='snip-customer-support-info_{context}'] += Red Hat Developer Hub support + +If you experience difficulty with a procedure described in this documentation, visit the http://access.redhat.com[Red Hat Customer Portal]. You can use the Red Hat Customer Portal for the following purposes: + +* To search or browse through the Red Hat Knowledgebase of technical support articles about Red Hat products. +* To create a https://access.redhat.com/support/cases/#/case/new/get-support?caseCreate=true[support case] for {company-name} Global Support Services (GSS). For support case creation, select *{product}* as the product and select the appropriate product version. 
For detailed information about supported platforms, see link:{release-notes-url}#con-release-notes-overview.adoc[Supported Platforms] and the link:https://access.redhat.com/support/policy/updates/developerhub[{product} Life Cycle]. \ No newline at end of file diff --git a/artifacts/snip-dynamic-plugins-support.adoc b/artifacts/snip-dynamic-plugins-support.adoc new file mode 100644 index 0000000000..0b70b98922 --- /dev/null +++ b/artifacts/snip-dynamic-plugins-support.adoc @@ -0,0 +1,12 @@ +[id='snip-dynamic-plugins-support_{context}'] + += Technology Preview plugins + +[IMPORTANT] +==== +{product} includes a select number of Technology Preview plugins, available for customers to configure and enable. These plugins are provided with support scoped per Technical Preview terms, might not be functionally complete, and {company-name} does not recommend using them for production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. + +For more information on Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Scope]. + +// Additional details on how Red Hat provides support for bundled community dynamic plugins are available on the https://access.redhat.com/policy/developerhub-support-policy[Red Hat Developer Support Policy] page. +==== diff --git a/artifacts/snip-technology-preview.adoc b/artifacts/snip-technology-preview.adoc new file mode 100644 index 0000000000..e94b591ef7 --- /dev/null +++ b/artifacts/snip-technology-preview.adoc @@ -0,0 +1,6 @@ +[IMPORTANT] +==== +These features are for Technology Preview only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs), might not be functionally complete, and Red Hat does not recommend using them for production. 
These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. + +For more information on Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Scope]. +==== diff --git a/assemblies/assembly-add-custom-app-file-openshift.adoc b/assemblies/assembly-add-custom-app-file-openshift.adoc new file mode 100644 index 0000000000..21978f413c --- /dev/null +++ b/assemblies/assembly-add-custom-app-file-openshift.adoc @@ -0,0 +1,25 @@ +[id='assembly-add-custom-app-file-openshift_{context}'] += Adding a custom application configuration file to {ocp-brand-name} + +To access the {product}, you must add a custom application configuration file to {ocp-brand-name}. In {ocp-short}, you can use the following content as a base template to create a ConfigMap named `app-config-rhdh`: + +[source,yaml,subs="attributes+"] +---- +kind: ConfigMap +apiVersion: v1 +metadata: + name: app-config-rhdh +data: + app-config-rhdh.yaml: | + app: + title: {product} +---- + +You can add the custom application configuration file to {ocp-short} in one of the following ways: + +* The {product} Operator +* The {product} Helm chart + +include::modules/getting-started/proc-add-custom-app-file-openshift-helm.adoc[leveloffset=+1] + +include::modules/installation/proc-add-custom-app-config-file-ocp-operator.adoc[leveloffset=+1] diff --git a/assemblies/assembly-admin-templates.adoc b/assemblies/assembly-admin-templates.adoc new file mode 100644 index 0000000000..e200a047a5 --- /dev/null +++ b/assemblies/assembly-admin-templates.adoc @@ -0,0 +1,25 @@ +:_mod-docs-content-type: ASSEMBLY +[id="assembly-admin-templates"] += Managing templates +:context: assembly-admin-templates + +toc::[] + +A template is a form composed of different UI fields that is defined in a YAML file. 
Templates include _actions_, which are steps that are executed in sequential order and can be executed conditionally. + +You can use templates to easily create {product} components, and then publish these components to different locations, such as the {product} software catalog, or repositories in GitHub or GitLab. + +include::modules/templates/proc-creating-templates.adoc[leveloffset=+1] +include::modules/templates/ref-creating-templates.adoc[leveloffset=+1] + +[role="_additional-resources"] +.Additional resources +* link:https://backstage.io/docs/features/software-templates/writing-templates[Backstage documentation - Writing Templates] +* link:https://backstage.io/docs/features/software-templates/builtin-actions[Backstage documentation - Builtin actions] +* link:https://backstage.io/docs/features/software-templates/writing-custom-actions[Backstage documentation - Writing Custom Actions] + +include::modules/templates/proc-adding-templates.adoc[leveloffset=+1] + +[role="_additional-resources"] +.Additional resources +* link:{authentication-book-url}#assembly-auth-provider-github[Enabling the GitHub authentication provider] diff --git a/assemblies/assembly-audit-log.adoc b/assemblies/assembly-audit-log.adoc new file mode 100644 index 0000000000..f70f4e64ec --- /dev/null +++ b/assemblies/assembly-audit-log.adoc @@ -0,0 +1,46 @@ +[id="assembly-audit-log"] += Audit logs in {product} +:context: assembly-audit-log + +Audit logs are a chronological set of records documenting the user activities, system events, and data changes that affect your {product} users, administrators, or components. Administrators can view {product-short} audit logs in the {ocp-short} web console to monitor scaffolder events, changes to the RBAC system, and changes to the Catalog database. 
Audit logs include the following information: + +* Name of the audited event +* Actor that triggered the audited event, for example, terminal, port, IP address, or hostname +* Event metadata, for example, date, time +* Event status, for example, `success`, `failure` +* Severity levels, for example, `info`, `debug`, `warn`, `error` + +You can use the information in the audit log to achieve the following goals: + +Enhance security:: +Trace activities, including those initiated by automated systems and software templates, back to their source. Know when software templates are executed, as well as the details of application and component installations, updates, configuration changes, and removals. + +Automate compliance:: +Use streamlined processes to view log data for specified points in time for auditing purposes or continuous compliance maintenance. + +Debug issues:: +Use access records and activity details to fix issues with software templates or plugins. + +[NOTE] +==== +Audit logs are not forwarded to the internal log store by default because this does not provide secure storage. You are responsible for ensuring that the system to which you forward audit logs is compliant with your organizational and governmental regulations, and is properly secured. 
+==== + +[role="_additional-resources"] +.Additional resources + +* For more information about logging in {ocp-short}, see link:https://docs.openshift.com/container-platform/latest/observability/logging/cluster-logging.html[About Logging] +* For a complete list of fields that a {product-short} audit log can include, see xref:ref-audit-log-fields.adoc_{context}[] +* For a list of scaffolder events that a {product-short} audit log can include, see xref:ref-audit-log-scaffolder-events.adoc_{context}[] + +include::modules/getting-started/con-audit-log-config.adoc[leveloffset=+1] + +include::modules/getting-started/proc-audit-log-view.adoc[leveloffset=+1] + +include::modules/getting-started/ref-audit-log-fields.adoc[leveloffset=+2] + +include::modules/getting-started/ref-audit-log-scaffolder-events.adoc[leveloffset=+2] + +include::modules/getting-started/ref-audit-log-catalog-events.adoc[leveloffset=+2] + +include::modules/getting-started/ref-audit-log-file-rotation-overview.adoc[leveloffset=+1] diff --git a/assemblies/assembly-authenticating-with-github.adoc b/assemblies/assembly-authenticating-with-github.adoc new file mode 100644 index 0000000000..70d1c5a361 --- /dev/null +++ b/assemblies/assembly-authenticating-with-github.adoc @@ -0,0 +1,13 @@ +[id="assembly-auth-provider-github"] += Enabling the GitHub authentication provider + +To authenticate users with GitHub or GitHub Enterprise: + +. xref:enabling-authentication-with-github[Enable the GitHub authentication provider in {product-short}]. +. xref:provisioning-users-from-github-to-the-software-catalog[Provision users from GitHub to the software catalog]. 
+ +include::modules/authentication/proc-enabling-authentication-with-github.adoc[leveloffset=+1] + + +include::modules/authentication/proc-provisioning-users-from-github-to-the-software-catalog.adoc[leveloffset=+1] + diff --git a/assemblies/assembly-authenticating-with-microsoft-azure.adoc b/assemblies/assembly-authenticating-with-microsoft-azure.adoc new file mode 100644 index 0000000000..d575ae393e --- /dev/null +++ b/assemblies/assembly-authenticating-with-microsoft-azure.adoc @@ -0,0 +1,13 @@ +[id="assembly-authenticating-with-microsoft-azure"] += Authentication with Microsoft Azure + +To authenticate users with Microsoft Azure: + +. xref:enabling-authentication-with-microsoft-azure[Enable authentication with Microsoft Azure]. +. xref:provisioning-users-from-microsoft-azure-to-the-software-catalog[Provision users from Microsoft Azure to the software catalog]. + +include::modules/authentication/proc-enabling-authentication-with-microsoft-azure.adoc[leveloffset=+1] + + +include::modules/authentication/proc-provisioning-users-from-microsoft-azure-to-the-software-catalog.adoc[leveloffset=+1] + diff --git a/assemblies/assembly-authenticating-with-rhsso.adoc b/assemblies/assembly-authenticating-with-rhsso.adoc new file mode 100644 index 0000000000..d3d5b1c95c --- /dev/null +++ b/assemblies/assembly-authenticating-with-rhsso.adoc @@ -0,0 +1,13 @@ +[id="assembly-authenticating-with-rhsso"] += Authenticating with Red Hat Single Sign-On (RHSSO) + +To authenticate users with Red Hat Single Sign-On (RHSSO): + +. xref:enabling-authentication-with-rhsso[Enable the OpenID Connect (OIDC) authentication provider in RHDH]. +. xref:provisioning-users-from-rhsso-to-the-software-catalog[Provision users from Red Hat Single-Sign On (RHSSO) to the software catalog]. 
+ +include::modules/authentication/proc-enabling-authentication-with-rhsso.adoc[leveloffset=+1] + +include::modules/authentication/proc-provisioning-users-from-rhsso-to-the-software-catalog.adoc[leveloffset=+1] + +include::modules/authentication/proc-creating-a-custom-transformer-to-provision-users-from-rhsso-to-the-software-catalog.adoc[leveloffset=+1] diff --git a/assemblies/assembly-authenticating-with-the-guest-user.adoc b/assemblies/assembly-authenticating-with-the-guest-user.adoc new file mode 100644 index 0000000000..2e87fe7882 --- /dev/null +++ b/assemblies/assembly-authenticating-with-the-guest-user.adoc @@ -0,0 +1,12 @@ +:_mod-docs-content-type: PROCEDURE +[id="authenticating-with-the-guest-user_{context}"] += Authenticating with the Guest user + +To explore {product-short} features, you can skip configuring authentication and authorization. +You can configure {product-short} to log in as a Guest user and access {product-short} features. + +include::modules/authentication/proc-authenticationg-with-the-guest-user-on-an-operator-based-installation.adoc[leveloffset=+1] + + +include::modules/authentication/proc-authenticationg-with-the-guest-user-on-a-helm-based-installation.adoc[leveloffset=+1] + diff --git a/assemblies/assembly-bulk-importing-from-github.adoc b/assemblies/assembly-bulk-importing-from-github.adoc new file mode 100644 index 0000000000..0641acf589 --- /dev/null +++ b/assemblies/assembly-bulk-importing-from-github.adoc @@ -0,0 +1,15 @@ +[id="bulk-importing-github-repositories"] += Bulk importing GitHub repositories + +include::{docdir}/artifacts/snip-technology-preview.adoc[] + +{product} can automate GitHub repositories onboarding and track their import status. 
+ +include::modules/importing-repositories/procedure-enabling-the-bulk-import-from-github-feature.adoc[leveloffset=+1] + +include::modules/importing-repositories/procedure-importing-multiple-repositories-from-github.adoc[leveloffset=+1] + +include::modules/importing-repositories/procedure-managing-the-imported-repository-list.adoc[leveloffset=+1] + +include::modules/importing-repositories/procedure-understanding-bulk-import-audit-logs.adoc[leveloffset=+1] + diff --git a/assemblies/assembly-configuring-authorization-in-rhdh.adoc b/assemblies/assembly-configuring-authorization-in-rhdh.adoc new file mode 100644 index 0000000000..6089d3cc52 --- /dev/null +++ b/assemblies/assembly-configuring-authorization-in-rhdh.adoc @@ -0,0 +1,56 @@ +[id='configuring-authorization-in-rhdh'] += Configuring authorization in {product} + +include::modules/authorization/con-rbac-overview.adoc[leveloffset=+1] + + +include::modules/authorization/ref-rbac-permission-policies.adoc[leveloffset=+1] + + +include::modules/authorization/con-rbac-config-permission-policies.adoc[leveloffset=+2] + + +include::modules/authorization/con-rbac-config-permission-policies-admin.adoc[leveloffset=+3] + + +include::modules/authorization/con-rbac-config-permission-policies-external-file.adoc[leveloffset=+3] + +include::modules/authorization/proc-mounting-the-policy-csv-file-using-the-operator.adoc[leveloffset=+4] + +include::modules/authorization/proc-mounting-the-policy-csv-file-using-helm.adoc[leveloffset=+4] + + +include::modules/authorization/con-rbac-conditional-policies-rhdh.adoc[leveloffset=+1] + + +include::modules/authorization/ref-rbac-conditional-policy-definition.adoc[leveloffset=+2] + + +include::modules/authorization/proc-rbac-config-conditional-policy-file.adoc[leveloffset=+2] + + +include::modules/authorization/proc-rbac-ui-manage-roles.adoc[leveloffset=+1] + + +include::modules/authorization/proc-rbac-ui-create-role.adoc[leveloffset=+2] + + 
+include::modules/authorization/proc-rbac-ui-edit-role.adoc[leveloffset=+2] + + +include::modules/authorization/proc-rbac-ui-delete-role.adoc[leveloffset=+2] + + +include::modules/authorization/con-user-stats-rhdh.adoc[leveloffset=+1] + + +include::modules/authorization/proc-download-user-stats-rhdh.adoc[leveloffset=+2] + + +include::modules/authorization/con-rbac-rest-api.adoc[leveloffset=+1] + + +include::modules/authorization/proc-rbac-send-request-rbac-rest-api.adoc[leveloffset=+2] + + +include::modules/authorization/ref-rbac-rest-api-endpoints.adoc[leveloffset=+2] diff --git a/assemblies/assembly-configuring-external-postgresql-databases.adoc b/assemblies/assembly-configuring-external-postgresql-databases.adoc new file mode 100644 index 0000000000..3f63696dbf --- /dev/null +++ b/assemblies/assembly-configuring-external-postgresql-databases.adoc @@ -0,0 +1,21 @@ +[id="assembly-configuring-external-postgresql-databases"] += Configuring external PostgreSQL databases + +As an administrator, you can configure and use external PostgreSQL databases in {product}. You can use a PostgreSQL certificate file to configure an external PostgreSQL instance using the Operator or Helm Chart. + +[NOTE] +==== +{product-short} supports only configuring external PostgreSQL databases. You can perform maintenance activities, such as backing up your data or configuring high availability (HA) for the external PostgreSQL databases. + +Also, configuring an external PostgreSQL instance by using the {product} Operator or Helm Chart is not intended for production use. 
+==== + + +include::modules/admin/proc-configuring-postgresql-instance-using-operator.adoc[leveloffset=+1] + +include::modules/admin/proc-configuring-postgresql-instance-using-helm.adoc[leveloffset=+1] + +include::modules/admin/proc-migrating-databases-to-an-external-server.adoc[leveloffset=+1] + + + diff --git a/assemblies/assembly-customize-rhdh-theme.adoc b/assemblies/assembly-customize-rhdh-theme.adoc new file mode 100644 index 0000000000..37c87a0669 --- /dev/null +++ b/assemblies/assembly-customize-rhdh-theme.adoc @@ -0,0 +1,42 @@ +:_mod-docs-content-type: ASSEMBLY +[id="assembly-customize-rhdh-theme"] += Customizing the appearance of your {product} instance +:context: assembly-customize-rhdh-theme + +The following default theme configurations are available for {product}: + +The {product} theme:: Default theme configurations to make your developer portal look like a standard {product} instance. For more information, see xref:ref-customize-rhdh-default-rhdh_{context}[] + +The Backstage theme:: Default theme configurations to make your developer portal look like a standard Backstage instance. For more information, see xref:ref-customize-rhdh-default-backstage_{context}[] + +You can change or disable particular parameters in a default theme or create a fully customized theme by modifying the `app-config-rhdh.yaml` file. From the `app-config-rhdh.yaml` file, you can customize common theme components, including the following: + +* Company name and logo +* Font color, size, and style of text in paragraphs, headings, headers, and buttons +* Header color, gradient, and shape +* Button color +* Navigation indicator color + +You can also customize some components from the {product-short} GUI, such as the theme mode (*Light Theme*, *Dark Theme*, or *Auto*). 
+ +include::modules/customization/proc-customize-rhdh-theme-mode.adoc[leveloffset=+1] + +include::modules/customization/proc-customize-rhdh-branding-logo.adoc[leveloffset=+1] + +//include::modules/customization/proc-customize-rhdh-sidebar-logo.adoc[leveloffset=+1]//commented since the section has been merged with proc-customize-rhdh-branding-logo.adoc in PR #514 + +include::modules/customization/proc-customize-rhdh-sidebar-menuitems.adoc[leveloffset=+1] + +include::modules/customization/proc-customize-rhdh-tab-tooltip.adoc[leveloffset=+1] + +include::modules/customization/proc-customize-rhdh-palette.adoc[leveloffset=+1] + +include::modules/customization/proc-customize-rhdh-page-theme.adoc[leveloffset=+1] + +include::modules/customization/proc-customize-rhdh-font.adoc[leveloffset=+1] + +include::modules/customization/ref-customize-rhdh-default-rhdh.adoc[leveloffset=+1] + +include::modules/customization/ref-customize-rhdh-default-backstage.adoc[leveloffset=+1] + +include::modules/customization/ref-customize-rhdh-custom-components.adoc[leveloffset=+1] diff --git a/assemblies/assembly-enabling-authentication.adoc b/assemblies/assembly-enabling-authentication.adoc new file mode 100644 index 0000000000..7d5308b51d --- /dev/null +++ b/assemblies/assembly-enabling-authentication.adoc @@ -0,0 +1,63 @@ +[id='enabling-authentication'] += Enabling authentication in {product} + +Depending on your organization's security policies, you might need to identify and authorize users before giving them access to resources, such as {product}. + +In {product-short}, authentication and authorization are two separate processes: + +. Authentication defines the user identity, and passes on this information to {product-short}. +Read the following chapters to configure authentication in {product-short}. + +. Authorization defines what the authenticated identity can access or do in {product-short}. +See link:{authorization-book-url}[{authorization-book-title}]. 
+ +[TIP] +.Not recommended for production +==== +To explore {product-short} features, you can enable the guest user to skip configuring authentication and authorization, log in as the guest user, and access all the features. +==== + +The authentication system in {product-short} is handled by external authentication providers. + +{product-short} supports the following authentication providers: + +* Red Hat Single Sign-On (RHSSO) +* GitHub +* Microsoft Azure + +To identify users in {product-short}, configure: + +* One (and only one) authentication provider for sign-in and identification. +* Optionally, additional authentication providers for identification, to add more information to the user identity, or enable access to additional external resources. + +For each authentication provider, set up the shared secret that the authentication provider and {product-short} require to communicate, first in the authentication provider, then in {product-short}. + +{product-short} stores user identity information in the {product-short} software catalog. + +[TIP] +.Not recommended for production +==== +To explore the authentication system and use {product-short} without authorization policies, you can bypass the {product-short} software catalog and start using {product-short} without provisioning the {product-short} software catalog. +==== + +To get, store, and update additional user information, such as group or team ownership, with the intention to use this data to define authorization policies, provision users and groups in the {product-short} software catalog. + +[IMPORTANT] +==== +{product-short} uses a one-way synchronization system to provision users and groups from your authentication system to the {product-short} software catalog. +Therefore, deleting users and groups by using the {product-short} Web UI or REST API might have unintended consequences. 
+==== + + + +include::assembly-authenticating-with-the-guest-user.adoc[leveloffset=+1] + + +include::assembly-authenticating-with-rhsso.adoc[leveloffset=+1] + + +include::assembly-authenticating-with-github.adoc[leveloffset=+1] + + +include::assembly-authenticating-with-microsoft-azure.adoc[leveloffset=+1] + diff --git a/assemblies/assembly-install-rhdh-ocp-helm.adoc b/assemblies/assembly-install-rhdh-ocp-helm.adoc new file mode 100644 index 0000000000..662d44a650 --- /dev/null +++ b/assemblies/assembly-install-rhdh-ocp-helm.adoc @@ -0,0 +1,13 @@ +:_mod-docs-content-type: ASSEMBLY +[id="assembly-install-rhdh-ocp-helm"] += Installing {product} on {ocp-short} with the Helm chart +:context: assembly-install-rhdh-ocp-helm + +You can install {product} on {ocp-short} by using the Helm chart with one of the following methods: + +* The {ocp-short} console +* The Helm CLI + +include::modules/installation/proc-install-rhdh-ocp-helm-gui.adoc[leveloffset=+1] + +include::modules/installation/proc-install-rhdh-ocp-helm-cli.adoc[leveloffset=+1] diff --git a/assemblies/assembly-install-rhdh-ocp-operator.adoc b/assemblies/assembly-install-rhdh-ocp-operator.adoc new file mode 100644 index 0000000000..c02c4b2819 --- /dev/null +++ b/assemblies/assembly-install-rhdh-ocp-operator.adoc @@ -0,0 +1,10 @@ +:_mod-docs-content-type: ASSEMBLY +[id="assembly-install-rhdh-ocp-operator"] += Installing {product} on {ocp-short} with the Operator +:context: assembly-install-rhdh-ocp-operator + +You can install {product} on {ocp-short} by using the {product} Operator in the {ocp-short} console. 
+ +include::modules/installation/proc-install-operator.adoc[leveloffset=+1] + +include::modules/installation/proc-install-rhdh-ocp-operator.adoc[leveloffset=+1] diff --git a/assemblies/assembly-install-rhdh-osd-gcp.adoc b/assemblies/assembly-install-rhdh-osd-gcp.adoc new file mode 100644 index 0000000000..951bc0c564 --- /dev/null +++ b/assemblies/assembly-install-rhdh-osd-gcp.adoc @@ -0,0 +1,15 @@ +[id="assembly-install-rhdh-osd-gcp"] += Installing {product} on {osd-short} on {gcp-brand-name} +:context: assembly-install-rhdh-osd-gcp + +You can install {product-short} on {osd-short} on {gcp-brand-name} ({gcp-short}) using one of the following methods: + +* The {product} Operator +* The {product} Helm chart + +// Operator procedure +include::modules/installation/proc-install-rhdh-osd-gcp-operator.adoc[leveloffset=+1] + +// Helm procedure +include::modules/installation/proc-install-rhdh-osd-gcp-helm.adoc[leveloffset=+1] + diff --git a/assemblies/assembly-release-notes-breaking-changes.adoc b/assemblies/assembly-release-notes-breaking-changes.adoc new file mode 100644 index 0000000000..32ccedc406 --- /dev/null +++ b/assemblies/assembly-release-notes-breaking-changes.adoc @@ -0,0 +1,20 @@ +:_content-type: ASSEMBLY +[id="breaking-changes"] += Breaking changes + +This section lists breaking changes in {product} {product-version}. 
+ + +include::modules/release-notes/snip-removed-functionality-rhidp-3048.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-removed-functionality-rhidp-3074.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-removed-functionality-rhidp-3187.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-removed-functionality-rhidp-4293.adoc[leveloffset=+1] + + + diff --git a/assemblies/assembly-release-notes-deprecated-functionalities.adoc b/assemblies/assembly-release-notes-deprecated-functionalities.adoc new file mode 100644 index 0000000000..9b960772e9 --- /dev/null +++ b/assemblies/assembly-release-notes-deprecated-functionalities.adoc @@ -0,0 +1,11 @@ +:_content-type: ASSEMBLY +[id="deprecated-functionalities"] += Deprecated functionalities + +This section lists deprecated functionalities in {product} {product-version}. + + +include::modules/release-notes/snip-deprecated-functionality-rhidp-1138.adoc[leveloffset=+1] + + + diff --git a/assemblies/assembly-release-notes-fixed-issues.adoc b/assemblies/assembly-release-notes-fixed-issues.adoc new file mode 100644 index 0000000000..4462772088 --- /dev/null +++ b/assemblies/assembly-release-notes-fixed-issues.adoc @@ -0,0 +1,71 @@ +:_content-type: ASSEMBLY +[id="fixed-issues"] += Fixed issues + +This section lists issues fixed in {product} {product-version}. 
+ + +include::modules/release-notes/snip-bug-fix-rhidp-1334.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-bug-fix-rhidp-2139.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-bug-fix-rhidp-2374.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-bug-fix-rhidp-2412.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-bug-fix-rhidp-2438.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-bug-fix-rhidp-2529.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-bug-fix-rhidp-2716.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-bug-fix-rhidp-2728.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-bug-fix-rhidp-3159.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-bug-fix-rhidp-3217.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-bug-fix-rhidp-3260.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-bug-fix-rhidp-3458.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-bug-fix-rhidp-3471.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-bug-fix-rhidp-3580.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-bug-fix-rhidp-3601.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-bug-fix-rhidp-3612.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-bug-fix-rhidp-3735.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-bug-fix-rhidp-3896.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-bug-fix-rhidp-4013.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-bug-fix-rhidp-4046.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-bug-fix-rhidp-4200.adoc[leveloffset=+1] + + + diff --git a/assemblies/assembly-release-notes-fixed-security-issues.adoc b/assemblies/assembly-release-notes-fixed-security-issues.adoc new file mode 100644 index 0000000000..a741a16d1a --- /dev/null +++ b/assemblies/assembly-release-notes-fixed-security-issues.adoc @@ -0,0 +1,12 @@ 
+:_content-type: ASSEMBLY +[id="fixed-security-issues"] += Fixed security issues + +This section lists security issues fixed in {product} {product-version}. + +== {product} {product-bundle-version} + +include::modules/release-notes/snip-fixed-security-issues-in-product-1.3.0.adoc[leveloffset=+2] + +include::modules/release-notes/snip-fixed-security-issues-in-rpm-1.3.0.adoc[leveloffset=+2] + diff --git a/assemblies/assembly-release-notes-known-issues.adoc b/assemblies/assembly-release-notes-known-issues.adoc new file mode 100644 index 0000000000..244af1c2fe --- /dev/null +++ b/assemblies/assembly-release-notes-known-issues.adoc @@ -0,0 +1,20 @@ +:_content-type: ASSEMBLY +[id="known-issues"] += Known issues + +This section lists known issues in {product} {product-version}. + + +include::modules/release-notes/snip-known-issue-rhidp-4378.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-known-issue-rhidp-4069.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-known-issue-rhidp-4067.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-known-issue-rhidp-3931.adoc[leveloffset=+1] + + + diff --git a/assemblies/assembly-release-notes-new-features.adoc b/assemblies/assembly-release-notes-new-features.adoc new file mode 100644 index 0000000000..89161bc467 --- /dev/null +++ b/assemblies/assembly-release-notes-new-features.adoc @@ -0,0 +1,68 @@ +:_content-type: ASSEMBLY +[id="new-features"] += New features + +This section highlights new features in {product} {product-version}. 
+ + +include::modules/release-notes/snip-feature-rhidp-2232.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-feature-rhidp-2341.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-enhancement-rhidp-2615.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-feature-rhidp-2643.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-feature-rhidp-2644.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-enhancement-rhidp-2695.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-enhancement-rhidp-2723.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-enhancement-rhidp-2736.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-enhancement-rhidp-2768.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-enhancement-rhidp-2790.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-enhancement-rhidp-2818.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-feature-rhidp-2865.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-feature-rhidp-2888.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-feature-rhidp-2907.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-enhancement-rhidp-3064.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-enhancement-rhidp-3125.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-feature-rhidp-3177.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-feature-rhidp-3569.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-feature-rhidp-3666.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-enhancement-rhidp-3826.adoc[leveloffset=+1] + + + diff --git a/assemblies/assembly-release-notes-technology-preview.adoc b/assemblies/assembly-release-notes-technology-preview.adoc new file mode 100644 index 0000000000..b3fab28c45 --- /dev/null +++ b/assemblies/assembly-release-notes-technology-preview.adoc @@ -0,0 +1,22 @@ +:_content-type: ASSEMBLY +[id="technology-preview"] += 
Technology Preview + +This section lists Technology Preview features in {product} {product-version}. + +[IMPORTANT] +==== +Technology Preview features provide early access to upcoming product innovations, enabling you to test functionality and provide feedback during the development process. +However, these features are not fully supported under Red Hat Subscription Level Agreements, may not be functionally complete, and are not intended for production use. +As Red Hat considers making future iterations of Technology Preview features generally available, we will attempt to resolve any issues that customers experience when using these features. +See: link:https://access.redhat.com/support/offerings/techpreview/[Technology Preview support scope]. +==== + + +include::modules/release-notes/snip-technology-preview-rhidp-1397.adoc[leveloffset=+1] + + +include::modules/release-notes/snip-technology-preview-rhidp-3713.adoc[leveloffset=+1] + + + diff --git a/assemblies/assembly-rhdh-integration-aks.adoc b/assemblies/assembly-rhdh-integration-aks.adoc new file mode 100644 index 0000000000..9f56056b9c --- /dev/null +++ b/assemblies/assembly-rhdh-integration-aks.adoc @@ -0,0 +1,18 @@ +:_mod-docs-content-type: ASSEMBLY +[id="assembly-rhdh-integration-aks"] += {product} integration with {aks-brand-name} ({aks-short}) +:context: assembly-rhdh-integration-aks + +toc::[] + + +You can integrate {product-short} with {aks-brand-name} ({aks-short}), which provides a significant advancement in development, offering a streamlined environment for building, deploying, and managing your applications. 
+ +This integration requires the deployment of {product-short} on {aks-short} using one of the following methods: + +* The Helm chart +* The {product} Operator + +include::modules/admin/proc-rhdh-monitoring-logging-aks.adoc[leveloffset=+1] + +include::modules/admin/proc-using-azure-auth-provider.adoc[leveloffset=+1] diff --git a/assemblies/assembly-rhdh-integration-aws.adoc b/assemblies/assembly-rhdh-integration-aws.adoc new file mode 100644 index 0000000000..89b2161acc --- /dev/null +++ b/assemblies/assembly-rhdh-integration-aws.adoc @@ -0,0 +1,17 @@ +:_mod-docs-content-type: ASSEMBLY +[id="assembly-rhdh-integration-aws"] += {product} integration with {aws-brand-name} ({aws-short}) +:context: assembly-rhdh-integration-aws + +toc::[] + +You can integrate your {product} application with {aws-brand-name} ({aws-short}), which can help you streamline your workflows within the {aws-short} ecosystem. Integrating the {product-short} resources with {aws-short} provides access to a comprehensive suite of tools, services, and solutions. + +The integration with {aws-short} requires the deployment of {product-short} in {eks-name} (EKS) using one of the following methods: + +* The Helm chart +* The {product} Operator + +include::modules/admin/proc-rhdh-monitoring-logging-aws.adoc[leveloffset=+1] + +include::modules/admin/proc-using-aws-cognito-auth-provider.adoc[leveloffset=+1] diff --git a/assemblies/assembly-rhdh-observability.adoc b/assemblies/assembly-rhdh-observability.adoc new file mode 100644 index 0000000000..a954ee8de9 --- /dev/null +++ b/assemblies/assembly-rhdh-observability.adoc @@ -0,0 +1,17 @@ +:_mod-docs-content-type: ASSEMBLY +[id="assembly-rhdh-observability"] += Enabling observability for {product} on {ocp-short} +:context: assembly-rhdh-observability + +toc::[] + +// Metrics +In {ocp-short}, metrics are exposed through an HTTP service endpoint under the `/metrics` canonical name. 
You can create a `ServiceMonitor` custom resource (CR) to scrape metrics from a service endpoint in a user-defined project. + +include::modules/observability/proc-admin-enabling-metrics-ocp-helm.adoc[leveloffset=+1] +include::modules/observability/proc-admin-enabling-metrics-ocp-operator.adoc[leveloffset=+1] + +[role="_additional-resources"] +[id="additional-resources_{context}"] +== Additional resources +* link:https://docs.openshift.com/container-platform/latest/observability/monitoring/managing-metrics.html[{ocp-short} - Managing metrics] diff --git a/assemblies/assembly-rhdh-telemetry.adoc b/assemblies/assembly-rhdh-telemetry.adoc new file mode 100644 index 0000000000..435f52b0ab --- /dev/null +++ b/assemblies/assembly-rhdh-telemetry.adoc @@ -0,0 +1,41 @@ +[id="assembly-rhdh-telemetry"] += Telemetry data collection + +The telemetry data collection feature helps in collecting and analyzing the telemetry data to improve your experience with {product}. This feature is enabled by default. + +[IMPORTANT] +==== +As an administrator, you can disable the telemetry data collection feature based on your needs. For example, in an air-gapped environment, you can disable this feature to avoid needless outbound requests affecting the responsiveness of the {product-very-short} application. For more details, see the link:{LinkAdminGuide}#disabling-telemetry-data-collection_admin-rhdh[Disabling telemetry data collection in {product-very-short}] section. +==== + +{company-name} collects and analyzes the following data: + +* Events of page visits and clicks on links or buttons. +* System-related information, for example, locale, timezone, user agent including browser and OS details. +* Page-related information, for example, title, category, extension name, URL, path, referrer, and search parameters. +* Anonymized IP addresses, recorded as `0.0.0.0`. 
+* Anonymized username hashes, which are unique identifiers used solely to identify the number of unique users of the {product-very-short} application. + +With {product-very-short}, you can customize the telemetry data collection feature and the telemetry Segment source configuration based on your needs. + + +// disabling telemetry +include::modules/admin/ref-disabling-telemetry.adoc[leveloffset=+1] + +include::modules/admin/proc-disabling-telemetry-using-helm.adoc[leveloffset=+2] + +include::modules/admin/proc-disabling-telemetry-using-operator.adoc[leveloffset=+2] + +// enabling telemetry +include::modules/admin/ref-enabling-telemetry.adoc[leveloffset=+1] + +include::modules/admin/proc-enabling-telemetry-using-helm.adoc[leveloffset=+2] + +include::modules/admin/proc-enabling-telemetry-using-operator.adoc[leveloffset=+2] + +// customizing telemetry segment source +include::modules/admin/ref-customizing-telemetry-segment.adoc[leveloffset=+1] + +include::modules/admin/proc-customizing-telemetry-segment-using-helm.adoc[leveloffset=+2] + +include::modules/admin/proc-customizing-telemetry-segment-using-operator.adoc[leveloffset=+2] diff --git a/assemblies/assembly-running-rhdh-behind-a-proxy.adoc b/assemblies/assembly-running-rhdh-behind-a-proxy.adoc new file mode 100644 index 0000000000..d6a895a280 --- /dev/null +++ b/assemblies/assembly-running-rhdh-behind-a-proxy.adoc @@ -0,0 +1,13 @@ +[id="assembly-running-rhdh-behind-a-proxy"] += Running the {product-very-short} application behind a corporate proxy + +You can run the {product-very-short} application behind a corporate proxy by setting any of the following environment variables before starting the application: + +* `HTTP_PROXY`: Denotes the proxy to use for HTTP requests. +* `HTTPS_PROXY`: Denotes the proxy to use for HTTPS requests. + +Additionally, you can set the `NO_PROXY` environment variable to exclude certain domains from proxying. 
The variable value is a comma-separated list of hostnames that do not require a proxy to be reached, even if one is specified. + + +include::modules/admin/proc-configuring-proxy-in-helm-deployment.adoc[leveloffset=+1] +include::modules/admin/proc-configuring-proxy-in-operator-deployment.adoc[leveloffset=+1] diff --git a/assemblies/assembly-techdocs-plugin.adoc b/assemblies/assembly-techdocs-plugin.adoc new file mode 100644 index 0000000000..bf2122abdd --- /dev/null +++ b/assemblies/assembly-techdocs-plugin.adoc @@ -0,0 +1,58 @@ +:_mod-docs-content-type: ASSEMBLY +[id="assembly-techdocs-plugin_{context}"] += Configuring the TechDocs plugin in {product} + +The {product} TechDocs plugin helps your organization create, find, and use documentation in a central location and in a standardized way. For example: + +Docs-like-code approach:: +Write your technical documentation in Markdown files that are stored inside your project repository along with your code. + +Documentation site generation:: +Use MkDocs to create a full-featured, Markdown-based, static HTML site for your documentation that is rendered centrally in {product-short}. + +Documentation site metadata and integrations:: +See additional metadata about the documentation site alongside the static documentation, such as the date of the last update, the site owner, top contributors, open GitHub issues, Slack support channels, and Stack Overflow Enterprise tags. + +Built-in navigation and search:: +Find the information that you want from a document more quickly and easily. + +Add-ons:: +Customize your TechDocs experience with Add-ons to address higher-order documentation needs. + +The TechDocs plugin is preinstalled and enabled on a {product-short} instance by default. You can disable or enable the TechDocs plugin, and change other parameters, by configuring the {product} Helm chart or the {product} Operator config map. 
+ +[IMPORTANT] +==== +{product} includes a built-in TechDocs builder that generates static HTML documentation from your codebase. However, the default basic setup of the local builder is not intended for production. +==== + +You can use a CI/CD pipeline with the repository that has a dedicated job to generate docs for TechDocs. The generated static files are stored in {odf-name} or in a cloud storage solution of your choice and published to a static HTML documentation site. + +After you configure {odf-name} to store the files that TechDocs generates, you can configure the TechDocs plugin to use the {odf-name} for cloud storage. + +[role="_additional-resources"] +.Additional resources + +* For more information, see link:{LinkPluginsGuide}[Configuring plugins in {product}]. + +//Configuring storage +include::modules/getting-started/con-techdocs-configure-storage.adoc[leveloffset=+1] + +include::modules/getting-started/proc-techdocs-using-odf-storage.adoc[leveloffset=+2] + +include::modules/getting-started/proc-techdocs-configure-odf-helm.adoc[leveloffset=+2] + +include::modules/getting-started/ref-techdocs-example-config-plugin-helm.adoc[leveloffset=+3] + +include::modules/getting-started/proc-techdocs-configure-odf-operator.adoc[leveloffset=+2] + +include::modules/getting-started/ref-techdocs-example-config-plugin-operator.adoc[leveloffset=+3] + +//Configuring CI/CD +include::modules/getting-started/con-techdocs-config-cicd.adoc[leveloffset=+1] + +include::modules/getting-started/proc-techdocs-config-cicd-prep-repo.adoc[leveloffset=+2] + +include::modules/getting-started/proc-techdocs-generate-site.adoc[leveloffset=+2] + +include::modules/getting-started/proc-techdocs-publish-site.adoc[leveloffset=+2] diff --git a/assemblies/assembly-upgrade-rhdh.adoc b/assemblies/assembly-upgrade-rhdh.adoc new file mode 100644 index 0000000000..bab16c2427 --- /dev/null +++ b/assemblies/assembly-upgrade-rhdh.adoc @@ -0,0 +1,7 @@ +:_mod-docs-content-type: ASSEMBLY 
+[id="assembly-upgrade-rhdh"] += Upgrading {product} +:context: assembly-upgrade-rhdh + +toc::[] + diff --git a/assemblies/assembly_about-rhdh.adoc b/assemblies/assembly_about-rhdh.adoc new file mode 100644 index 0000000000..17c30781f7 --- /dev/null +++ b/assemblies/assembly_about-rhdh.adoc @@ -0,0 +1,26 @@ +:_newdoc-version: 2.18.3 +:_template-generated: 2024-10-07 + +ifdef::context[:parent-context-of-about-rhdh: {context}] + +:_mod-docs-content-type: ASSEMBLY + +ifndef::context[] +[id="about-rhdh"] +endif::[] +ifdef::context[] +[id="about-rhdh_{context}"] +endif::[] += About {product} + +:context: about-rhdh + +{product} is a fully supported, enterprise-grade, open developer platform that you can use to build developer portals. This platform contains a supported and opinionated framework that helps reduce the friction and frustration of developers while boosting productivity. {product} simplifies decision-making by providing a developer experience that presents a selection of internally approved tools, programming languages, and developer resources within a self-managed portal. As a developer, you can use {product} to experience a streamlined development environment. {product} is driven by a centralized software catalog, providing efficiency to your microservices and infrastructure. It enables your product team to deliver quality code without any compromises. 
+ +include::modules/discover/con_benefits-of-rhdh.adoc[leveloffset=+1] + + + + + + diff --git a/assemblies/dynamic-plugins/assembly-about-rhdh-plugins.adoc b/assemblies/dynamic-plugins/assembly-about-rhdh-plugins.adoc new file mode 100644 index 0000000000..b518961b23 --- /dev/null +++ b/assemblies/dynamic-plugins/assembly-about-rhdh-plugins.adoc @@ -0,0 +1,4 @@ +[id="rhdh-about-rhdh-plugins_{context}"] += About {product} plugins + +include::../modules/dynamic-plugins/con-rhdh-plugins.adoc[leveloffset=+1] \ No newline at end of file diff --git a/assemblies/dynamic-plugins/assembly-configuring-rhdh-plugins.adoc b/assemblies/dynamic-plugins/assembly-configuring-rhdh-plugins.adoc new file mode 100644 index 0000000000..c151708b37 --- /dev/null +++ b/assemblies/dynamic-plugins/assembly-configuring-rhdh-plugins.adoc @@ -0,0 +1,28 @@ +[id="rhdh-configuring-rhdh-plugins_{context}"] += Configuring dynamic plugins in {product} + +// Ansible +include::../modules/dynamic-plugins/con-ansible-plugin-admin.adoc[leveloffset=+1] + +// Argo CD +include::../../artifacts/rhdh-plugins-reference/argocd/argocd-plugin-admin.adoc[leveloffset=+1] + +// Keycloak +include::../../artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-admin.adoc[leveloffset=+1] + +// Nexus +include::../../artifacts/rhdh-plugins-reference/nexus-repository-manager/nexus-repository-manager-plugin-admin.adoc[leveloffset=+1] + +// Tekton +include::../../artifacts/rhdh-plugins-reference/tekton/tekton-plugin-admin.adoc[leveloffset=+1] + +// Topology +== Installing and configuring the Topology plugin +include::../modules/dynamic-plugins/proc-topology-install.adoc[leveloffset=+2] +include::../modules/dynamic-plugins/proc-topology-configure.adoc[leveloffset=+2] + +// Dynamic plugins cache +include::../modules/dynamic-plugins/con-dynamic-plugins-cache.adoc[ leveloffset=+1] + +// Redis cache +include::../modules/dynamic-plugins/proc-installing-and-configuring-redis-cache.adoc[leveloffset=+1] \ No newline at end of file 
diff --git a/assemblies/dynamic-plugins/assembly-installing-rhdh-plugins.adoc b/assemblies/dynamic-plugins/assembly-installing-rhdh-plugins.adoc new file mode 100644 index 0000000000..70b58f271b --- /dev/null +++ b/assemblies/dynamic-plugins/assembly-installing-rhdh-plugins.adoc @@ -0,0 +1,25 @@ +[id="rhdh-installing-rhdh-plugins_{context}"] += Installing dynamic plugins in {product} + +The dynamic plugin support is based on the backend plugin manager package, which is a service that scans a configured root directory (`dynamicPlugins.rootDirectory` in the `app-config.yaml` file) for dynamic plugin packages and loads them dynamically. + +You can use the dynamic plugins that come preinstalled with {product} or install external dynamic plugins from a public NPM registry. + +// Operator installation +include::../modules/dynamic-plugins/proc-config-dynamic-plugins-rhdh-operator.adoc[leveloffset=+1] + +// Helm installation +include::../modules/dynamic-plugins/con-install-dynamic-plugin-helm.adoc[leveloffset=+1] +include::../modules/dynamic-plugins/proc-obtaining-integrity-checksum.adoc[leveloffset=+2] +include::../modules/dynamic-plugins/ref-example-dynamic-plugin-helm-installations.adoc[leveloffset=+2] +include::../modules/dynamic-plugins/proc-rhdh-example-external-dynamic-plugins.adoc[leveloffset=+2] + +// Air gapped environment +//include::../modules/dynamic-plugins/proc-rhdh-installing-external-dynamic-plugins-airgapped.adoc[leveloffset=+1] +include::../modules/dynamic-plugins/proc-using-custom-npm-registry.adoc[leveloffset=+1] + +// Viewing installed plugins +include::../modules/dynamic-plugins/proc-viewing-installed-plugins.adoc[leveloffset=+1] + +//basic plugin configuration +//include::../modules/dynamic-plugins/con-basic-config-dynamic-plugins.adoc[leveloffset=+1] \ No newline at end of file diff --git a/assemblies/dynamic-plugins/assembly-reference-rhdh-plugins.adoc b/assemblies/dynamic-plugins/assembly-reference-rhdh-plugins.adoc new file mode 100644 index 
0000000000..a54d577027 --- /dev/null +++ b/assemblies/dynamic-plugins/assembly-reference-rhdh-plugins.adoc @@ -0,0 +1,11 @@ +[id="rhdh-reference-rhdh-plugins_{context}"] += Plugins reference + +include::../modules/dynamic-plugins/con-preinstalled-dynamic-plugins.adoc[leveloffset=+1] + +include::../modules/dynamic-plugins/proc-viewing-installed-plugins.adoc[leveloffset=+1] + +[id="rhdh-supported-plugins"] +include::../modules/dynamic-plugins/ref-rh-supported-plugins.adoc[leveloffset=+1] +include::../modules/dynamic-plugins/ref-rh-tech-preview-plugins.adoc[leveloffset=+1] +include::../modules/dynamic-plugins/ref-community-plugins.adoc[leveloffset=+1] \ No newline at end of file diff --git a/assemblies/dynamic-plugins/assembly-rhdh-installing-dynamic-plugins.adoc b/assemblies/dynamic-plugins/assembly-rhdh-installing-dynamic-plugins.adoc new file mode 100644 index 0000000000..47b7c26d9c --- /dev/null +++ b/assemblies/dynamic-plugins/assembly-rhdh-installing-dynamic-plugins.adoc @@ -0,0 +1,41 @@ +[id="rhdh-installing-dynamic-plugins"] += Dynamic plugin installation + +The dynamic plugin support is based on the backend plugin manager package, which is a service that scans a configured root directory (`dynamicPlugins.rootDirectory` in the app config) for dynamic plugin packages and loads them dynamically. + +You can use the dynamic plugins that come preinstalled with {product} or install external dynamic plugins from a public NPM registry. + +// Preinstalled plugins +include::../modules/dynamic-plugins/con-preinstalled-dynamic-plugins.adoc[leveloffset=+1] + +// Dynamic plugins tables + +// Red Hat Generally Available supported plugins +include::../modules/dynamic-plugins/ref-rh-supported-plugins.adoc[leveloffset=+3] + +[NOTE] +==== +* For more information about configuring KeyCloak, see link:{plugins-configure-book-url}[{plugins-configure-book-title}]. 
+ +* For more information about configuring TechDocs, see link:{LinkAdminGuide}#assembly-techdocs-plugin_{context}[Configuring the TechDocs plugin in {product}]. +==== + +// Technology preview support statement +include::../../artifacts/snip-dynamic-plugins-support.adoc[leveloffset=+3] + +// Red Hat Technology Preview plugins +[id="rhdh-tech-preview-plugins"] +include::../modules/dynamic-plugins/ref-rh-tech-preview-plugins.adoc[leveloffset=+4] + +[NOTE] +==== +* A new Technology Preview plugin for Red Hat Ansible Automation Platform (RHAAP) is available, which replaces this older one. See link:{LinkPluginsGuide}#rhdh-compatible-plugins[Other installable plugins] in the _{NameOfPluginsGuide} guide_ for further details. See xref:rhdh-compatible-plugins[Dynamic plugins support matrix]. +==== + +// Community plugins +[id="rhdh-community-plugins"] +include::../modules/dynamic-plugins/ref-community-plugins.adoc[leveloffset=+4] + +// Red Hat compatible plugins +[id="rhdh-compatible-plugins"] +include::../modules/dynamic-plugins/ref-rh-compatible-plugins.adoc[leveloffset=+1] \ No newline at end of file diff --git a/assemblies/dynamic-plugins/assembly-troubleshooting-rhdh-plugins.adoc b/assemblies/dynamic-plugins/assembly-troubleshooting-rhdh-plugins.adoc new file mode 100644 index 0000000000..566325a98a --- /dev/null +++ b/assemblies/dynamic-plugins/assembly-troubleshooting-rhdh-plugins.adoc @@ -0,0 +1,4 @@ +[id="rhdh-troubleshooting-rhdh-plugins_{context}"] += Troubleshooting {product-short} plugins + +//include::../modules/dynamic-plugins/con-rhdh-plugins.adoc[leveloffset=+1] \ No newline at end of file diff --git a/assemblies/dynamic-plugins/assembly-using-rhdh-plugins.adoc b/assemblies/dynamic-plugins/assembly-using-rhdh-plugins.adoc new file mode 100644 index 0000000000..12560a1799 --- /dev/null +++ b/assemblies/dynamic-plugins/assembly-using-rhdh-plugins.adoc @@ -0,0 +1,20 @@ +[id="rhdh-using-rhdh-plugins_{context}"] += Using dynamic plugins + +// Ansible 
+include::../modules/dynamic-plugins/con-ansible-plugin-user.adoc[leveloffset=+1] + +// Argo CD +include::../../artifacts/rhdh-plugins-reference/argocd/argocd-plugin-user.adoc[leveloffset=+1] + +// Keycloak +include::../../artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-user.adoc[leveloffset=+1] + +// Nexus +include::../../artifacts/rhdh-plugins-reference/nexus-repository-manager/nexus-repository-manager-plugin-user.adoc[leveloffset=+1] + +// Tekton +include::../../artifacts/rhdh-plugins-reference/tekton/tekton-plugin-user.adoc[leveloffset=+1] + +// Topology +include::../../modules/dynamic-plugins/proc-using-topology-plugin.adoc[leveloffset=+1] \ No newline at end of file diff --git a/assemblies/images b/assemblies/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/assemblies/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/assemblies/modules b/assemblies/modules new file mode 120000 index 0000000000..464b823aca --- /dev/null +++ b/assemblies/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/build/scripts/build-ccutil.sh b/build/scripts/build-ccutil.sh new file mode 100755 index 0000000000..67ef5737ed --- /dev/null +++ b/build/scripts/build-ccutil.sh @@ -0,0 +1,72 @@ +#!/bin/bash +# +# Copyright (c) 2025 Red Hat, Inc. 
+# This program and the accompanying materials are made +# available under the terms of the Eclipse Public License 2.0 +# which is available at https://www.eclipse.org/legal/epl-2.0/ +# +# SPDX-License-Identifier: EPL-2.0 +# +# Utility script build html previews with referenced images +# Requires: Podman - see https://podman.io +# input: titles/ +# output: titles-generated/ and titles-generated/$BRANCH/ + +# grep regex for title folders to exclude from processing below +EXCLUDED_TITLES="rhdh-plugins-reference" +BRANCH="main" + +while [[ "$#" -gt 0 ]]; do + case $1 in + '-b') BRANCH="$2"; shift 1;; + esac + shift 1 +done + +rm -fr titles-generated/; +mkdir -p titles-generated/"${BRANCH}"; +echo "Red Hat Developer Hub Documentation Preview - ${BRANCH}
    " > titles-generated/"${BRANCH}"/index.html; +# exclude the rhdh-plugins-reference as it's embedded in the admin guide +# shellcheck disable=SC2044,SC2013 +set -e +for t in $(find titles -name master.adoc | sort -uV | grep -E -v "${EXCLUDED_TITLES}"); do + d=${t%/*}; + dest=${d/titles/titles-generated\/${BRANCH}}; + rm -rf "$d/build" || true + CMD="podman run --interactive --rm --tty \ + --volume "$(pwd)":/docs:Z \ + --workdir "/docs/$d" \ + quay.io/ivanhorvath/ccutil:amazing ccutil compile --format html-single --lang en-US"; + echo -e -n "\nBuilding $t into $dest ...\n "; + echo "${CMD}" | sed -r -e "s/\ +/ \\\\\n /g" + $CMD + rm -rfv "$dest" || true + mv -f "$d/build/tmp/en-US/html-single/" "$dest" + # shellcheck disable=SC2013 + for im in $(grep images/ "$dest/index.html" | grep -E -v 'mask-image|background|fa-icons|jupumbra' | sed -r -e "s#.+(images/[^\"]+)\".+#\1#"); do + # echo " Copy $im ..."; + IMDIR="$dest/${im%/*}/" + mkdir -p "${IMDIR}"; rsync -q "$im" "${IMDIR}"; + done + # shellcheck disable=SC2044 + # for f in $(find "$dest/" -type f); do echo " $f"; done + echo "
  • ${dest/titles-generated\/${BRANCH}\//}
  • " >> titles-generated/"${BRANCH}"/index.html; +done +echo "
" >> titles-generated/"${BRANCH}"/index.html + +# shellcheck disable=SC2143 +if [[ $BRANCH == "pr-"* ]]; then + # fetch the existing https://redhat-developer.github.io/red-hat-developers-documentation-rhdh/index.html to add prs and branches + curl -sSL https://redhat-developer.github.io/red-hat-developers-documentation-rhdh/pulls.html -o titles-generated/pulls.html + if [[ -z $(grep "./${BRANCH}/index.html" titles-generated/pulls.html) ]]; then + echo "Building root index for $BRANCH in titles-generated/pulls.html ..."; + echo "
  • ${BRANCH}
  • " >> titles-generated/pulls.html + fi +else + # fetch the existing https://redhat-developer.github.io/red-hat-developers-documentation-rhdh/index.html to add prs and branches + curl -sSL https://redhat-developer.github.io/red-hat-developers-documentation-rhdh/index.html -o titles-generated/index.html + if [[ -z $(grep "./${BRANCH}/index.html" titles-generated/index.html) ]]; then + echo "Building root index for $BRANCH in titles-generated/index.html ..."; + echo "
  • ${BRANCH}
  • " >> titles-generated/index.html + fi +fi diff --git a/build/scripts/build.sh b/build/scripts/build.sh new file mode 100755 index 0000000000..7d9b149d86 --- /dev/null +++ b/build/scripts/build.sh @@ -0,0 +1,80 @@ +#!/bin/bash +# +# Copyright (c) 2023-2024 Red Hat, Inc. +# This program and the accompanying materials are made +# available under the terms of the Eclipse Public License 2.0 +# which is available at https://www.eclipse.org/legal/epl-2.0/ +# +# SPDX-License-Identifier: EPL-2.0 +# +# Utility script build html previews with referenced images +# Requires: asciidoctor - see https://docs.asciidoctor.org/asciidoctor/latest/install/linux-packaging/ +# input: titles/ +# output: titles-generated/ and titles-generated/$BRANCH/ + +# grep regex for title folders to exclude from processing below +EXCLUDED_TITLES="rhdh-plugins-reference" +BRANCH="main" + +while [[ "$#" -gt 0 ]]; do + case $1 in + '-b') BRANCH="$2"; shift 1;; + esac + shift 1 +done + +rm -fr titles-generated/; +mkdir -p titles-generated/"${BRANCH}"; +echo "Red Hat Developer Hub Documentation Preview - ${BRANCH}
      " > titles-generated/"${BRANCH}"/index.html; +# exclude the rhdh-plugins-reference as it's embedded in the admin guide +# shellcheck disable=SC2044,SC2013 +set -e +for t in $(find titles -name master.adoc | sort -uV | grep -E -v "${EXCLUDED_TITLES}"); do + d=${t%/*}; d=${d/titles/titles-generated\/${BRANCH}}; + CMD="asciidoctor \ + --backend=html5 \ + --destination-dir $d \ + --failure-level ERROR \ + --section-numbers \ + --trace \ + --warnings \ + -a chapter-signifier=Chapter \ + -a sectnumslevels=5 \ + -a source-highlighter=coderay \ + -a stylesdir=$(pwd)/.asciidoctor \ + -a stylesheet=docs.css \ + -a toc=left \ + -a toclevels=5 \ + -o index.html \ + $t"; + echo -e -n "\nBuilding $t into $d ...\n "; + echo "${CMD}" | sed -r -e "s/\ +/ \\\\\n /g" + $CMD + # shellcheck disable=SC2013 + for im in $(grep images/ "$d/index.html" | grep -E -v 'mask-image|background|fa-icons|jupumbra' | sed -r -e "s#.+(images/[^\"]+)\".+#\1#"); do + # echo " Copy $im ..."; + IMDIR="$d/${im%/*}/" + mkdir -p "${IMDIR}"; rsync -q "$im" "${IMDIR}"; + done + # shellcheck disable=SC2044 + for f in $(find "$d/" -type f); do echo " $f"; done + echo "
    • ${d/titles-generated\/${BRANCH}\//}
    • " >> titles-generated/"${BRANCH}"/index.html; +done +echo "
    " >> titles-generated/"${BRANCH}"/index.html + +# shellcheck disable=SC2143 +if [[ $BRANCH == "pr-"* ]]; then + # fetch the existing https://redhat-developer.github.io/red-hat-developers-documentation-rhdh/index.html to add prs and branches + curl -sSL https://redhat-developer.github.io/red-hat-developers-documentation-rhdh/pulls.html -o titles-generated/pulls.html + if [[ -z $(grep "./${BRANCH}/index.html" titles-generated/pulls.html) ]]; then + echo "Building root index for $BRANCH in titles-generated/pulls.html ..."; + echo "
  • ${BRANCH}
  • " >> titles-generated/pulls.html + fi +else + # fetch the existing https://redhat-developer.github.io/red-hat-developers-documentation-rhdh/index.html to add prs and branches + curl -sSL https://redhat-developer.github.io/red-hat-developers-documentation-rhdh/index.html -o titles-generated/index.html + if [[ -z $(grep "./${BRANCH}/index.html" titles-generated/index.html) ]]; then + echo "Building root index for $BRANCH in titles-generated/index.html ..."; + echo "
  • ${BRANCH}
  • " >> titles-generated/index.html + fi +fi diff --git a/build/templates/assembly.adoc.jinja b/build/templates/assembly.adoc.jinja new file mode 100644 index 0000000000..ec4de48353 --- /dev/null +++ b/build/templates/assembly.adoc.jinja @@ -0,0 +1,12 @@ +:_content-type: ASSEMBLY +[id="{{ assembly_id }}"] += {{ assembly_title }} + +{{ assembly_introduction | default(omit) }} +{% for issue in vars %} +include::modules/release-notes/snip-{{ issue.fields.customfield_12320850 | lower | replace(" ", "-") }}-{{ issue.key | lower }}.adoc[leveloffset=+1] + +{% endfor %} +{% if not vars %} +None. +{% endif %} diff --git a/build/templates/snippet-security.adoc.jinja2 b/build/templates/snippet-security.adoc.jinja2 new file mode 100644 index 0000000000..782002e6fd --- /dev/null +++ b/build/templates/snippet-security.adoc.jinja2 @@ -0,0 +1 @@ +{{ text }} diff --git a/build/templates/snippet-with-jira-link.adoc.jinja2 b/build/templates/snippet-with-jira-link.adoc.jinja2 new file mode 100644 index 0000000000..27b1c7f65c --- /dev/null +++ b/build/templates/snippet-with-jira-link.adoc.jinja2 @@ -0,0 +1,7 @@ +[id="{{ id }}"] += {{ title }} + +{{ text }} + +.Additional resources +* link:https://issues.redhat.com/browse/{{ key }}[{{ key }}] diff --git a/build/templates/snippet-without-jira-link.adoc.jinja2 b/build/templates/snippet-without-jira-link.adoc.jinja2 new file mode 100644 index 0000000000..0b45157dad --- /dev/null +++ b/build/templates/snippet-without-jira-link.adoc.jinja2 @@ -0,0 +1,7 @@ +[id="{{ id }}"] += {{ title }} + +{{ text }} + +// .Additional resources +// * link:https://issues.redhat.com/browse/{{ key }}[{{ key }}] diff --git a/images/rhdh-plugins-reference/aap-backend-plugin-user1.png b/images/rhdh-plugins-reference/aap-backend-plugin-user1.png new file mode 100644 index 0000000000..d55458b1e8 Binary files /dev/null and b/images/rhdh-plugins-reference/aap-backend-plugin-user1.png differ diff --git a/images/rhdh-plugins-reference/aap-backend-plugin-user2.png 
b/images/rhdh-plugins-reference/aap-backend-plugin-user2.png new file mode 100644 index 0000000000..fc081e49c4 Binary files /dev/null and b/images/rhdh-plugins-reference/aap-backend-plugin-user2.png differ diff --git a/images/rhdh-plugins-reference/acr-plugin-user1.png b/images/rhdh-plugins-reference/acr-plugin-user1.png new file mode 100644 index 0000000000..5252732ecb Binary files /dev/null and b/images/rhdh-plugins-reference/acr-plugin-user1.png differ diff --git a/images/rhdh-plugins-reference/argo-cd-plugin-history.png b/images/rhdh-plugins-reference/argo-cd-plugin-history.png new file mode 100644 index 0000000000..21374a4667 Binary files /dev/null and b/images/rhdh-plugins-reference/argo-cd-plugin-history.png differ diff --git a/images/rhdh-plugins-reference/argo-cd-plugin-overview-card-details.png b/images/rhdh-plugins-reference/argo-cd-plugin-overview-card-details.png new file mode 100644 index 0000000000..8c5cf048bb Binary files /dev/null and b/images/rhdh-plugins-reference/argo-cd-plugin-overview-card-details.png differ diff --git a/images/rhdh-plugins-reference/argo-cd-plugin-overview-card.png b/images/rhdh-plugins-reference/argo-cd-plugin-overview-card.png new file mode 100644 index 0000000000..ea18dd1004 Binary files /dev/null and b/images/rhdh-plugins-reference/argo-cd-plugin-overview-card.png differ diff --git a/images/rhdh-plugins-reference/argocd.png b/images/rhdh-plugins-reference/argocd.png new file mode 100644 index 0000000000..d8efd01aca Binary files /dev/null and b/images/rhdh-plugins-reference/argocd.png differ diff --git a/images/rhdh-plugins-reference/deployment_summary.png b/images/rhdh-plugins-reference/deployment_summary.png new file mode 100644 index 0000000000..a24b5cc882 Binary files /dev/null and b/images/rhdh-plugins-reference/deployment_summary.png differ diff --git a/images/rhdh-plugins-reference/group1.jpg b/images/rhdh-plugins-reference/group1.jpg new file mode 100644 index 0000000000..4b9a277ac4 Binary files /dev/null and 
b/images/rhdh-plugins-reference/group1.jpg differ diff --git a/images/rhdh-plugins-reference/jfrog-plugin-user1.png b/images/rhdh-plugins-reference/jfrog-plugin-user1.png new file mode 100644 index 0000000000..ff04ff1e4e Binary files /dev/null and b/images/rhdh-plugins-reference/jfrog-plugin-user1.png differ diff --git a/images/rhdh-plugins-reference/link.png b/images/rhdh-plugins-reference/link.png new file mode 100644 index 0000000000..bf5e4887af Binary files /dev/null and b/images/rhdh-plugins-reference/link.png differ diff --git a/images/rhdh-plugins-reference/nexus-repository-manager.png b/images/rhdh-plugins-reference/nexus-repository-manager.png new file mode 100644 index 0000000000..fdb63a1de0 Binary files /dev/null and b/images/rhdh-plugins-reference/nexus-repository-manager.png differ diff --git a/images/rhdh-plugins-reference/ocm-plugin-user1.png b/images/rhdh-plugins-reference/ocm-plugin-user1.png new file mode 100644 index 0000000000..3550b5463f Binary files /dev/null and b/images/rhdh-plugins-reference/ocm-plugin-user1.png differ diff --git a/images/rhdh-plugins-reference/ocm-plugin-user2.png b/images/rhdh-plugins-reference/ocm-plugin-user2.png new file mode 100644 index 0000000000..49c6211334 Binary files /dev/null and b/images/rhdh-plugins-reference/ocm-plugin-user2.png differ diff --git a/images/rhdh-plugins-reference/quay-plugin-backstage1.png b/images/rhdh-plugins-reference/quay-plugin-backstage1.png new file mode 100644 index 0000000000..0b732eab34 Binary files /dev/null and b/images/rhdh-plugins-reference/quay-plugin-backstage1.png differ diff --git a/images/rhdh-plugins-reference/quay-plugin-backstage2.png b/images/rhdh-plugins-reference/quay-plugin-backstage2.png new file mode 100644 index 0000000000..9ea03e373b Binary files /dev/null and b/images/rhdh-plugins-reference/quay-plugin-backstage2.png differ diff --git a/images/rhdh-plugins-reference/quay-plugin-backstage3.png b/images/rhdh-plugins-reference/quay-plugin-backstage3.png new file 
mode 100644 index 0000000000..02aa179e57 Binary files /dev/null and b/images/rhdh-plugins-reference/quay-plugin-backstage3.png differ diff --git a/images/rhdh-plugins-reference/sidebar.png b/images/rhdh-plugins-reference/sidebar.png new file mode 100644 index 0000000000..5a61b95864 Binary files /dev/null and b/images/rhdh-plugins-reference/sidebar.png differ diff --git a/images/rhdh-plugins-reference/tekton-plugin-pipeline-expand.png b/images/rhdh-plugins-reference/tekton-plugin-pipeline-expand.png new file mode 100644 index 0000000000..b4b7872ac3 Binary files /dev/null and b/images/rhdh-plugins-reference/tekton-plugin-pipeline-expand.png differ diff --git a/images/rhdh-plugins-reference/tekton-plugin-pipeline.png b/images/rhdh-plugins-reference/tekton-plugin-pipeline.png new file mode 100644 index 0000000000..c6296a1305 Binary files /dev/null and b/images/rhdh-plugins-reference/tekton-plugin-pipeline.png differ diff --git a/images/rhdh-plugins-reference/tekton-plugin-user1.png b/images/rhdh-plugins-reference/tekton-plugin-user1.png new file mode 100644 index 0000000000..c45dcade4c Binary files /dev/null and b/images/rhdh-plugins-reference/tekton-plugin-user1.png differ diff --git a/images/rhdh-plugins-reference/tekton-plugin-user2.png b/images/rhdh-plugins-reference/tekton-plugin-user2.png new file mode 100644 index 0000000000..38ff4cd33e Binary files /dev/null and b/images/rhdh-plugins-reference/tekton-plugin-user2.png differ diff --git a/images/rhdh-plugins-reference/tekton-plugin-user3.png b/images/rhdh-plugins-reference/tekton-plugin-user3.png new file mode 100644 index 0000000000..cbe6aabde6 Binary files /dev/null and b/images/rhdh-plugins-reference/tekton-plugin-user3.png differ diff --git a/images/rhdh-plugins-reference/topology-tab-user1.png b/images/rhdh-plugins-reference/topology-tab-user1.png new file mode 100644 index 0000000000..4d8338ce35 Binary files /dev/null and b/images/rhdh-plugins-reference/topology-tab-user1.png differ diff --git 
a/images/rhdh-plugins-reference/topology-tab-user2.png b/images/rhdh-plugins-reference/topology-tab-user2.png new file mode 100644 index 0000000000..e01fd70953 Binary files /dev/null and b/images/rhdh-plugins-reference/topology-tab-user2.png differ diff --git a/images/rhdh-plugins-reference/topology-tab-user3.png b/images/rhdh-plugins-reference/topology-tab-user3.png new file mode 100644 index 0000000000..23862ec211 Binary files /dev/null and b/images/rhdh-plugins-reference/topology-tab-user3.png differ diff --git a/images/rhdh-plugins-reference/user-list.jpg b/images/rhdh-plugins-reference/user-list.jpg new file mode 100644 index 0000000000..344fd51353 Binary files /dev/null and b/images/rhdh-plugins-reference/user-list.jpg differ diff --git a/images/rhdh-plugins-reference/user2.jpg b/images/rhdh-plugins-reference/user2.jpg new file mode 100644 index 0000000000..95d29b7666 Binary files /dev/null and b/images/rhdh-plugins-reference/user2.jpg differ diff --git a/images/rhdh-plugins-reference/users.jpg b/images/rhdh-plugins-reference/users.jpg new file mode 100644 index 0000000000..393f14bdd5 Binary files /dev/null and b/images/rhdh-plugins-reference/users.jpg differ diff --git a/images/rhdh/disabling-telemetry.png b/images/rhdh/disabling-telemetry.png new file mode 100644 index 0000000000..ead66f664b Binary files /dev/null and b/images/rhdh/disabling-telemetry.png differ diff --git a/images/rhdh/enable-gh-discovery.png b/images/rhdh/enable-gh-discovery.png new file mode 100644 index 0000000000..0b76eb16f6 Binary files /dev/null and b/images/rhdh/enable-gh-discovery.png differ diff --git a/images/rhdh/enable-gh-member-discovery.png b/images/rhdh/enable-gh-member-discovery.png new file mode 100644 index 0000000000..9a2897ca03 Binary files /dev/null and b/images/rhdh/enable-gh-member-discovery.png differ diff --git a/images/rhdh/example-callback-url.png b/images/rhdh/example-callback-url.png new file mode 100644 index 0000000000..1a8ce804b5 Binary files /dev/null and 
b/images/rhdh/example-callback-url.png differ diff --git a/images/rhdh/helm-upgrade.png b/images/rhdh/helm-upgrade.png new file mode 100644 index 0000000000..c2568814ee Binary files /dev/null and b/images/rhdh/helm-upgrade.png differ diff --git a/images/rhdh/operator-install-1.png b/images/rhdh/operator-install-1.png new file mode 100644 index 0000000000..d2d4a34425 Binary files /dev/null and b/images/rhdh/operator-install-1.png differ diff --git a/images/rhdh/operator-install-2.png b/images/rhdh/operator-install-2.png new file mode 100644 index 0000000000..479ac57d52 Binary files /dev/null and b/images/rhdh/operator-install-2.png differ diff --git a/images/rhdh/rhdh-helm-install.png b/images/rhdh/rhdh-helm-install.png new file mode 100644 index 0000000000..b7a1958055 Binary files /dev/null and b/images/rhdh/rhdh-helm-install.png differ diff --git a/images/rhdh/segment-source-helm.png b/images/rhdh/segment-source-helm.png new file mode 100644 index 0000000000..0b903b7eab Binary files /dev/null and b/images/rhdh/segment-source-helm.png differ diff --git a/images/rhdh/template-editor.png b/images/rhdh/template-editor.png new file mode 100644 index 0000000000..43b7497cf9 Binary files /dev/null and b/images/rhdh/template-editor.png differ diff --git a/images/rhdh/upgrade-helm-metrics.png b/images/rhdh/upgrade-helm-metrics.png new file mode 100644 index 0000000000..2bffa0c7f6 Binary files /dev/null and b/images/rhdh/upgrade-helm-metrics.png differ diff --git a/images/user-guide/custom-theme-mode-1.png b/images/user-guide/custom-theme-mode-1.png new file mode 100644 index 0000000000..377a15c9bb Binary files /dev/null and b/images/user-guide/custom-theme-mode-1.png differ diff --git a/jira2asciidoc.yml b/jira2asciidoc.yml new file mode 100644 index 0000000000..b99a7c824c --- /dev/null +++ b/jira2asciidoc.yml @@ -0,0 +1,94 @@ +--- +jira: + server: 'https://issues.redhat.com' +product: + version: + minor_glob: 1.3.* + patch: 1.3 +sections: + - id: new-features + title: New 
features + description: | + This section highlights new features in {product} {product-version}. + query: > + project = "Red Hat Internal Developer Platform" + AND "Release Note Status" = "Done" + AND level is EMPTY + AND status in (Closed, "Release Pending") + AND "Release Note Type" in ("Feature", "Enhancement") + AND (fixVersion ~ "{version_minor_glob}" OR fixVersion = "{version_patch}") + ORDER BY key + template: without-jira-link + - id: breaking-changes + title: Breaking changes + description: | + This section lists breaking changes in {product} {product-version}. + query: > + project = "Red Hat Internal Developer Platform" + AND "Release Note Status" = "Done" + AND level is EMPTY + AND status in (Closed, "Release Pending") + AND "Release Note Type" in ("Removed Functionality") + AND (fixVersion ~ "{version_minor_glob}" OR fixVersion = "{version_patch}") + ORDER BY key + template: with-jira-link + - id: deprecated-functionalities + title: Deprecated functionalities + description: | + This section lists deprecated functionalities in {product} {product-version}. + query: > + project = "Red Hat Internal Developer Platform" + AND "Release Note Status" = "Done" + AND level is EMPTY + AND status in (Closed, "Release Pending") + AND "Release Note Type" in ("Deprecated Functionality") + AND (fixVersion ~ "{version_minor_glob}" OR fixVersion = "{version_patch}") + ORDER BY key + template: with-jira-link + - id: technology-preview + title: Technology Preview + description: | + This section lists Technology Preview features in {product} {product-version}. + + [IMPORTANT] + ==== + Technology Preview features provide early access to upcoming product innovations, enabling you to test functionality and provide feedback during the development process. + However, these features are not fully supported under Red Hat Subscription Level Agreements, may not be functionally complete, and are not intended for production use. 
+ As Red Hat considers making future iterations of Technology Preview features generally available, we will attempt to resolve any issues that customers experience when using these features. + See: link:https://access.redhat.com/support/offerings/techpreview/[Technology Preview support scope]. + ==== + query: > + project = "Red Hat Internal Developer Platform" + AND "Release Note Status" = "Done" + AND level is EMPTY + AND status in (Closed, "Release Pending") + AND "Release Note Type" in ("Developer Preview", "Technology Preview") + AND (fixVersion ~ "{version_minor_glob}" OR fixVersion = "{version_patch}") + ORDER BY key + template: with-jira-link + - id: fixed-issues + title: Fixed issues + description: | + This section lists issues fixed in {product} {product-version}. + query: > + project = "Red Hat Internal Developer Platform" + AND "Release Note Status" = "Done" + AND level is EMPTY + AND status in (Closed, "Release Pending") + AND "Release Note Type" = "Bug Fix" + AND (fixVersion ~ "{version_minor_glob}" OR fixVersion = "{version_patch}") + ORDER BY key + template: with-jira-link + - id: known-issues + title: Known issues + description: | + This section lists known issues in {product} {product-version}. 
+ query: > + project = "Red Hat Internal Developer Platform" + AND "Release Note Status" = "Done" + AND level is EMPTY + AND "Release Note Type" in ("Known Issue") + AND affectedVersion <= "{version_patch}" + AND (fixVersion > "{version_patch}" OR fixVersion is EMPTY) + ORDER BY key DESC + template: with-jira-link diff --git a/modules/admin/proc-configuring-postgresql-instance-using-helm.adoc b/modules/admin/proc-configuring-postgresql-instance-using-helm.adoc new file mode 100644 index 0000000000..f45bfec0ee --- /dev/null +++ b/modules/admin/proc-configuring-postgresql-instance-using-helm.adoc @@ -0,0 +1,162 @@ +[id="proc-configuring-postgresql-instance-using-helm_{context}"] += Configuring an external PostgreSQL instance using the Helm Chart + +You can configure an external PostgreSQL instance by using the Helm Chart. By default, the Helm Chart creates and manages a local instance of PostgreSQL in the same namespace where you have deployed the {product-very-short} instance. However, you can change this default setting to configure an external PostgreSQL database server, for example, Amazon Web Services (AWS) Relational Database Service (RDS) or Azure database. + +.Prerequisites + +* You are using a supported version of PostgreSQL. For more information, see the link:https://access.redhat.com/support/policy/updates/developerhub[Product life cycle page]. +* You have the following details: +** `db-host`: Denotes your PostgreSQL instance Domain Name System (DNS) or IP address +** `db-port`: Denotes your PostgreSQL instance port number, such as `5432` +** `username`: Denotes the user name to connect to your PostgreSQL instance +** `password`: Denotes the password to connect to your PostgreSQL instance +* You have installed the {product-very-short} application by using the Helm Chart. +* Optional: You have a CA certificate, Transport Layer Security (TLS) private key, and TLS certificate so that you can secure your database connection by using the TLS protocol. 
For more information, refer to your PostgreSQL vendor documentation. + +[NOTE] +==== +By default, {product-short} uses a database for each plugin and automatically creates it if none is found. You might need the `Create Database` privilege in addition to `PSQL Database` privileges for configuring an external PostgreSQL instance. +==== + + +.Procedure + +. Optional: Create a certificate secret to configure your PostgreSQL instance with a TLS connection: ++ +[source,terminal] +---- +cat < create -f - +apiVersion: v1 +kind: Secret +metadata: + name: <1> +type: Opaque +stringData: + postgres-ca.pem: |- + -----BEGIN CERTIFICATE----- + <2> + postgres-key.key: |- + -----BEGIN CERTIFICATE----- + <3> + postgres-crt.pem: |- + -----BEGIN CERTIFICATE----- + <4> + # ... +EOF +---- +<1> Provide the name of the certificate secret. +<2> Provide the CA certificate key. +<3> Optional: Provide the TLS private key. +<4> Optional: Provide the TLS certificate key. + +. Create a credential secret to connect with the PostgreSQL instance: ++ +[source,terminal] +---- +cat < create -f - +apiVersion: v1 +kind: Secret +metadata: + name: <1> +type: Opaque +stringData: <2> + POSTGRES_PASSWORD: + POSTGRES_PORT: "" + POSTGRES_USER: + POSTGRES_HOST: + PGSSLMODE: # for TLS connection <3> + NODE_EXTRA_CA_CERTS: # for TLS connection, e.g. /opt/app-root/src/postgres-crt.pem <4> +EOF +---- +<1> Provide the name of the credential secret. +<2> Provide credential data to connect with your PostgreSQL instance. +<3> Optional: Provide the value based on the required link:https://www.postgresql.org/docs/15/libpq-connect.html#LIBPQ-CONNECT-SSLMODE[Secure Sockets Layer (SSL) mode]. +<4> Optional: Provide the value only if you need a TLS connection for your PostgreSQL instance. + +. Configure your PostgreSQL instance in the Helm configuration file named `values.yaml`: ++ +[source,yaml] +---- +# ... 
+upstream: + postgresql: + enabled: false # disable PostgreSQL instance creation <1> + auth: + existingSecret: # inject credentials secret to Backstage <2> + backstage: + appConfig: + backend: + database: + connection: # configure Backstage DB connection parameters + host: ${POSTGRES_HOST} + port: ${POSTGRES_PORT} + user: ${POSTGRES_USER} + password: ${POSTGRES_PASSWORD} + ssl: + rejectUnauthorized: true, + ca: + $file: /opt/app-root/src/postgres-ca.pem + key: + $file: /opt/app-root/src/postgres-key.key + cert: + $file: /opt/app-root/src/postgres-crt.pem + extraEnvVarsSecrets: + - # inject credentials secret to Backstage <3> + extraEnvVars: + - name: BACKEND_SECRET + valueFrom: + secretKeyRef: + key: backend-secret + name: '{{ include "janus-idp.backend-secret-name" $ }}' + extraVolumeMounts: + - mountPath: /opt/app-root/src/dynamic-plugins-root + name: dynamic-plugins-root + - mountPath: /opt/app-root/src/postgres-crt.pem + name: postgres-crt # inject TLS certificate to Backstage cont. <4> + subPath: postgres-crt.pem + - mountPath: /opt/app-root/src/postgres-ca.pem + name: postgres-ca # inject CA certificate to Backstage cont. <5> + subPath: postgres-ca.pem + - mountPath: /opt/app-root/src/postgres-key.key + name: postgres-key # inject TLS private key to Backstage cont. <6> + subPath: postgres-key.key + extraVolumes: + - ephemeral: + volumeClaimTemplate: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + name: dynamic-plugins-root + - configMap: + defaultMode: 420 + name: dynamic-plugins + optional: true + name: dynamic-plugins + - name: dynamic-plugins-npmrc + secret: + defaultMode: 420 + optional: true + secretName: dynamic-plugins-npmrc + - name: postgres-crt + secret: + secretName: <7> + # ... +---- +<1> Set the value of the `upstream.postgresql.enabled` parameter to `false` to disable creating local PostgreSQL instances. +<2> Provide the name of the credential secret. +<3> Provide the name of the credential secret. 
+<4> Optional: Provide the name of the TLS certificate only for a TLS connection.
+<5> Optional: Provide the name of the CA certificate only for a TLS connection.
+<6> Optional: Provide the name of the TLS private key only if your TLS connection requires a private key.
+<7> Provide the name of the certificate secret if you have configured a TLS connection.
+
+. Apply the configuration changes in your Helm configuration file named `values.yaml`:
++
+[source,terminal,subs="attributes+"]
+----
+helm upgrade -n <your-namespace> <your-release-name> openshift-helm-charts/redhat-developer-hub -f values.yaml --version {product-chart-version}
+----
diff --git a/modules/admin/proc-configuring-postgresql-instance-using-operator.adoc b/modules/admin/proc-configuring-postgresql-instance-using-operator.adoc
new file mode 100644
index 0000000000..3389eb0b49
--- /dev/null
+++ b/modules/admin/proc-configuring-postgresql-instance-using-operator.adoc
@@ -0,0 +1,109 @@
+[id="proc-configuring-postgresql-instance-using-operator_{context}"]
+= Configuring an external PostgreSQL instance using the Operator
+
+You can configure an external PostgreSQL instance using the {product} Operator. By default, the Operator creates and manages a local instance of PostgreSQL in the same namespace where you have deployed the {product-very-short} instance. However, you can change this default setting to configure an external PostgreSQL database server, for example, Amazon Web Services (AWS) Relational Database Service (RDS) or Azure database.
+
+.Prerequisites
+
+* You are using a supported version of PostgreSQL. For more information, see the link:https://access.redhat.com/support/policy/updates/developerhub[Product life cycle page].
+* You have the following details: +** `db-host`: Denotes your PostgreSQL instance Domain Name System (DNS) or IP address +** `db-port`: Denotes your PostgreSQL instance port number, such as `5432` +** `username`: Denotes the user name to connect to your PostgreSQL instance +** `password`: Denotes the password to connect to your PostgreSQL instance +* You have installed the {product} Operator. +* Optional: You have a CA certificate, Transport Layer Security (TLS) private key, and TLS certificate so that you can secure your database connection by using the TLS protocol. For more information, refer to your PostgreSQL vendor documentation. + +[NOTE] +==== +By default, {product-short} uses a database for each plugin and automatically creates it if none is found. You might need the `Create Database` privilege in addition to `PSQL Database` privileges for configuring an external PostgreSQL instance. +==== + + +.Procedure + +. Optional: Create a certificate secret to configure your PostgreSQL instance with a TLS connection: ++ +[source,yaml] +---- +cat < create -f - +apiVersion: v1 +kind: Secret +metadata: + name: <1> +type: Opaque +stringData: + postgres-ca.pem: |- + -----BEGIN CERTIFICATE----- + <2> + postgres-key.key: |- + -----BEGIN CERTIFICATE----- + <3> + postgres-crt.pem: |- + -----BEGIN CERTIFICATE----- + <4> + # ... +EOF +---- +<1> Provide the name of the certificate secret. +<2> Provide the CA certificate key. +<3> Optional: Provide the TLS private key. +<4> Optional: Provide the TLS certificate key. + +. Create a credential secret to connect with the PostgreSQL instance: ++ +[source,yaml] +---- +cat < create -f - +apiVersion: v1 +kind: Secret +metadata: + name: <1> +type: Opaque +stringData: <2> + POSTGRES_PASSWORD: + POSTGRES_PORT: "" + POSTGRES_USER: + POSTGRES_HOST: + PGSSLMODE: # for TLS connection <3> + NODE_EXTRA_CA_CERTS: # for TLS connection, e.g. /opt/app-root/src/postgres-crt.pem <4> +EOF +---- +<1> Provide the name of the credential secret. 
+<2> Provide credential data to connect with your PostgreSQL instance. +<3> Optional: Provide the value based on the required link:https://www.postgresql.org/docs/15/libpq-connect.html#LIBPQ-CONNECT-SSLMODE[Secure Sockets Layer (SSL) mode]. +<4> Optional: Provide the value only if you need a TLS connection for your PostgreSQL instance. + +. Create a `Backstage` custom resource (CR): ++ +[source,terminal] +---- +cat < create -f - +apiVersion: rhdh.redhat.com/v1alpha1 +kind: Backstage +metadata: + name: +spec: + database: + enableLocalDb: false <1> + application: + extraFiles: + mountPath: # e g /opt/app-root/src + secrets: + - name: <2> + key: postgres-crt.pem, postgres-ca.pem, postgres-key.key # key name as in Secret + extraEnvs: + secrets: + - name: <3> + # ... +---- +<1> Set the value of the `enableLocalDb` parameter to `false` to disable creating local PostgreSQL instances. +<2> Provide the name of the certificate secret if you have configured a TLS connection. +<3> Provide the name of the credential secret that you created. ++ +[NOTE] +==== +The environment variables listed in the `Backstage` CR work with the Operator default configuration. If you have changed the Operator default configuration, you must reconfigure the `Backstage` CR accordingly. +==== + +. Apply the `Backstage` CR to the namespace where you have deployed the {product-very-short} instance. diff --git a/modules/admin/proc-configuring-proxy-in-helm-deployment.adoc b/modules/admin/proc-configuring-proxy-in-helm-deployment.adoc new file mode 100644 index 0000000000..cb679b0940 --- /dev/null +++ b/modules/admin/proc-configuring-proxy-in-helm-deployment.adoc @@ -0,0 +1,48 @@ +[id="proc-configuring-proxy-in-helm-deployment_{context}"] += Configuring proxy information in Helm deployment + +For Helm-based deployment, either a developer or a cluster administrator with permissions to create resources in the cluster can configure the proxy variables in a `values.yaml` Helm configuration file. 
+
+.Prerequisites
+
+* You have installed the {product} application.
+
+.Procedure
+
+. Set the proxy information in your Helm configuration file:
++
+[source,yaml]
+----
+upstream:
+  backstage:
+    extraEnvVars:
+      - name: HTTP_PROXY
+        value: '<my_http_proxy_url>'
+      - name: HTTPS_PROXY
+        value: '<my_https_proxy_url>'
+      - name: NO_PROXY
+        value: '<my_no_proxy_settings>'
+----
++
+Where,
+
+`<my_http_proxy_url>`:: Denotes a variable that you must replace with the HTTP proxy URL.
+`<my_https_proxy_url>`:: Denotes a variable that you must replace with the HTTPS proxy URL.
+`<my_no_proxy_settings>`:: Denotes a variable that you must replace with comma-separated URLs, which you want to exclude from proxying, for example, `foo.com,baz.com`.
++
+.Example: Setting proxy variables using Helm Chart
+
+[source,yaml]
+----
+upstream:
+  backstage:
+    extraEnvVars:
+      - name: HTTP_PROXY
+        value: 'http://10.10.10.105:3128'
+      - name: HTTPS_PROXY
+        value: 'http://10.10.10.106:3128'
+      - name: NO_PROXY
+        value: 'localhost,example.org'
+----
+
+. Save the configuration changes.
diff --git a/modules/admin/proc-configuring-proxy-in-operator-deployment.adoc b/modules/admin/proc-configuring-proxy-in-operator-deployment.adoc
new file mode 100644
index 0000000000..8e741ec5ff
--- /dev/null
+++ b/modules/admin/proc-configuring-proxy-in-operator-deployment.adoc
@@ -0,0 +1,80 @@
+[id="proc-configuring-proxy-in-operator-deployment_{context}"]
+= Configuring proxy information in Operator deployment
+
+For Operator-based deployment, the approach you use for proxy configuration is based on your role:
+
+* As a cluster administrator with access to the Operator namespace, you can configure the proxy variables in the Operator's default ConfigMap file. This configuration applies the proxy settings to all the users of the Operator.
+* As a developer, you can configure the proxy variables in a custom resource (CR) file. This configuration applies the proxy settings to the {product-very-short} application created from that CR.
+
+.Prerequisites
+
+* You have installed the {product} application.
+
+.Procedure
+
+. 
Perform one of the following steps based on your role: + +* As an administrator, set the proxy information in the Operator's default ConfigMap file: ++ +.. Search for a ConfigMap file named `backstage-default-config` in the default namespace `rhdh-operator` and open it. +.. Find the `deployment.yaml` key. +.. Set the value of the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the `Deployment` spec as shown in the following example: ++ +.Example: Setting proxy variables in a ConfigMap file +[source,yaml] +---- +# Other fields omitted + deployment.yaml: |- + apiVersion: apps/v1 + kind: Deployment + spec: + template: + spec: + # Other fields omitted + initContainers: + - name: install-dynamic-plugins + # command omitted + env: + - name: NPM_CONFIG_USERCONFIG + value: /opt/app-root/src/.npmrc.dynamic-plugins + - name: HTTP_PROXY + value: 'http://10.10.10.105:3128' + - name: HTTPS_PROXY + value: 'http://10.10.10.106:3128' + - name: NO_PROXY + value: 'localhost,example.org' + # Other fields omitted + containers: + - name: backstage-backend + # Other fields omitted + env: + - name: APP_CONFIG_backend_listen_port + value: "7007" + - name: HTTP_PROXY + value: 'http://10.10.10.105:3128' + - name: HTTPS_PROXY + value: 'http://10.10.10.106:3128' + - name: NO_PROXY + value: 'localhost,example.org' +---- + + +* As a developer, set the proxy information in your custom resource (CR) file as shown in the following example: ++ +.Example: Setting proxy variables in a CR file +[source, yaml] +---- +spec: + # Other fields omitted + application: + extraEnvs: + envs: + - name: HTTP_PROXY + value: 'http://10.10.10.105:3128' + - name: HTTPS_PROXY + value: 'http://10.10.10.106:3128' + - name: NO_PROXY + value: 'localhost,example.org' +---- + +. Save the configuration changes. 
diff --git a/modules/admin/proc-customizing-telemetry-segment-using-helm.adoc b/modules/admin/proc-customizing-telemetry-segment-using-helm.adoc new file mode 100644 index 0000000000..3b61cbb981 --- /dev/null +++ b/modules/admin/proc-customizing-telemetry-segment-using-helm.adoc @@ -0,0 +1,42 @@ +[id="proc-customizing-telemetry-segment-using-helm_{context}"] += Customizing telemetry Segment source using the Helm Chart + +You can configure integration with your Segment source by using the Helm Chart. + +.Prerequisites + +* You have logged in as an administrator in the {ocp-short} web console. +* You have installed {product} on {ocp-short} using the Helm Chart. + +.Procedure + +. In the *Developer* perspective of the {ocp-short} web console, go to the *Helm* view to see the list of Helm releases. +. Click the *overflow* menu on the Helm release that you want to use and select *Upgrade*. +. Use either the *Form* view or *YAML* view to edit the Helm configuration: +** Using *Form view* ++ +.. Expand *Root Schema → Backstage Chart Schema → Backstage Parameters → Backstage container environment variables*. +.. Click the *Add Backstage container environment variables* link. +.. Enter the name and value of the Segment key. ++ +image::rhdh/segment-source-helm.png[] + +.. Click *Upgrade*. + +** Using *YAML view* ++ +.. Add the following YAML code in your Helm configuration file: ++ +[source,yaml] +---- +# ... +upstream: + backstage: + extraEnvVars: + - name: SEGMENT_WRITE_KEY + value: # <1> +# ... +---- +<1> Replace `` with a unique identifier for your Segment source. + +.. Click *Upgrade*. 
diff --git a/modules/admin/proc-customizing-telemetry-segment-using-operator.adoc b/modules/admin/proc-customizing-telemetry-segment-using-operator.adoc new file mode 100644 index 0000000000..b3ecb6a502 --- /dev/null +++ b/modules/admin/proc-customizing-telemetry-segment-using-operator.adoc @@ -0,0 +1,28 @@ +[id="proc-customizing-telemetry-segment-using-operator_{context}"] += Customizing telemetry Segment source using the Operator + +You can configure integration with your Segment source by using the Operator. + +.Prerequisites + +* You have logged in as an administrator in the {ocp-short} web console. +* You have installed {product} on {ocp-short} using the Operator. + +.Procedure + +. Add the following YAML code in your `Backstage` custom resource (CR): ++ +[source,yaml] +---- +# ... +spec: + application: + extraEnvs: + envs: + - name: SEGMENT_WRITE_KEY + value: # <1> +# ... +---- +<1> Replace `` with a unique identifier for your Segment source. + +. Save the configuration changes. diff --git a/modules/admin/proc-disabling-telemetry-using-helm.adoc b/modules/admin/proc-disabling-telemetry-using-helm.adoc new file mode 100644 index 0000000000..625bb9e58b --- /dev/null +++ b/modules/admin/proc-disabling-telemetry-using-helm.adoc @@ -0,0 +1,61 @@ +[id="proc-disabling-telemetry-using-helm_{context}"] += Disabling telemetry data collection using the Helm Chart + +You can disable the telemetry data collection feature by using the Helm Chart. + +.Prerequisites + +* You have logged in as an administrator in the {ocp-short} web console. +* You have installed {product} on {ocp-short} using the Helm Chart. + +.Procedure + +. In the *Developer* perspective of the {ocp-short} web console, go to the *Helm* view to see the list of Helm releases. +. Click the *overflow* menu on the Helm release that you want to use and select *Upgrade*. ++ +[NOTE] +==== +You can also create a new Helm release by clicking the *Create* button and edit the configuration to disable telemetry. 
+==== + +. Use either the *Form* view or *YAML* view to edit the Helm configuration: +** Using *Form view* ++ +.. Expand *Root Schema → global → Dynamic plugins configuration. → List of dynamic plugins that should be installed in the backstage application*. +.. Click the *Add list of dynamic plugins that should be installed in the backstage application.* link. + +.. Perform one of the following steps: ++ +*** If you have not configured the plugin, add the following value in the *Package specification of the dynamic plugin to install. It should be usable by the npm pack command.* field: ++ +`./dynamic-plugins/dist/janus-idp-backstage-plugin-analytics-provider-segment` ++ +image::rhdh/disabling-telemetry.png[] ++ +*** If you have configured the plugin, find the *Package specification of the dynamic plugin to install. It should be usable by the npm pack command.* field with the `./dynamic-plugins/dist/janus-idp-backstage-plugin-analytics-provider-segment` value. + +.. Select the *Disable the plugin* checkbox. +.. Click *Upgrade*. + +** Using *YAML view* ++ +.. Perform one of the following steps: ++ +*** If you have not configured the plugin, add the following YAML code in your `values.yaml` Helm configuration file: ++ +[source,yaml] +---- +# ... +global: + dynamic: + plugins: + - package: './dynamic-plugins/dist/janus-idp-backstage-plugin-analytics-provider-segment' + disabled: true +# ... +---- ++ +*** If you have configured the plugin, search it in your Helm configuration and set the value of the `plugins.disabled` parameter to `true`. + +.. Click *Upgrade*. 
+ + diff --git a/modules/admin/proc-disabling-telemetry-using-operator.adoc b/modules/admin/proc-disabling-telemetry-using-operator.adoc new file mode 100644 index 0000000000..66c07efb10 --- /dev/null +++ b/modules/admin/proc-disabling-telemetry-using-operator.adoc @@ -0,0 +1,49 @@ +[id="proc-disabling-telemetry-using-operator_{context}"] += Disabling telemetry data collection using the Operator + +You can disable the telemetry data collection feature by using the Operator. + +.Prerequisites + +* You have logged in as an administrator in the {ocp-short} web console. +* You have installed {product} on {ocp-short} using the Operator. + +.Procedure + +. Perform one of the following steps: ++ +* If you have created the `dynamic-plugins-rhdh` ConfigMap file and not configured the `analytics-provider-segment` plugin, add the plugin to the list of plugins and set its `plugins.disabled` parameter to `true`. ++ +* If you have created the `dynamic-plugins-rhdh` ConfigMap file and configured the `analytics-provider-segment` plugin, search the plugin in the list of plugins and set its `plugins.disabled` parameter to `true`. ++ +* If you have not created the ConfigMap file, create it with the following YAML code: ++ +[source,yaml] +---- +kind: ConfigMap +apiVersion: v1 +metadata: + name: dynamic-plugins-rhdh +data: + dynamic-plugins.yaml: | + includes: + - dynamic-plugins.default.yaml + plugins: + - package: './dynamic-plugins/dist/janus-idp-backstage-plugin-analytics-provider-segment' + disabled: true +---- + +. Set the value of the `dynamicPluginsConfigMapName` parameter to the name of the ConfigMap file in your `Backstage` custom resource: ++ +[source,yaml] +---- +# ... +spec: + application: + dynamicPluginsConfigMapName: dynamic-plugins-rhdh +# ... +---- + +. Save the configuration changes. 
+ + diff --git a/modules/admin/proc-enabling-telemetry-using-helm.adoc b/modules/admin/proc-enabling-telemetry-using-helm.adoc new file mode 100644 index 0000000000..ce3a39d315 --- /dev/null +++ b/modules/admin/proc-enabling-telemetry-using-helm.adoc @@ -0,0 +1,58 @@ +[id="proc-enabling-telemetry-using-helm_{context}"] += Enabling telemetry data collection using the Helm Chart + +You can enable the telemetry data collection feature by using the Helm Chart. + +.Prerequisites + +* You have logged in as an administrator in the {ocp-short} web console. +* You have installed {product} on {ocp-short} using the Helm Chart. + +.Procedure + +. In the *Developer* perspective of the {ocp-short} web console, go to the *Helm* view to see the list of Helm releases. +. Click the *overflow* menu on the Helm release that you want to use and select *Upgrade*. ++ +[NOTE] +==== +You can also create a new Helm release by clicking the *Create* button and edit the configuration to enable telemetry. +==== + +. Use either the *Form* view or *YAML* view to edit the Helm configuration: +** Using *Form view* ++ +.. Expand *Root Schema → global → Dynamic plugins configuration. → List of dynamic plugins that should be installed in the backstage application*. +.. Click the *Add list of dynamic plugins that should be installed in the backstage application.* link. + +.. Perform one of the following steps: ++ +*** If you have not configured the plugin, add the following value in the *Package specification of the dynamic plugin to install. It should be usable by the npm pack command.* field: ++ +`./dynamic-plugins/dist/janus-idp-backstage-plugin-analytics-provider-segment` ++ +*** If you have configured the plugin, find the *Package specification of the dynamic plugin to install. It should be usable by the npm pack command.* field with the `./dynamic-plugins/dist/janus-idp-backstage-plugin-analytics-provider-segment` value. + +.. Clear the *Disable the plugin* checkbox. +.. Click *Upgrade*. 
+ +** Using *YAML view* ++ +.. Perform one of the following steps: ++ +*** If you have not configured the plugin, add the following YAML code in your Helm configuration file: ++ +[source,yaml] +---- +# ... +global: + dynamic: + plugins: + - package: './dynamic-plugins/dist/janus-idp-backstage-plugin-analytics-provider-segment' + disabled: false +# ... +---- ++ +*** If you have configured the plugin, search it in your Helm configuration and set the value of the `plugins.disabled` parameter to `false`. + +.. Click *Upgrade*. + diff --git a/modules/admin/proc-enabling-telemetry-using-operator.adoc b/modules/admin/proc-enabling-telemetry-using-operator.adoc new file mode 100644 index 0000000000..404ae4b2e6 --- /dev/null +++ b/modules/admin/proc-enabling-telemetry-using-operator.adoc @@ -0,0 +1,48 @@ +[id="proc-enabling-telemetry-using-operator_{context}"] += Enabling telemetry data collection using the Operator + +You can enable the telemetry data collection feature by using the Operator. + +.Prerequisites + +* You have logged in as an administrator in the {ocp-short} web console. +* You have installed {product} on {ocp-short} using the Operator. + +.Procedure + +. Perform one of the following steps: ++ +* If you have created the `dynamic-plugins-rhdh` ConfigMap file and not configured the `analytics-provider-segment` plugin, add the plugin to the list of plugins and set its `plugins.disabled` parameter to `false`. ++ +* If you have created the `dynamic-plugins-rhdh` ConfigMap file and configured the `analytics-provider-segment` plugin, search the plugin in the list of plugins and set its `plugins.disabled` parameter to `false`. 
++ +* If you have not created the ConfigMap file, create it with the following YAML code: ++ +[source,yaml] +---- +kind: ConfigMap +apiVersion: v1 +metadata: + name: dynamic-plugins-rhdh +data: + dynamic-plugins.yaml: | + includes: + - dynamic-plugins.default.yaml + plugins: + - package: './dynamic-plugins/dist/janus-idp-backstage-plugin-analytics-provider-segment' + disabled: false +---- + +. Set the value of the `dynamicPluginsConfigMapName` parameter to the name of the ConfigMap file in your `Backstage` custom resource: ++ +[source,yaml] +---- +# ... +spec: + application: + dynamicPluginsConfigMapName: dynamic-plugins-rhdh +# ... +---- + +. Save the configuration changes. + diff --git a/modules/admin/proc-migrating-databases-to-an-external-server.adoc b/modules/admin/proc-migrating-databases-to-an-external-server.adoc new file mode 100644 index 0000000000..520ec78a07 --- /dev/null +++ b/modules/admin/proc-migrating-databases-to-an-external-server.adoc @@ -0,0 +1,129 @@ +[id="proc-migrating-databases-to-an-external-server_{context}"] += Migrating local databases to an external database server using the Operator + +By default, {product} hosts the data for each plugin in a PostgreSQL database. When you fetch the list of databases, you might see multiple databases based on the number of plugins configured in {product-short}. You can migrate the data from an {product-very-short} instance hosted on a local PostgreSQL server to an external PostgreSQL service, such as AWS RDS, Azure database, or Crunchy database. To migrate the data from each {product-very-short} instance, you can use PostgreSQL utilities, such as link:https://www.postgresql.org/docs/current/app-pgdump.html[`pg_dump`] with link:https://www.postgresql.org/docs/current/app-psql.html[`psql`] or link:https://www.pgadmin.org/[`pgAdmin`]. + +[NOTE] +==== +The following procedure uses a database copy script to do a quick migration. 
+==== + +.Prerequisites + +* You have installed the link:https://www.postgresql.org/docs/current/app-pgdump.html[`pg_dump`] and link:https://www.postgresql.org/docs/current/app-psql.html[`psql`] utilities on your local machine. +* For data export, you have the PGSQL user privileges to make a full dump of local databases. +* For data import, you have the PGSQL admin privileges to create an external database and populate it with database dumps. + +.Procedure + +. Configure port forwarding for the local PostgreSQL database pod by running the following command on a terminal: ++ +[source,terminal] +---- +oc port-forward -n : +---- +Where: +* The `` variable denotes the name of a PostgreSQL pod with the format `backstage-psql--<_index>`. +* The `` variable denotes the port of your choice to forward PostgreSQL data to. +* The `` variable denotes the local PostgreSQL instance port, such as `5432`. ++ +.Example: Configuring port forwarding +[source,terminal] +---- +oc port-forward -n developer-hub backstage-psql-developer-hub-0 15432:5432 +---- + +. Make a copy of the following `db_copy.sh` script and edit the details based on your configuration: ++ +[source,bash] +---- +#!/bin/bash + +to_host= <1> +to_port=5432 <2> +to_user=postgres <3> + +from_host=127.0.0.1 <4> +from_port=15432 <5> +from_user=postgres <6> + +allDB=("backstage_plugin_app" "backstage_plugin_auth" "backstage_plugin_catalog" "backstage_plugin_permission" "backstage_plugin_scaffolder" "backstage_plugin_search") <7> + +for db in ${!allDB[@]}; +do + db=${allDB[$db]} + echo Copying database: $db + PGPASSWORD=$TO_PSW psql -h $to_host -p $to_port -U $to_user -c "create database $db;" + pg_dump -h $from_host -p $from_port -U $from_user -d $db | PGPASSWORD=$TO_PSW psql -h $to_host -p $to_port -U $to_user -d $db +done +---- +<1> The destination host name, for example, `.rds.amazonaws.com`. +<2> The destination port, such as `5432`. +<3> The destination server username, for example, `postgres`. 
+<4> The source host name, such as `127.0.0.1`.
+<5> The source port number, such as the `<forward_to_port>` variable.
+<6> The source server username, for example, `postgres`.
+<7> The name of databases to import in double quotes separated by spaces, for example, `("backstage_plugin_app" "backstage_plugin_auth" "backstage_plugin_catalog" "backstage_plugin_permission" "backstage_plugin_scaffolder" "backstage_plugin_search")`.
+
+. Create a destination database for copying the data:
++
+[source,terminal]
+----
+/bin/bash TO_PSW=<destination_database_password> /path/to/db_copy.sh <1>
+----
+<1> The `<destination_database_password>` variable denotes the password to connect to the destination database.
++
+[NOTE]
+====
+You can stop port forwarding when the copying of the data is complete. For more information about handling large databases and using the compression tools, see the link:https://www.postgresql.org/docs/current/backup-dump.html#BACKUP-DUMP-LARGE[Handling Large Databases] section on the PostgreSQL website.
+====
+
+. Reconfigure your `Backstage` custom resource (CR). For more information, see link:{LinkAdminGuide}#proc-configuring-postgresql-instance-using-operator_admin-rhdh[Configuring an external PostgreSQL instance using the Operator].
+. Check that the following code is present at the end of your `Backstage` CR after reconfiguration:
++
+[source,yaml]
+----
+# ...
+spec:
+  database:
+    enableLocalDb: false
+  application:
+  # ...
+    extraFiles:
+      secrets:
+        - name: <crt-secret>
+          key: postgres-crt.pem # key name as in Secret
+    extraEnvs:
+      secrets:
+        - name: <cred-secret>
+# ...
+----
++
+[NOTE]
+====
+Reconfiguring the `Backstage` CR deletes the corresponding `StatefulSet` and `Pod` objects, but does not delete the `PersistenceVolumeClaim` object. Use the following command to delete the local `PersistenceVolumeClaim` object:
+
+[source,terminal]
+----
+oc -n developer-hub delete pvc <local-psql-db-pvc-name>
+----
+where, the `<local-psql-db-pvc-name>` variable is in the `data-<psql-pod-name>` format.
+====
+
+. Apply the configuration changes.
+
+
+.Verification
+
+. 
Verify that your {product-very-short} instance is running with the migrated data and does not contain the local PostgreSQL database by running the following command: ++ +[source,terminal] +---- +oc get pods -n +---- + +. Check the output for the following details: +* The `backstage-developer-hub-xxx` pod is in running state. +* The `backstage-psql-developer-hub-0` pod is not available. ++ +You can also verify these details using the *Topology* view in the {ocp-short} web console. diff --git a/modules/admin/proc-rhdh-deployment-config.adoc b/modules/admin/proc-rhdh-deployment-config.adoc new file mode 100644 index 0000000000..47b44e98b0 --- /dev/null +++ b/modules/admin/proc-rhdh-deployment-config.adoc @@ -0,0 +1,150 @@ +// Module included in: +// title-admin.adoc + +[id="proc-rhdh-deployment-config_{context}"] += Configuring {product} deployment + +The {product} operator exposes a `rhdh.redhat.com/v1alpha2` API Version of its Custom Resource Definition (CRD). This CRD exposes a generic `spec.deployment.patch` field, which gives you full control over the {product-short} Deployment resource. This field can be a fragment of the standard `apps.Deployment` Kubernetes object. + +.Procedure + +. Create a {product-short} Custom Resource Definition with the following fields: + +-- +.Example +[source, yaml] +---- +apiVersion: rhdh.redhat.com/v1alpha2 +kind: Backstage +metadata: + name: developer-hub +spec: + deployment: + patch: + spec: + template: +---- + +`labels`:: +Add labels to the {product-short} pod. ++ +.Example adding the label `my=true` +[source, yaml] +---- +apiVersion: rhdh.redhat.com/v1alpha2 +kind: Backstage +metadata: + name: developer-hub +spec: + deployment: + patch: + spec: + template: + metadata: + labels: + my: true +---- + +`volumes`:: +Add an additional volume named `my-volume` and mount it under `/my/path` in the {product-short} application container. 
++ +.Example additional volume +[source, yaml] +---- +apiVersion: rhdh.redhat.com/v1alpha2 +kind: Backstage +metadata: + name: developer-hub +spec: + deployment: + patch: + spec: + template: + spec: + containers: + - name: backstage-backend + volumeMounts: + - mountPath: /my/path + name: my-volume + volumes: + - ephemeral: + volumeClaimTemplate: + spec: + storageClassName: "special" + name: my-volume +---- ++ +Replace the default `dynamic-plugins-root` volume with a persistent volume claim (PVC) named `dynamic-plugins-root`. Note the `$patch: replace` directive, otherwise a new volume will be added. ++ +.Example `dynamic-plugins-root` volume replacement +[source, yaml] +---- +apiVersion: rhdh.redhat.com/v1alpha2 +kind: Backstage +metadata: + name: developer-hub +spec: + deployment: + patch: + spec: + template: + spec: + volumes: + - $patch: replace + name: dynamic-plugins-root + persistentVolumeClaim: + claimName: dynamic-plugins-root +---- + +`cpu` request:: + +Set the CPU request for the {product-short} application container to 250m. ++ +.Example CPU request +[source, yaml] +---- +apiVersion: rhdh.redhat.com/v1alpha2 +kind: Backstage +metadata: + name: developer-hub +spec: + deployment: + patch: + spec: + template: + spec: + containers: + - name: backstage-backend + resources: + requests: + cpu: 250m +---- + +`my-sidecar` container:: + +Add a new `my-sidecar` sidecar container into the {product-short} Pod. ++ +.Example side car container +[source, yaml] +---- +apiVersion: rhdh.redhat.com/v1alpha2 +kind: Backstage +metadata: + name: developer-hub +spec: + deployment: + patch: + spec: + template: + spec: + containers: + - name: my-sidecar + image: quay.io/my-org/my-sidecar:latest +---- + +-- + +[role="_additional-resources"] +.Additional resources + +* To learn more about merging, see link:https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md#basic-patch-format[Strategic Merge Patch]. 
\ No newline at end of file
diff --git a/modules/admin/proc-rhdh-monitoring-logging-aks.adoc b/modules/admin/proc-rhdh-monitoring-logging-aks.adoc
new file mode 100644
index 0000000000..83d5bed36f
--- /dev/null
+++ b/modules/admin/proc-rhdh-monitoring-logging-aks.adoc
@@ -0,0 +1,54 @@
+[id='proc-rhdh-monitoring-logging-aks_{context}']
+= Monitoring and logging with Azure Kubernetes Services (AKS) in {product}
+
+Monitoring and logging are integral aspects of managing and maintaining Azure Kubernetes Services (AKS) in {product}. With features like Managed Prometheus Monitoring and Azure Monitor integration, administrators can efficiently monitor resource utilization, diagnose issues, and ensure the reliability of their containerized workloads.
+
+To enable Managed Prometheus Monitoring, use the `--enable-azure-monitor-metrics` option within either the `az aks create` or `az aks update` command, depending on whether you're creating a new cluster or updating an existing one, such as:
+
+[source,bash]
+----
+az aks create/update --resource-group <your-ResourceGroup> --name <your-Cluster> --enable-azure-monitor-metrics
+----
+
+The previous command installs the metrics add-on, which gathers https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/prometheus-metrics-overview[Prometheus metrics]. Using the previous command, you can enable monitoring of Azure resources through both native Azure Monitor metrics and Prometheus metrics. You can also view the results in the portal under *Monitoring -> Insights*. For more information, see https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/monitor-azure-resource[Monitor Azure resources with Azure Monitor].
+
+Furthermore, metrics from both the Managed Prometheus service and Azure Monitor can be accessed through Azure Managed Grafana service. For more information, see https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/azure-monitor-workspace-manage?tabs=azure-portal#link-a-grafana-workspace[Link a Grafana workspace] section.
+ +By default, Prometheus uses the minimum ingesting profile, which optimizes ingestion volume and sets default configurations for scrape frequency, targets, and metrics collected. The default settings can be customized through custom configuration. Azure offers various methods, including using different ConfigMaps, to provide scrape configuration and other metric add-on settings. For more information about default configuration, see https://learn.microsoft.com/en-us/azure/azure-monitor/containers/prometheus-metrics-scrape-default[Default Prometheus metrics configuration in Azure Monitor] and https://learn.microsoft.com/en-us/azure/azure-monitor/containers/prometheus-metrics-scrape-configuration?tabs=CRDConfig%2CCRDScrapeConfig[Customize scraping of Prometheus metrics in Azure Monitor managed service for Prometheus] documentation. + +== Viewing logs with Azure Kubernetes Services (AKS) + +You can access live data logs generated by Kubernetes objects and collect log data in Container Insights within AKS. + +.Prerequisites + +* You have deployed {product-short} on {aks-short}. + +For more information, see xref:{installing-on-aks-book-url}#assembly-install-rhdh-aks[{installing-on-aks-book-title}]. + +.Procedure + +View live logs from your {product-short} instance:: ++ +-- +. Navigate to the Azure Portal. +. Search for the resource group `` and locate your AKS cluster ``. +. Select *Kubernetes resources -> Workloads* from the menu. +. Select the `-developer-hub` (in case of Helm Chart installation) or `-backstage` (in case of Operator-backed installation) deployment. +. Click *Live Logs* in the left menu. +. Select the pod. ++ +NOTE: There must be only single pod. + +Live log data is collected and displayed. +-- + +View real-time log data from the Container Engine:: ++ +-- +. Navigate to the Azure Portal. +. Search for the resource group `` and locate your AKS cluster ``. +. Select *Monitoring* -> *Insights* from the menu. +. Go to the *Containers* tab. +. 
Find the backend-backstage container and click it to view real-time log data as it's generated by the Container Engine. +-- diff --git a/modules/admin/proc-rhdh-monitoring-logging-aws.adoc b/modules/admin/proc-rhdh-monitoring-logging-aws.adoc new file mode 100644 index 0000000000..2dee4d2303 --- /dev/null +++ b/modules/admin/proc-rhdh-monitoring-logging-aws.adoc @@ -0,0 +1,149 @@ +[id='proc-rhdh-monitoring-logging-aws_{context}'] += Monitoring and logging with Amazon Web Services (AWS) in {product} + +In the {product}, monitoring and logging are facilitated through Amazon Web Services (AWS) integration. With features like Amazon CloudWatch for real-time monitoring and Amazon Prometheus for comprehensive logging, you can ensure the reliability, scalability, and compliance of your {product-short} application hosted on AWS infrastructure. + +This integration enables you to oversee, diagnose, and refine your applications in the Red Hat ecosystem, leading to an improved development and operational journey. + +== Monitoring with Amazon Prometheus + +{product} provides Prometheus metrics related to the running application. For more information about enabling or deploying Prometheus for EKS clusters, see https://docs.aws.amazon.com/eks/latest/userguide/prometheus.html[Prometheus metrics] in the Amazon documentation. + +To monitor {product-short} using https://aws.amazon.com/prometheus/[Amazon Prometheus], you need to create an Amazon managed service for the Prometheus workspace and configure the ingestion of the Developer Hub Prometheus metrics. For more information, see https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-onboard-create-workspace.html[Create a workspace] and https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-onboard-ingest-metrics.html[Ingest Prometheus metrics to the workspace] sections in the Amazon documentation. 
+ +After ingesting Prometheus metrics into the created workspace, you can configure the metrics scraping to extract data from pods based on specific pod annotations. + +=== Configuring annotations for monitoring + +You can configure the annotations for monitoring in both Helm deployment and Operator-backed deployment. + +Helm deployment:: ++ +-- +To annotate the backstage pod for monitoring, update your `values.yaml` file as follows: + +[source,yaml] +---- +upstream: + backstage: + # --- TRUNCATED --- + podAnnotations: + prometheus.io/scrape: 'true' + prometheus.io/path: '/metrics' + prometheus.io/port: '7007' + prometheus.io/scheme: 'http' +---- +-- + +Operator-backed deployment:: ++ +-- +.Procedure + +. As an administrator of the operator, edit the default configuration to add Prometheus annotations as follows: ++ +[source,bash] +---- +# Update OPERATOR_NS accordingly +OPERATOR_NS=rhdh-operator +kubectl edit configmap backstage-default-config -n "${OPERATOR_NS}" +---- + +. Find the `deployment.yaml` key in the ConfigMap and add the annotations to the `spec.template.metadata.annotations` field as follows: ++ +[source,yaml] +---- +deployment.yaml: |- + apiVersion: apps/v1 + kind: Deployment + # --- truncated --- + spec: + template: + # --- truncated --- + metadata: + labels: + rhdh.redhat.com/app: # placeholder for 'backstage-' + # --- truncated --- + annotations: + prometheus.io/scrape: 'true' + prometheus.io/path: '/metrics' + prometheus.io/port: '7007' + prometheus.io/scheme: 'http' + # --- truncated --- +---- + +. Save your changes. +-- + +.Verification + +To verify if the scraping works: + +. Use `kubectl` to port-forward the Prometheus console to your local machine as follows: ++ +[source,bash] +---- +kubectl --namespace=prometheus port-forward deploy/prometheus-server 9090 +---- + +. Open your web browser and navigate to `pass:c[http://localhost:9090]` to access the Prometheus console. +. Monitor relevant metrics, such as `process_cpu_user_seconds_total`. 
+ +== Logging with Amazon CloudWatch logs + +Logging within the {product} relies on the https://github.com/winstonjs/winston[winston library]. By default, logs at the debug level are not recorded. To activate debug logs, you must set the environment variable `LOG_LEVEL` to debug in your {product} instance. + +=== Configuring the application log level + +You can configure the application log level in both Helm deployment and Operator-backed deployment. + +Helm deployment:: ++ +-- +To update the logging level, add the environment variable `LOG_LEVEL` to your Helm chart's `values.yaml` file: + +[source,yaml] +---- +upstream: + backstage: + # --- Truncated --- + extraEnvVars: + - name: LOG_LEVEL + value: debug +---- +-- + +Operator-backed deployment:: ++ +-- +You can modify the logging level by including the environment variable `LOG_LEVEL` in your custom resource as follows: + +[source,yaml] +---- +spec: + # Other fields omitted + application: + extraEnvs: + envs: + - name: LOG_LEVEL + value: debug +---- +-- + +=== Retrieving logs from Amazon CloudWatch + +The CloudWatch Container Insights are used to capture logs and metrics for Amazon EKS. For more information, see https://docs.aws.amazon.com/prescriptive-guidance/latest/implementing-logging-monitoring-cloudwatch/kubernetes-eks-logging.html[Logging for Amazon EKS] documentation. + +To capture the logs and metrics, https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Container-Insights-setup-EKS-addon.html[install the Amazon CloudWatch Observability EKS add-on] in your cluster. Following the setup of Container Insights, you can access container logs using Logs Insights or Live Tail views. 
+ +CloudWatch names the log group where all container logs are consolidated in the following manner: + +`/aws/containerinsights//application` + +Following is an example query to retrieve logs from the Developer Hub instance: + +[source,sql] +---- +fields @timestamp, @message, kubernetes.container_name +| filter kubernetes.container_name in ["install-dynamic-plugins", "backstage-backend"] +---- diff --git a/modules/admin/proc-using-aws-cognito-auth-provider.adoc b/modules/admin/proc-using-aws-cognito-auth-provider.adoc new file mode 100644 index 0000000000..a35ff5404e --- /dev/null +++ b/modules/admin/proc-using-aws-cognito-auth-provider.adoc @@ -0,0 +1,219 @@ +[id='proc-using-aws-cognito-auth-provider_{context}'] += Using Amazon Cognito as an authentication provider in {product} + +In this section, Amazon Cognito is an AWS service for adding an authentication layer to {product-short}. You can sign in directly to the {product-short} using a user pool or fedarate through a third-party identity provider. + +Although Amazon Cognito is not part of the core authentication providers for the Developer Hub, it can be integrated using the generic OpenID Connect (OIDC) provider. + +You can configure your {product-short} in both Helm Chart and Operator-backed deployments. + +.Prerequisites + +* You have a User Pool or you have created a new one. For more information about user pools, see https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html?icmpid=docs_cognito_console_help_panel[Amazon Cognito user pools] documentation. ++ +[NOTE] +==== +Ensure that you have noted the AWS region where the user pool is located and the user pool ID. +==== + +* You have created an App Client within your user pool for integrating the hosted UI. 
For more information, see https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-app-integration.html#cognito-user-pools-create-an-app-integration[Setting up the hosted UI with the Amazon Cognito console]. ++ +-- +When setting up the hosted UI using the Amazon Cognito console, ensure to make the following adjustments: + +. In the *Allowed callback URL(s)* section, include the URL `pass:c[https:///api/auth/oidc/handler/frame]`. Ensure to replace `` with your {product-short} application's URL, such as, `my.rhdh.example.com`. + +. Similarly, in the *Allowed sign-out URL(s)* section, add `pass:c[https://]`. Replace `` with your {product-short} application's URL, such as `my.rhdh.example.com`. + +. Under *OAuth 2.0 grant types*, select *Authorization code grant* to return an authorization code. + +. Under *OpenID Connect scopes*, ensure to select at least the following scopes: + +** OpenID +** Profile +** Email +-- + +Helm deployment:: ++ +-- +.Procedure + +. Edit or create your custom `app-config-rhdh` ConfigMap as follows: ++ +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: app-config-rhdh +data: + "app-config-rhdh.yaml": | + # --- Truncated --- + app: + title: Red Hat Developer Hub + + signInPage: oidc + auth: + environment: production + session: + secret: ${AUTH_SESSION_SECRET} + providers: + oidc: + production: + clientId: ${AWS_COGNITO_APP_CLIENT_ID} + clientSecret: ${AWS_COGNITO_APP_CLIENT_SECRET} + metadataUrl: ${AWS_COGNITO_APP_METADATA_URL} + callbackUrl: ${AWS_COGNITO_APP_CALLBACK_URL} + scope: 'openid profile email' + prompt: auto +---- + +. Edit or create your custom `secrets-rhdh` Secret using the following template: ++ +[source,yaml] +---- +apiVersion: v1 +kind: Secret +metadata: + name: secrets-rhdh +stringData: + AUTH_SESSION_SECRET: "my super auth session secret - change me!!!" 
+ AWS_COGNITO_APP_CLIENT_ID: "my-aws-cognito-app-client-id" + AWS_COGNITO_APP_CLIENT_SECRET: "my-aws-cognito-app-client-secret" + AWS_COGNITO_APP_METADATA_URL: "https://cognito-idp.[region].amazonaws.com/[userPoolId]/.well-known/openid-configuration" + AWS_COGNITO_APP_CALLBACK_URL: "https://[rhdh_dns]/api/auth/oidc/handler/frame" +---- + +. Add references of both the ConfigMap and Secret resources in your `values.yaml` file: ++ +[source,yaml] +---- +upstream: + backstage: + image: + pullSecrets: + - rhdh-pull-secret + podSecurityContext: + fsGroup: 2000 + extraAppConfig: + - filename: app-config-rhdh.yaml + configMapRef: app-config-rhdh + extraEnvVarsSecrets: + - secrets-rhdh +---- + +. Upgrade the Helm deployment: ++ +[source,terminal,subs="attributes+"] +---- +helm upgrade rhdh \ + openshift-helm-charts/redhat-developer-hub \ + [--version {product-chart-version}] \ + --values /path/to/values.yaml +---- +-- + +Operator-backed deployment:: ++ +-- +. Add the following code to your `app-config-rhdh` ConfigMap: ++ +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: app-config-rhdh +data: + "app-config-rhdh.yaml": | + # --- Truncated --- + + signInPage: oidc + auth: + # Production to disable guest user login + environment: production + # Providing an auth.session.secret is needed because the oidc provider requires session support. + session: + secret: ${AUTH_SESSION_SECRET} + providers: + oidc: + production: + # See https://github.com/backstage/backstage/blob/master/plugins/auth-backend-module-oidc-provider/config.d.ts + clientId: ${AWS_COGNITO_APP_CLIENT_ID} + clientSecret: ${AWS_COGNITO_APP_CLIENT_SECRET} + metadataUrl: ${AWS_COGNITO_APP_METADATA_URL} + callbackUrl: ${AWS_COGNITO_APP_CALLBACK_URL} + # Minimal set of scopes needed. Feel free to add more if needed. + scope: 'openid profile email' + + # Note that by default, this provider will use the 'none' prompt which assumes that your are already logged on in the IDP. 
+ # You should set prompt to: + # - auto: will let the IDP decide if you need to log on or if you can skip login when you have an active SSO session + # - login: will force the IDP to always present a login form to the user + prompt: auto +---- + +. Add the following code to your `secrets-rhdh` Secret: ++ +[source,yaml] +---- +apiVersion: v1 +kind: Secret +metadata: + name: secrets-rhdh +stringData: + # --- Truncated --- + + # TODO: Change auth session secret. + AUTH_SESSION_SECRET: "my super auth session secret - change me!!!" + + # TODO: user pool app client ID + AWS_COGNITO_APP_CLIENT_ID: "my-aws-cognito-app-client-id" + + # TODO: user pool app client Secret + AWS_COGNITO_APP_CLIENT_SECRET: "my-aws-cognito-app-client-secret" + + # TODO: Replace region and user pool ID + AWS_COGNITO_APP_METADATA_URL: "https://cognito-idp.[region].amazonaws.com/[userPoolId]/.well-known/openid-configuration" + + # TODO: Replace + AWS_COGNITO_APP_CALLBACK_URL: "https://[rhdh_dns]/api/auth/oidc/handler/frame" +---- + +. Ensure your Custom Resource contains references to both the `app-config-rhdh` ConfigMap and `secrets-rhdh` Secret: ++ +[source,yaml,subs="attributes+"] +---- +apiVersion: rhdh.redhat.com/v1alpha1 +kind: Backstage +metadata: + # TODO: this the name of your {product-short} instance + name: my-rhdh +spec: + application: + imagePullSecrets: + - "rhdh-pull-secret" + route: + enabled: false + appConfig: + configMaps: + - name: "app-config-rhdh" + extraEnvs: + secrets: + - name: "secrets-rhdh" +---- + +. Optional: If you have an existing {product-short} instance backed by the Custom Resource and you have not edited it, you can manually delete the {product-short} deployment to recreate it using the operator. Run the following command to delete the {product-short} deployment: ++ +[source,bash] +---- +kubectl delete deployment -l app.kubernetes.io/instance= +---- +-- + +.Verification + +. 
Navigate to your {product-short} web URL and sign in using OIDC authentication, which prompts you to authenticate through the configured AWS Cognito user pool. + +. Once logged in, access *Settings* and verify user details. diff --git a/modules/admin/proc-using-azure-auth-provider.adoc b/modules/admin/proc-using-azure-auth-provider.adoc new file mode 100644 index 0000000000..285c02060f --- /dev/null +++ b/modules/admin/proc-using-azure-auth-provider.adoc @@ -0,0 +1,220 @@ +[id='proc-using-azure-auth-provider_{context}'] + += Using Microsoft Azure as an authentication provider in {product} + +The `core-plugin-api` package in {product-short} comes integrated with Microsoft Azure authentication provider, authenticating signing in using Azure OAuth. + +.Prerequisites +* You have deployed {product-short} on AKS. + +For more information, see xref:{installing-on-aks-book-url}#assembly-install-rhdh-aks[Installing {product} on {aks-name} ({aks-short})]. + +* You have created registered your application in Azure portal. For more information, see https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-register-app[Register an application with the Microsoft identity platform]. + +== Using Microsoft Azure as an authentication provider in Helm deployment + +You can use Microsoft Azure as an authentication provider in {product}, when installed using the Helm Chart. + +For more information, see xref:{installing-on-aks-book-url}#proc-rhdh-deploy-aks-helm_assembly-install-rhdh-aks[Deploying {product-short} on {aks-short} with the Helm chart]. + +.Procedure + +. After the application is registered, note down the following: ++ +-- +** `clientId`: Application (client) ID, found under App *Registration -> Overview*. +** `clientSecret`: Secret, found under *App Registration -> Certificates & secrets (create new if needed). +** `tenantId`: Directory (tenant) ID, found under *App Registration -> Overview*. +-- + +. 
Ensure the following fragment is included in your {product-short} ConfigMap: ++ +-- +[source,yaml] +---- +auth: + environment: production + providers: + microsoft: + production: + clientId: ${AZURE_CLIENT_ID} + clientSecret: ${AZURE_CLIENT_SECRET} + tenantId: ${AZURE_TENANT_ID} + domainHint: ${AZURE_TENANT_ID} + additionalScopes: + - Mail.Send +---- +You can either create a new file or add it to an existing one. +-- + +. Apply the ConfigMap to your Kubernetes cluster: ++ +-- +[source,bash] +---- +kubectl -n apply -f .yaml +---- +-- + +. Create or reuse an existing Secret containing Azure credentials and add the following fragment: ++ +-- +[source,yaml] +---- +stringData: + AZURE_CLIENT_ID: + AZURE_CLIENT_SECRET: + AZURE_TENANT_ID: +---- +-- + +. Apply the secret to your Kubernetes cluster: ++ +-- +[source,bash] +---- +kubectl -n apply -f .yaml +---- +-- + +. Ensure your `values.yaml` file references the previously created ConfigMap and Secret: ++ +-- +[source,yaml] +---- +upstream: + backstage: + ... + extraAppConfig: + - filename: ... + configMapRef: + extraEnvVarsSecrets: + - +---- +-- + +. Optional: If the Helm Chart is already installed, upgrade it: ++ +-- +[source,terminal,subs="attributes+"] +---- +helm -n upgrade -f redhat-developer/backstage --version {product-chart-version} +---- +-- + +. Optional: If your `rhdh.yaml` file is not changed, for example, you only updated the ConfigMap and Secret referenced from it, refresh your {product-short} deployment by removing the corresponding pods: ++ +-- +[source,bash] +---- +kubectl -n delete pods -l backstage.io/app=backstage- +---- +-- + +== Using Microsoft Azure as an authentication provider in Operator-backed deployment + +You can use Microsoft Azure as an authentication provider in {product}, when installed using the Operator. + +For more information, see xref:{installing-on-ocp-book-url}#proc-install-operator_assembly-install-rhdh-ocp-operator[Installing {product} on {ocp-short} with the Operator]. 
+ +.Procedure + +. After the application is registered, note down the following: ++ +-- +** `clientId`: Application (client) ID, found under App *Registration -> Overview*. +** `clientSecret`: Secret, found under *App Registration -> Certificates & secrets (create new if needed). +** `tenantId`: Directory (tenant) ID, found under *App Registration -> Overview*. +-- + +. Ensure the following fragment is included in your {product-short} ConfigMap: ++ +-- +[source,yaml] +---- +auth: + environment: production + providers: + microsoft: + production: + clientId: ${AZURE_CLIENT_ID} + clientSecret: ${AZURE_CLIENT_SECRET} + tenantId: ${AZURE_TENANT_ID} + domainHint: ${AZURE_TENANT_ID} + additionalScopes: + - Mail.Send +---- +You can either create a new file or add it to an existing one. +-- + +. Apply the ConfigMap to your Kubernetes cluster: ++ +-- +[source,bash] +---- +kubectl -n apply -f .yaml +---- +-- + +. Create or reuse an existing Secret containing Azure credentials and add the following fragment: ++ +-- +[source,yaml] +---- +stringData: + AZURE_CLIENT_ID: + AZURE_CLIENT_SECRET: + AZURE_TENANT_ID: +---- +-- + +. Apply the secret to your Kubernetes cluster: ++ +-- +[source,bash] +---- +kubectl -n apply -f .yaml +---- +-- + +. Ensure your Custom Resource manifest contains references to the previously created ConfigMap and Secret: ++ +-- +[source,yaml] +---- +apiVersion: rhdh.redhat.com/v1alpha1 +kind: Backstage +metadata: + name: +spec: + application: + imagePullSecrets: + - rhdh-pull-secret + route: + enabled: false + appConfig: + configMaps: + - name: + extraEnvs: + secrets: + - name: +---- +-- + +. Apply your Custom Resource manifest: ++ +-- +[source,bash] +---- +kubectl -n apply -f rhdh.yaml +---- +-- + +. 
Optional: If your `rhdh.yaml` file is not changed, for example, you only updated the ConfigMap and Secret referenced from it, refresh your {product-short} deployment by removing the corresponding pods: ++ +-- +[source,bash] +---- +kubectl -n delete pods -l backstage.io/app=backstage- +---- +-- diff --git a/modules/admin/ref-customizing-telemetry-segment.adoc b/modules/admin/ref-customizing-telemetry-segment.adoc new file mode 100644 index 0000000000..d5c386f31a --- /dev/null +++ b/modules/admin/ref-customizing-telemetry-segment.adoc @@ -0,0 +1,12 @@ +[id="customizing-telemetry-segment_{context}"] += Customizing telemetry Segment source + + +The `analytics-provider-segment` plugin sends the collected telemetry data to {company-name} by default. However, you can configure a new Segment source that receives telemetry data based on your needs. For configuration, you need a unique Segment write key that points to the Segment source. + +[NOTE] +==== +By configuring a new Segment source, you can collect and analyze the same set of data that is mentioned in the link:{LinkAdminGuide}#assembly-rhdh-telemetry[Telemetry data collection] section. You might also require to create your own telemetry data collection notice for your application users. +==== + + diff --git a/modules/admin/ref-disabling-telemetry.adoc b/modules/admin/ref-disabling-telemetry.adoc new file mode 100644 index 0000000000..66bf4470e1 --- /dev/null +++ b/modules/admin/ref-disabling-telemetry.adoc @@ -0,0 +1,6 @@ +[id="disabling-telemetry-data-collection_{context}"] += Disabling telemetry data collection in {product-very-short} + +To disable telemetry data collection, you must disable the `analytics-provider-segment` plugin either using the Helm Chart or the {product} Operator configuration. 
+ + diff --git a/modules/admin/ref-enabling-telemetry.adoc b/modules/admin/ref-enabling-telemetry.adoc new file mode 100644 index 0000000000..9df4622324 --- /dev/null +++ b/modules/admin/ref-enabling-telemetry.adoc @@ -0,0 +1,5 @@ +[id="enabling-telemetry-data-collection_{context}"] += Enabling telemetry data collection in {product-very-short} + +The telemetry data collection feature is enabled by default. However, if you have disabled the feature and want to re-enable it, you must enable the `analytics-provider-segment` plugin either by using the Helm Chart or the {product} Operator configuration. + diff --git a/modules/authentication/proc-authenticationg-with-the-guest-user-on-a-helm-based-installation.adoc b/modules/authentication/proc-authenticationg-with-the-guest-user-on-a-helm-based-installation.adoc new file mode 100644 index 0000000000..aa116e6546 --- /dev/null +++ b/modules/authentication/proc-authenticationg-with-the-guest-user-on-a-helm-based-installation.adoc @@ -0,0 +1,32 @@ +:_mod-docs-content-type: PROCEDURE +[id="authenticating-with-the-guest-user-on-a-helm-based-installation_{context}"] += Authenticating with the Guest user on a Helm-based installation + +On a Helm-based installation, you can configure {product-short} to log in as a Guest user and access {product-short} features. + +.Prerequisites +* You link:https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}#Install%20and%20Upgrade[Installed {product-short} by using the Helm Chart]. 
+ +.Procedure +* To enable the guest user in your {product-short} custom configuration, link:https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}/html-single/administration_guide_for_red_hat_developer_hub/index#proc-add-custom-app-file-openshift-helm_admin-rhdh[configure your {product} Helm Chart] with following content: ++ +.{product} Helm Chart configuration fragment +[source,yaml] +---- +upstream: + backstage: + appConfig: + app: + baseUrl: 'https://{{- include "janus-idp.hostname" . }}' + auth: + environment: development + providers: + guest: + dangerouslyAllowOutsideDevelopment: true +---- + +.Verification +. Go to the {product-short} login page. +. To log in with the Guest user account, click **Enter** in the **Guest** tile. +. In the {product-short} **Settings** page, your profile name is **Guest**. +. You can use {product-short} features. diff --git a/modules/authentication/proc-authenticationg-with-the-guest-user-on-an-operator-based-installation.adoc b/modules/authentication/proc-authenticationg-with-the-guest-user-on-an-operator-based-installation.adoc new file mode 100644 index 0000000000..958e73542c --- /dev/null +++ b/modules/authentication/proc-authenticationg-with-the-guest-user-on-an-operator-based-installation.adoc @@ -0,0 +1,28 @@ +:_mod-docs-content-type: PROCEDURE +[id="authenticating-with-the-guest-user-on-an-operator-based-installation_{context}"] += Authenticating with the Guest user on an Operator-based installation + +After an Operator-based installation, you can configure {product-short} to log in as a Guest user and access {product-short} features. + +.Prerequisites +* You link:[installed {product-short} by using the Operator]. 
+* You link:https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}/html-single/administration_guide_for_red_hat_developer_hub/index#proc-add-custom-app-config-file-ocp-operator_admin-rhdh[added a custom {product-short} application configuration], and have sufficient permissions to modify it. + +.Procedure +* To enable the guest user in your {product-short} custom configuration, link:https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}/html-single/administration_guide_for_red_hat_developer_hub/index#proc-add-custom-app-config-file-ocp-operator_admin-rhdh[edit your {product-short} application configuration] with following content: ++ +.`app-config-rhdh.yaml` fragment +[source,yaml] +---- +auth: + environment: development + providers: + guest: + dangerouslyAllowOutsideDevelopment: true +---- + +.Verification +. Go to the {product-short} login page. +. To log in with the Guest user account, click **Enter** in the **Guest** tile. +. In the {product-short} **Settings** page, your profile name is **Guest**. +. You can use {product-short} features. diff --git a/modules/authentication/proc-creating-a-custom-transformer-to-provision-users-from-rhsso-to-the-software-catalog.adoc b/modules/authentication/proc-creating-a-custom-transformer-to-provision-users-from-rhsso-to-the-software-catalog.adoc new file mode 100644 index 0000000000..802be18d07 --- /dev/null +++ b/modules/authentication/proc-creating-a-custom-transformer-to-provision-users-from-rhsso-to-the-software-catalog.adoc @@ -0,0 +1,92 @@ +[id="creating-a-custom-transformer-to-provision-users-from-rhsso-to-the-software-catalog"] += Creating a custom transformer to provision users from {rhsso-brand-name} ({rhsso}) to the software catalog + +To customize how {rhsso} users and groups are mapped to {product} entities, you can create a backend module that uses the `keycloakTransformerExtensionPoint` to provide custom user and group transformers for the Keycloak backend. 
+ +.Prerequisites +* You have xref:provisioning-users-from-rhsso-to-the-software-catalog[enabled provisioning users from {rhsso-brand-name} ({rhsso}) to the software catalog]. + +.Procedure +. Create a new backend module with the `yarn new` command. + +. Add your custom user and group transformers to the `keycloakTransformerExtensionPoint`. + ++ +The following is an example of how the backend module can be defined: ++ +.`plugins/____/src/module.ts` +[source,javascript] +---- +import { + GroupTransformer, + keycloakTransformerExtensionPoint, + UserTransformer, +} from '@janus-idp/backstage-plugin-keycloak-backend'; + +const customGroupTransformer: GroupTransformer = async ( + entity, // entity output from default parser + realm, // Keycloak realm name + groups, // Keycloak group representation +) => { + /* apply transformations */ + return entity; +}; +const customUserTransformer: UserTransformer = async ( + entity, // entity output from default parser + user, // Keycloak user representation + realm, // Keycloak realm name + groups, // Keycloak group representation +) => { + /* apply transformations */ + return entity; +}; + +export const keycloakBackendModuleTransformer = createBackendModule({ + pluginId: 'catalog', + moduleId: 'keycloak-transformer', + register(reg) { + reg.registerInit({ + deps: { + keycloak: keycloakTransformerExtensionPoint, + }, + async init({ keycloak }) { + keycloak.setUserTransformer(customUserTransformer); + keycloak.setGroupTransformer(customGroupTransformer); + /* highlight-add-end */ + }, + }); + }, +}); +---- ++ +[IMPORTANT] +==== +The module's `pluginId` must be set to `catalog` to match the `pluginId` of the `keycloak-backend`; otherwise, the module fails to initialize. +==== + +. Install this new backend module into your {product-short} backend. 
++ +[source,javascript] +---- +backend.add(import(backstage-plugin-catalog-backend-module-keycloak-transformer)) +---- + +.Verification + +* {product-short} imports the users and groups each time when started. +Check the console logs to verify that the synchronization is completed. ++ +.Successful synchronization example: +[source,json] +---- +{"class":"KeycloakOrgEntityProvider","level":"info","message":"Read 3 Keycloak users and 2 Keycloak groups in 1.5 seconds. Committing...","plugin":"catalog","service":"backstage","taskId":"KeycloakOrgEntityProvider:default:refresh","taskInstanceId":"bf0467ff-8ac4-4702-911c-380270e44dea","timestamp":"2024-09-25 13:58:04"} +{"class":"KeycloakOrgEntityProvider","level":"info","message":"Committed 3 Keycloak users and 2 Keycloak groups in 0.0 seconds.","plugin":"catalog","service":"backstage","taskId":"KeycloakOrgEntityProvider:default:refresh","taskInstanceId":"bf0467ff-8ac4-4702-911c-380270e44dea","timestamp":"2024-09-25 13:58:04"} +---- + +* After the first import is complete, navigate to the *Catalog* page and select **User** to view the list of users. + +* When you select a user, you see the information imported from {rhsso}. + +* You can select a group, view the list, and access or review the information imported from {rhsso}. + +* You can log in with an {rhsso} account. diff --git a/modules/authentication/proc-enabling-authentication-with-github.adoc b/modules/authentication/proc-enabling-authentication-with-github.adoc new file mode 100644 index 0000000000..b4157bdec0 --- /dev/null +++ b/modules/authentication/proc-enabling-authentication-with-github.adoc @@ -0,0 +1,168 @@ +[id="enabling-authentication-with-github"] += Enabling authentication with GitHub + +To authenticate users with GitHub, enable the GitHub authentication provider in {product}. 
+ +.Prerequisites +* You have link:https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}/html/administration_guide_for_red_hat_developer_hub/assembly-add-custom-app-file-openshift_admin-rhdh[added a custom {product-short} application configuration], and have sufficient permissions to modify it. +* You have sufficient permissions in GitHub to create and manage a link:https://docs.github.com/en/apps/overview[GitHub App]. + +.Procedure +. To allow {product-short} to authenticate with GitHub, create a GitHub App. +Opt for a GitHub App instead of an OAuth app to use fine-grained permissions, gain more control over which repositories the application can access, and use short-lived tokens. + +.. link:https://docs.github.com/en/apps/creating-github-apps/registering-a-github-app/registering-a-github-app[Register a GitHub App] with the following configuration: ++ +* *GitHub App name*: Enter a unique name identifying your GitHub App, such as __<{product}>__-____. +* *Homepage URL*: Your {product-short} URL: `pass:c,a,q[{my-product-url}]`. +* *Authorization callback URL*: Your {product-short} authentication backend URL: `pass:c,a,q[{my-product-url}/api/auth/github/handler/frame]`. +* *Webhook URL*: Your {product-short} URL: `pass:c,a,q[{my-product-url}]`. +* *Webhook secret*: Provide a strong secret. +* *Repository permissions*: +** Enable `Read-only` access to: +*** *Administration* +*** *Commit statuses* +*** *Contents* +*** *Dependabot alerts* +*** *Deployments* +*** *Pull Requests* +*** *Webhooks* ++ +TIP: If you plan to make changes using the GitHub API, ensure that `Read and write` permissions are enabled instead of `Read-only`. + +** Toggle other permissions as per your needs. + +* *Organization permissions*: +** Enable `Read-only` access to *Members*. + +* For *Where can this GitHub App be installed?*, select `Only on this account`. + +.. In the *General* -> *Clients secrets* section, click *Generate a new client secret*. + +.. 
In the *General* -> *Private keys* section, click *Generate a private key*. + +.. In the *Install App* tab, choose an account to install your GitHub App on. + +.. Save the following values for the next step: + +* **App ID** +* **Client ID** +* **Client secret** +* **Private key** +* **Webhook secret** + +. To add your GitHub credentials to your {product-short} secrets, edit your {product-short} secrets, such as `secrets-rhdh`, and add the following key/value pairs: ++ +`AUTH_GITHUB_APP_ID`:: Enter the saved **App ID**. +`AUTH_GITHUB_CLIENT_ID`:: Enter the saved **Client ID**. +`GITHUB_HOST_DOMAIN`:: Enter your GitHub host domain: `pass:c[https://github.com]` unless you are using GitHub Enterprise. +`GITHUB_ORGANIZATION`:: Enter your GitHub organization name, such as `____'. +`GITHUB_ORG_URL`:: Enter `$GITHUB_HOST_DOMAIN/$GITHUB_ORGANIZATION`. +`GITHUB_CLIENT_SECRET`:: Enter the saved **Client Secret**. +`GITHUB_PRIVATE_KEY_FILE`:: Enter the saved **Private key**. +`GITHUB_WEBHOOK_URL`:: Enter your {product-short} URL: `pass:c,a,q[{my-product-url}]`. +`GITHUB_WEBHOOK_SECRET`:: Enter the saved *Webhook secret*. + +. 
To set up the GitHub authentication provider and enable integration with the GitHub API in your {product-short} custom configuration, edit your custom {product-short} ConfigMap such as `app-config-rhdh`, and add the following lines to the `app-config-rhdh.yaml` content:
++
+--
+.`app-config-rhdh.yaml` fragment with mandatory fields to enable authentication with GitHub
+[source,yaml]
+----
+auth:
+  environment: production
+  providers:
+    github:
+      production:
+        clientId: ${AUTH_GITHUB_CLIENT_ID}
+        clientSecret: ${AUTH_GITHUB_CLIENT_SECRET}
+integrations:
+  github:
+    - host: ${GITHUB_HOST_DOMAIN}
+      apps:
+        - appId: ${AUTH_GITHUB_APP_ID}
+          clientId: ${AUTH_GITHUB_CLIENT_ID}
+          clientSecret: ${GITHUB_CLIENT_SECRET}
+          webhookUrl: ${GITHUB_WEBHOOK_URL}
+          webhookSecret: ${GITHUB_WEBHOOK_SECRET}
+          privateKey: |
+            ${GITHUB_PRIVATE_KEY_FILE}
+signInPage: github
+----
+
+`environment: production`::
+Mark the environment as `production` to hide the Guest login in the {product-short} home page.
+
+`clientId`, `clientSecret`, `host`, `appId`, `webhookUrl`, `webhookSecret`, `privateKey`::
+Use the {product-short} application information that you have created in GitHub and configured in OpenShift as secrets.
+
+`signInPage: github`::
+To enable the GitHub provider as default sign-in provider.
+
+Optional: Consider adding the following optional fields:
+
+`dangerouslyAllowSignInWithoutUserInCatalog: true`::
+To enable authentication without requiring to provision users in the {product-short} software catalog.
++
+WARNING: Use `dangerouslyAllowSignInWithoutUserInCatalog` to explore {product-short} features, but do not use it in production.
++
+.`app-config-rhdh.yaml` fragment with optional field to allow authenticating users absent from the software catalog
+[source,yaml]
+----
+auth:
+  environment: production
+  providers:
+    github:
+      production:
+        clientId: ${AUTH_GITHUB_CLIENT_ID}
+        clientSecret: ${AUTH_GITHUB_CLIENT_SECRET}
+integrations:
+  github:
+    - host: ${GITHUB_HOST_DOMAIN}
+      apps:
+        - appId: ${AUTH_GITHUB_APP_ID}
+          clientId: ${AUTH_GITHUB_CLIENT_ID}
+          clientSecret: ${GITHUB_CLIENT_SECRET}
+          webhookUrl: ${GITHUB_WEBHOOK_URL}
+          webhookSecret: ${GITHUB_WEBHOOK_SECRET}
+          privateKey: |
+            ${GITHUB_PRIVATE_KEY_FILE}
+signInPage: github
+dangerouslyAllowSignInWithoutUserInCatalog: true
+----
+
+`callbackUrl`::
+The callback URL that GitHub uses when initiating an OAuth flow, such as: ____.
+Define it when {product-short} is not the immediate receiver, such as in cases when you use one OAuth app for many {product-short} instances.
++
+.`app-config-rhdh.yaml` fragment with optional `callbackUrl` field
+[source,yaml,subs="+quotes"]
+----
+auth:
+  providers:
+    github:
+      production:
+        callbackUrl: ____
+----
+
+`enterpriseInstanceUrl`::
+Your GitHub Enterprise URL.
+Requires the `GITHUB_HOST_DOMAIN` secret defined in the previous step.
++
+.`app-config-rhdh.yaml` fragment with optional `enterpriseInstanceUrl` field
+[source,yaml,subs="+quotes"]
+----
+auth:
+  providers:
+    github:
+      production:
+        enterpriseInstanceUrl: ${GITHUB_HOST_DOMAIN}
+----
+
+--
+
+.Verification
+. Go to the {product-short} login page.
+. Your {product-short} sign-in page displays *Sign in using GitHub* and the Guest user sign-in is disabled.
+. Log in with GitHub.
+ diff --git a/modules/authentication/proc-enabling-authentication-with-microsoft-azure.adoc b/modules/authentication/proc-enabling-authentication-with-microsoft-azure.adoc new file mode 100644 index 0000000000..517c38c40c --- /dev/null +++ b/modules/authentication/proc-enabling-authentication-with-microsoft-azure.adoc @@ -0,0 +1,147 @@ +[id="enabling-authentication-with-microsoft-azure"] += Enabling authentication with Microsoft Azure + +{product} includes a Microsoft Azure authentication provider that can authenticate users by using OAuth. + +.Prerequisites +. You have the permission to register an application in Microsoft Azure. +. You link:https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}/html-single/administration_guide_for_red_hat_developer_hub/index#assembly-add-custom-app-file-openshift_admin-rhdh[added a custom {product-short} application configuration]. + +.Procedure +. To allow {product-short} to authenticate with Microsoft Azure, link:https://learn.microsoft.com/en-us/entra/identity-platform/scenario-web-app-sign-user-app-registration?tabs=aspnetcore#register-an-app-by-using-the-azure-portal[create an OAuth application in Microsoft Azure]. + +.. In the Azure portal go to link:https://portal.azure.com/#view/Microsoft_AAD_RegisteredApps/ApplicationsListBlade[*App registrations*], create a **New registration** with the configuration: ++ +**Name**:: The application name in Azure, such as ____. + +.. On the *Home > App registrations > ____ > Manage > Authentication* page, *Add a platform*, with the following configuration: + +*Redirect URI*:: Enter the backend authentication URI set in {product-short}: `pass:c,a,q[{my-product-url}/api/auth/microsoft/handler/frame]` +*Front-channel logout URL*:: Leave blank. +*Implicit grant and hybrid flows*:: Leave all checkboxes cleared. + +.. 
On the *Home > App registrations > ____ > Manage > API permissions* page, *Add a Permission*, then add the following *Delegated permission* for the *Microsoft Graph API*: ++ +* `email` +* `offline_access` +* `openid` +* `profile` +* `User.Read` +* Optional custom scopes for the Microsoft Graph API that you define both in this section and in the {product-short} configuration (`app-config-rhdh.yaml`). ++ +[NOTE] +==== +Your company might require you to grant admin consent for these permissions. +Even if your company does not require admin consent, you might do so as it means users do not need to individually consent the first time they access backstage. +To grant administrator consent, a directory administrator must go to the link:https://learn.microsoft.com/en-us/azure/active-directory/manage-apps/user-admin-consent-overview[admin consent] page and click *Grant admin consent for COMPANY NAME*. +==== + + +.. On the *Home > App registrations > ____ > Manage > Certificates & Secrets* page, in the *Client secrets* tab, create a *New client secret*. + +.. Save for the next step: +- **Directory (tenant) ID** +- **Application (client) ID** +- **Application (client) secret** + +. To add your Microsoft Azure credentials to {product-short}, add the following key/value pairs to your {product-short} secrets, such as `secrets-rhdh`: ++ +`AUTH_AZURE_TENANT_ID`:: Enter your saved *Directory (tenant) ID*. +`AUTH_AZURE_CLIENT_ID`:: Enter your saved *Application (client) ID*. +`AUTH_AZURE_CLIENT_SECRET`:: Enter your saved *Application (client) secret*. + +. 
Set up the Microsoft Azure authentication provider in your {product-short} custom configuration, such as `app-config-rhdh`: ++ +-- +.`app-config-rhdh.yaml` fragment +[source,yaml,subs="+quotes,+attributes"] +---- +auth: + environment: production + providers: + microsoft: + production: + clientId: ${AUTH_AZURE_CLIENT_ID} + clientSecret: ${AUTH_AZURE_CLIENT_SECRET} + tenantId: ${AUTH_AZURE_TENANT_ID} +signInPage: microsoft +---- + +`environment: production`:: +Mark the environment as production to hide the **Guest** login in the {product-short} home page. + +`clientId`, `clientSecret` and `tenantId`:: +Use the {product-short} application information that you have created in Microsoft Azure and configured in OpenShift as secrets. + +`signInPage: microsoft`:: +Enable the Microsoft Azure provider as default sign-in provider. + +Optional: Consider adding following optional fields: + +`dangerouslyAllowSignInWithoutUserInCatalog: true`:: ++ +To enable authentication without requiring to provision users in the {product-short} software catalog. ++ +WARNING: Use `dangerouslyAllowSignInWithoutUserInCatalog` to explore {product-short} features, but do not use it in production. ++ +.`app-config-rhdh.yaml` fragment with optional field to allow authenticating users absent from the software catalog +[source,yaml] +---- +auth: + environment: production + providers: + microsoft: + production: + clientId: ${AUTH_AZURE_CLIENT_ID} + clientSecret: ${AUTH_AZURE_CLIENT_SECRET} + tenantId: ${AUTH_AZURE_TENANT_ID} +signInPage: microsoft +dangerouslyAllowSignInWithoutUserInCatalog: true +---- + +`domainHint`:: +Optional for single-tenant applications. +You can reduce login friction for users with accounts in multiple tenants by automatically filtering out accounts from other tenants. +If you want to use this parameter for a single-tenant application, uncomment and enter the tenant ID. +If your application registration is multi-tenant, leave this parameter blank. 
+For more information, see link:https://learn.microsoft.com/en-us/azure/active-directory/manage-apps/home-realm-discovery-policy[Home Realm Discovery]. ++ +.`app-config-rhdh.yaml` fragment with optional `domainHint` field +[source,yaml,subs="+quotes,+attributes"] +---- +auth: + environment: production + providers: + microsoft: + production: + domainHint: ${AUTH_AZURE_TENANT_ID} +---- + +`additionalScopes`:: +Optional for additional scopes. +To add scopes for the application registration, uncomment and enter the list of scopes that you want to add. +The default and mandatory value lists: `'openid', 'offline_access', 'profile', 'email', 'User.Read'`. ++ +.`app-config-rhdh.yaml` fragment with optional `additionalScopes` field +[source,yaml,subs="+quotes,+attributes"] +---- +auth: + environment: production + providers: + microsoft: + production: + additionalScopes: + - Mail.Send +---- +-- + +[NOTE] +==== +This step is optional for environments with outgoing access restrictions, such as firewall rules. + If your environment has such restrictions, ensure that your {product-very-short} backend can access the following hosts: + +* `login.microsoftonline.com`: For obtaining and exchanging authorization codes and access tokens. + +* `graph.microsoft.com`: For retrieving user profile information (as referenced in the source code). +If this host is unreachable, you might see an _Authentication failed, failed to fetch user profile_ error when attempting to log in. 
+==== diff --git a/modules/authentication/proc-enabling-authentication-with-rhsso.adoc b/modules/authentication/proc-enabling-authentication-with-rhsso.adoc new file mode 100644 index 0000000000..c8850771a1 --- /dev/null +++ b/modules/authentication/proc-enabling-authentication-with-rhsso.adoc @@ -0,0 +1,172 @@ +[id="enabling-authentication-with-rhsso"] += Enabling authentication with {rhsso-brand-name} ({rhsso}) + +To authenticate users with Red Hat Single Sign-On ({rhsso}), enable the OpenID Connect (OIDC) authentication provider in {product}. + + +.Prerequisites +* You link:https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}/html/administration_guide_for_red_hat_developer_hub/assembly-add-custom-app-file-openshift_admin-rhdh[added a custom {product-short} application configuration], and have sufficient permissions to modify it. +* You have sufficient permissions in {rhsso} to create and manage a realm. + +.Procedure +. To allow {product-short} to authenticate with {rhsso}, complete the steps in {rhsso}, to link:https://docs.redhat.com/en/documentation/red_hat_single_sign-on/7.6/html-single/getting_started_guide/index#realms-apps_[create a realm and a user] and link:https://docs.redhat.com/en/documentation/red_hat_single_sign-on/7.6/html-single/getting_started_guide/index#registering-app_[register the {product-short} application]: + +.. Use an existing realm, or link:https://docs.redhat.com/en/documentation/red_hat_single_sign-on/7.6/html-single/getting_started_guide/index#create-realm_[create a realm], with a distinctive **Name** such as ____. +Save the value for the next step: +* **{rhsso} realm base URL**, such as: ____/auth/realms/____. + +.. To register your {product-short} in {rhsso}, in the created realm, link:https://docs.redhat.com/en/documentation/red_hat_single_sign-on/7.6/html-single/getting_started_guide/index#registering-app_[create a Client ID], with: +... 
**Client ID**: A distinctive client ID, such as __<{product-very-short}>__.
+... **Valid redirect URIs**: Set to the OIDC handler URL: `https://____/api/auth/oidc/handler/frame`.
+... Navigate to the **Credentials** tab and copy the **Client secret**.
+... Save the values for the next step:
+* **Client ID**
+* **Client Secret**
+
+.. To prepare for the verification steps, in the same realm, get the credential information for an existing user or link:https://docs.redhat.com/en/documentation/red_hat_single_sign-on/7.6/html-single/getting_started_guide/index#create-user_[create a user]. Save the user credential information for the verification steps.
+
+. To add your {rhsso} credentials to your {product-short} secrets, edit your {product-short} secrets, such as `secrets-rhdh`, and add the following key/value pairs:
++
+`AUTH_OIDC_CLIENT_ID`:: Enter the saved **Client ID**.
+`AUTH_OIDC_CLIENT_SECRET`:: Enter the saved **Client Secret**.
+`AUTH_OIDC_METADATA_URL`:: Enter the saved **{rhsso} realm base URL**.
+
+. To set up the {rhsso} authentication provider in your {product-short} custom configuration, edit your custom {product-short} ConfigMap such as `app-config-rhdh`, and add the following lines to the `app-config-rhdh.yaml` content:
++
+--
+.`app-config-rhdh.yaml` fragment with mandatory fields to enable authentication with {rhsso}
+[source,yaml]
+----
+auth:
+  environment: production
+  providers:
+    oidc:
+      production:
+        metadataUrl: ${AUTH_OIDC_METADATA_URL}
+        clientId: ${AUTH_OIDC_CLIENT_ID}
+        clientSecret: ${AUTH_OIDC_CLIENT_SECRET}
+signInPage: oidc
+----
+
+`environment: production`::
+Mark the environment as `production` to hide the Guest login in the {product-short} home page.
+
+`metadataUrl`, `clientId`, `clientSecret`::
+To configure the OIDC provider with your secrets.
+
+`signInPage: oidc`::
+To enable the OIDC provider as default sign-in provider.
+ + +Optional: Consider adding the following optional fields: + +`dangerouslyAllowSignInWithoutUserInCatalog: true`:: ++ +-- +To enable authentication without requiring to provision users in the {product-short} software catalog. ++ +WARNING: Use this option to explore {product-short} features, but do not use it in production. ++ +.`app-config-rhdh.yaml` fragment with optional field to allow authenticating users absent from the software catalog +[source,yaml] +---- +auth: + environment: production + providers: + oidc: + production: + metadataUrl: ${AUTH_OIDC_METADATA_URL} + clientId: ${AUTH_OIDC_CLIENT_ID} + clientSecret: ${AUTH_OIDC_CLIENT_SECRET} +signInPage: oidc +dangerouslyAllowSignInWithoutUserInCatalog: true +---- +-- + +`callbackUrl`:: +-- +{rhsso} callback URL. + +.`app-config-rhdh.yaml` fragment with optional `callbackURL` field +[source,yaml] +---- +auth: + providers: + oidc: + production: + callbackUrl: ${AUTH_OIDC_CALLBACK_URL} +---- +-- + +`tokenEndpointAuthMethod`:: +-- +Token endpoint authentication method. + +.`app-config-rhdh.yaml` fragment with optional `tokenEndpointAuthMethod` field +[source,yaml] +---- +auth: + providers: + oidc: + production: + tokenEndpointAuthMethod: ${AUTH_OIDC_TOKEN_ENDPOINT_METHOD} +---- +-- + +`tokenSignedResponseAlg`:: +-- +Token signed response algorithm. + +.`app-config-rhdh.yaml` fragment with optional `tokenSignedResponseAlg` field +[source,yaml] +---- +auth: + providers: + oidc: + production: + tokenSignedResponseAlg: ${AUTH_OIDC_SIGNED_RESPONSE_ALG} +---- +-- + +`scope`:: +-- +{rhsso} scope. + +.`app-config-rhdh.yaml` fragment with optional `scope` field +[source,yaml] +---- +auth: + providers: + oidc: + production: + scope: ${AUTH_OIDC_SCOPE} +---- +-- + +`signIn.resolvers`:: +-- +Declarative resolvers to override the default resolver: `emailLocalPartMatchingUserEntityName`. +The authentication provider tries each sign-in resolver until it succeeds, and fails if none succeed. 
+
+.`app-config-rhdh.yaml` fragment with optional `signIn.resolvers` field
+[source,yaml]
+----
+auth:
+  providers:
+    oidc:
+      production:
+        signIn:
+          resolvers:
+            - resolver: preferredUsernameMatchingUserEntityName
+            - resolver: emailMatchingUserEntityProfileEmail
+            - resolver: emailLocalPartMatchingUserEntityName
+----
+--
+
+--
+
+.Verification
+. Go to the {product-short} login page.
+. Your {product-short} sign-in page displays *Sign in using OIDC* and the Guest user sign-in is disabled.
+. Log in with OIDC by using the saved **Username** and **Password** values.
+
diff --git a/modules/authentication/proc-provisioning-users-from-github-to-the-software-catalog.adoc b/modules/authentication/proc-provisioning-users-from-github-to-the-software-catalog.adoc
new file mode 100644
index 0000000000..b1d001b67e
--- /dev/null
+++ b/modules/authentication/proc-provisioning-users-from-github-to-the-software-catalog.adoc
@@ -0,0 +1,76 @@
+[id="provisioning-users-from-github-to-the-software-catalog"]
+= Provisioning users from GitHub to the software catalog
+
+To authenticate users, {product} requires their presence in the software catalog.
+Consider configuring {product-short} to provision users from GitHub to the software catalog on schedule, rather than provisioning the users manually.
+ +.Prerequisites +* You have xref:enabling-authentication-with-github[enabled authentication with GitHub], including the following secrets: +** `GITHUB_HOST_DOMAIN` +** `GITHUB_ORGANIZATION` + +.Procedure + +* To enable GitHub member discovery, edit your custom {product-short} ConfigMap, such as `app-config-rhdh`, and add the following lines to the `app-config-rhdh.yaml` content: ++ +-- +[id=githubProviderId] +.`app-config.yaml` fragment with mandatory `github` fields +[source,yaml] +---- +dangerouslyAllowSignInWithoutUserInCatalog: false +catalog: + providers: + github: + providerId: + organization: "${GITHUB_ORGANIZATION}" + schedule: + frequency: + minutes: 30 + initialDelay: + seconds: 15 + timeout: + minutes: 15 + githubOrg: + githubUrl: "${GITHUB_HOST_DOMAIN}" + orgs: [ "${GITHUB_ORGANIZATION}" ] + schedule: + frequency: + minutes: 30 + initialDelay: + seconds: 15 + timeout: + minutes: 15 +---- + +`dangerouslyAllowSignInWithoutUserInCatalog: false`:: +Allow authentication only for users present in the {product-short} software catalog. + +`organization`, `githubUrl`, and `orgs`:: +Use the {product-short} application information that you have created in GitHub and configured in OpenShift as secrets. + +`schedule.frequency`:: +To specify custom schedule frequency. +Supports cron, ISO duration, and "human duration" as used in code. + +`schedule.timeout`:: +To specify custom timeout. +Supports ISO duration and "human duration" as used in code. + +`schedule.initialDelay`:: +To specify custom initial delay. +Supports ISO duration and "human duration" as used in code. +-- + +.Verification +. Check the console logs to verify that the synchronization is completed. 
++ +.Successful synchronization example: +[source,json] +---- +{"class":"GithubMultiOrgEntityProvider","level":"info","message":"Reading GitHub users and teams for org: rhdh-dast","plugin":"catalog","service":"backstage","target":"https://github.com","taskId":"GithubMultiOrgEntityProvider:production:refresh","taskInstanceId":"801b3c6c-167f-473b-b43e-e0b4b780c384","timestamp":"2024-09-09 23:55:58"} +{"class":"GithubMultiOrgEntityProvider","level":"info","message":"Read 7 GitHub users and 2 GitHub groups in 0.4 seconds. Committing...","plugin":"catalog","service":"backstage","target":"https://github.com","taskId":"GithubMultiOrgEntityProvider:production:refresh","taskInstanceId":"801b3c6c-167f-473b-b43e-e0b4b780c384","timestamp":"2024-09-09 23:55:59"} +---- + +. Log in with a GitHub account. + diff --git a/modules/authentication/proc-provisioning-users-from-microsoft-azure-to-the-software-catalog.adoc b/modules/authentication/proc-provisioning-users-from-microsoft-azure-to-the-software-catalog.adoc new file mode 100644 index 0000000000..501d6416a4 --- /dev/null +++ b/modules/authentication/proc-provisioning-users-from-microsoft-azure-to-the-software-catalog.adoc @@ -0,0 +1,291 @@ +[id="provisioning-users-from-microsoft-azure-to-the-software-catalog"] += Provisioning users from Microsoft Azure to the software catalog + +To authenticate users with Microsoft Azure, after xref:enabling-authentication-with-microsoft-azure[Enabling authentication with Microsoft Azure], provision users from Microsoft Azure to the {product-short} software catalog. + +.Prerequisites +* You have xref:enabling-authentication-with-microsoft-azure[enabled authentication with Microsoft Azure]. 
+
+.Procedure
+* To enable Microsoft Azure member discovery, edit your custom {product-short} ConfigMap, such as `app-config-rhdh`, and add the following lines to the `app-config-rhdh.yaml` content:
++
+--
+[id=microsoftGraphOrgProviderId]
+.`app-config.yaml` fragment with mandatory `microsoftGraphOrg` fields
+[source,yaml]
+----
+dangerouslyAllowSignInWithoutUserInCatalog: false
+catalog:
+  providers:
+    microsoftGraphOrg:
+      providerId:
+        target: https://graph.microsoft.com/v1.0
+        tenantId: ${AUTH_AZURE_TENANT_ID}
+        clientId: ${AUTH_AZURE_CLIENT_ID}
+        clientSecret: ${AUTH_AZURE_CLIENT_SECRET}
+----
+
+`dangerouslyAllowSignInWithoutUserInCatalog: false`::
+Allow authentication only for users in the {product-short} software catalog.
+
+`target: https://graph.microsoft.com/v1.0`::
+Defines the MSGraph API endpoint the provider is connecting to.
+You might change this parameter to use a different version, such as the link:https://learn.microsoft.com/en-us/graph/api/overview?view=graph-rest-beta#call-the-beta-endpoint[beta endpoint].
+
+`tenantId`, `clientId` and `clientSecret`::
+Use the {product-short} application information you created in Microsoft Azure and configured in OpenShift as secrets.
+
+Optional: Consider adding the following optional `microsoftGraphOrg.providerId` fields:
+
+[id=authority]
+`authority: https://login.microsoftonline.com`::
+Defines the authority used.
+Change the value to use a different link:https://learn.microsoft.com/en-us/graph/deployments#app-registration-and-token-service-root-endpoints[authority], such as Azure US government.
+Default value: `https://login.microsoftonline.com`.
++
+.`app-config.yaml` fragment with optional `authority` field
+[source,yaml]
+----
+catalog:
+  providers:
+    microsoftGraphOrg:
+      providerId:
+        authority: https://login.microsoftonline.com/
+----
+[id=queryMode]
+`queryMode: basic | advanced`:: By default, the Microsoft Graph API only provides the `basic` feature set for querying.
+Certain features require `advanced` querying capabilities. +See link:https://docs.microsoft.com/en-us/graph/aad-advanced-queries[Microsoft Azure Advanced queries]. ++ +.`app-config.yaml` fragment with optional `queryMode` field +[source,yaml] +---- +catalog: + providers: + microsoftGraphOrg: + providerId: + queryMode: advanced +---- + +[id=userExpand] +`user.expand`:: +To include the expanded resource or collection referenced by a single relationship (navigation property) in your results. +Only one relationship can be expanded in a single request. +See https://docs.microsoft.com/en-us/graph/query-parameters#expand-parameter[Microsoft Graph query expand parameter]. +This parameter can be combined with xref:userGroupMemberFilter[] or xref:userFilter[]. ++ +.`app-config.yaml` fragment with optional `user.expand` field +[source,yaml] +---- +catalog: + providers: + microsoftGraphOrg: + providerId: + user: + expand: manager +---- + +[id=userFilter] +`user.filter`:: +To filter users. +See link:https://docs.microsoft.com/en-us/graph/api/resources/user?view=graph-rest-1.0#properties[Microsoft Graph API] and link:https://docs.microsoft.com/en-us/graph/query-parameters#filter-parameter[Microsoft Graph API query filter parameters syntax]. +This parameter and xref:userGroupMemberFilter[] are mutually exclusive, only one can be specified. ++ +.`app-config.yaml` fragment with optional `user.filter` field +[source,yaml] +---- +catalog: + providers: + microsoftGraphOrg: + providerId: + user: + filter: accountEnabled eq true and userType eq 'member' +---- + +[id=userLoadPhotos] +`user.loadPhotos: true | false`:: +Load photos by default. +Set to `false` to not load user photos. 
++
+.`app-config.yaml` fragment with optional `user.loadPhotos` field
+[source,yaml]
+----
+catalog:
+  providers:
+    microsoftGraphOrg:
+      providerId:
+        user:
+          loadPhotos: true
+----
+
+[id=userSelect]
+`user.select`::
+Define the link:https://docs.microsoft.com/en-us/graph/api/resources/schemaextension?view=graph-rest-1.0[Microsoft Graph resource types] to retrieve.
++
+.`app-config.yaml` fragment with optional `user.select` field
+[source,yaml]
+----
+catalog:
+  providers:
+    microsoftGraphOrg:
+      providerId:
+        user:
+          select: ['id', 'displayName', 'description']
+----
+
+[id="userGroupMemberFilter"]
+`userGroupMember.filter`::
+To use group membership to get users.
+To filter groups and fetch their members.
+This parameter and xref:userFilter[] are mutually exclusive, only one can be specified.
++
+.`app-config.yaml` fragment with optional `userGroupMember.filter` field
+[source,yaml]
+----
+catalog:
+  providers:
+    microsoftGraphOrg:
+      providerId:
+        userGroupMember:
+          filter: "displayName eq 'Backstage Users'"
+----
+
+[id="userGroupMemberSearch"]
+`userGroupMember.search`::
+To use group membership to get users.
+To search for groups and fetch their members.
+This parameter and xref:userFilter[] are mutually exclusive, only one can be specified.
++
+.`app-config.yaml` fragment with optional `userGroupMember.search` field
+[source,yaml]
+----
+catalog:
+  providers:
+    microsoftGraphOrg:
+      providerId:
+        userGroupMember:
+          search: '"description:One" AND ("displayName:Video" OR "displayName:Drive")'
+----
+
+[id=groupExpand]
+`group.expand`::
+Optional parameter to include the expanded resource or collection referenced by a single relationship (navigation property) in your results.
+Only one relationship can be expanded in a single request.
+See link:https://docs.microsoft.com/en-us/graph/query-parameters#expand-parameter[Microsoft Graph query expand parameter].
+This parameter can be combined with xref:userGroupMemberFilter[] instead of xref:userFilter[].
++ +.`app-config.yaml` fragment with optional `group.expand` field +[source,yaml] +---- +catalog: + providers: + microsoftGraphOrg: + providerId: + group: + expand: member +---- + +[id=groupFilter] +`group.filter`:: +To filter groups. +See link:https://docs.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0#properties[Microsoft Graph API query group syntax]. ++ +.`app-config.yaml` fragment with optional `group.filter` field +[source,yaml] +---- +catalog: + providers: + microsoftGraphOrg: + providerId: + group: + filter: securityEnabled eq false and mailEnabled eq true and groupTypes/any(c:c+eq+'Unified') +---- + +[id=groupSearch] +`group.search`:: +To search for groups. +See link:https://docs.microsoft.com/en-us/graph/search-query-parameter[Microsoft Graph API query search parameter]. ++ +.`app-config.yaml` fragment with optional `group.search` field +[source,yaml] +---- +catalog: + providers: + microsoftGraphOrg: + providerId: + group: + search: '"description:One" AND ("displayName:Video" OR "displayName:Drive")' +---- + +[id=groupSelect] +`group.select`:: +To define the link:https://docs.microsoft.com/en-us/graph/api/resources/schemaextension?view=graph-rest-1.0[Microsoft Graph resource types] to retrieve. ++ +.`app-config.yaml` fragment with optional `group.select` field +[source,yaml] +---- +catalog: + providers: + microsoftGraphOrg: + providerId: + group: + select: ['id', 'displayName', 'description'] +---- + +`schedule.frequency`:: +To specify custom schedule frequency. +Supports cron, ISO duration, and "human duration" as used in code. ++ +.`app-config.yaml` fragment with optional `schedule.frequency` field +[source,yaml] +---- +catalog: + providers: + microsoftGraphOrg: + providerId: + schedule: + frequency: { hours: 1 } +---- + +`schedule.timeout`:: +To specify custom timeout. +Supports ISO duration and "human duration" as used in code. 
++ +.`app-config.yaml` fragment with optional `schedule.timeout` field +[source,yaml] +---- +catalog: + providers: + microsoftGraphOrg: + providerId: + schedule: + timeout: { minutes: 50 } +---- + +`schedule.initialDelay`:: +To specify custom initial delay. +Supports ISO duration and "human duration" as used in code. ++ +.`app-config.yaml` fragment with optional `schedule.initialDelay` field +[source,yaml] +---- +catalog: + providers: + microsoftGraphOrg: + providerId: + schedule: + initialDelay: { seconds: 15} +---- +-- + +.Verification +. Check the console logs to verify that the synchronization is completed. ++ +.Successful synchronization example: +[source,json] +---- +backend:start: {"class":"MicrosoftGraphOrgEntityProvider$1","level":"info","message":"Read 1 msgraph users and 1 msgraph groups in 2.2 seconds. Committing...","plugin":"catalog","service":"backstage","taskId":"MicrosoftGraphOrgEntityProvider:default:refresh","taskInstanceId":"88a67ce1-c466-41a4-9760-825e16b946be","timestamp":"2024-06-26 12:23:42"} +backend:start: {"class":"MicrosoftGraphOrgEntityProvider$1","level":"info","message":"Committed 1 msgraph users and 1 msgraph groups in 0.0 seconds.","plugin":"catalog","service":"backstage","taskId":"MicrosoftGraphOrgEntityProvider:default:refresh","taskInstanceId":"88a67ce1-c466-41a4-9760-825e16b946be","timestamp":"2024-06-26 12:23:42"} +---- + +. Log in with a Microsoft Azure account. 
diff --git a/modules/authentication/proc-provisioning-users-from-rhsso-to-the-software-catalog.adoc b/modules/authentication/proc-provisioning-users-from-rhsso-to-the-software-catalog.adoc new file mode 100644 index 0000000000..ca4c10ad08 --- /dev/null +++ b/modules/authentication/proc-provisioning-users-from-rhsso-to-the-software-catalog.adoc @@ -0,0 +1,153 @@ +[id="provisioning-users-from-rhsso-to-the-software-catalog"] += Provisioning users from {rhsso-brand-name} ({rhsso}) to the software catalog + +.Prerequisites +* You xref:enabling-authentication-with-rhsso[enabled authentication with {rhsso}]. + +.Procedure + +* To enable {rhsso} member discovery, edit your custom {product-short} ConfigMap, such as `app-config-rhdh`, and add the following lines to the `app-config-rhdh.yaml` content: ++ +-- +[id=keycloakOrgProviderId] +.`app-config.yaml` fragment with mandatory `keycloakOrg` fields +[source,yaml] +---- +dangerouslyAllowSignInWithoutUserInCatalog: false +catalog: + providers: + keycloakOrg: + default: + baseUrl: ${AUTH_OIDC_METADATA_URL} + clientId: ${AUTH_OIDC_CLIENT_ID} + clientSecret: ${AUTH_OIDC_CLIENT_SECRET} +---- + +`dangerouslyAllowSignInWithoutUserInCatalog: false`:: + Allow authentication only for users present in the {product-short} software catalog. + +`baseUrl`:: +Your {rhsso} server URL, defined when xref:enabling-authentication-with-rhsso[enabling authentication with {rhsso}]. + +`clientId`:: +Your {product-short} application client ID in {rhsso}, defined when xref:enabling-authentication-with-rhsso[enabling authentication with {rhsso}]. + +`clientSecret`:: +Your {product-short} application client secret in {rhsso}, defined when xref:enabling-authentication-with-rhsso[enabling authentication with {rhsso}]. + +Optional: Consider adding the following optional fields: + +`realm`:: +Realm to synchronize. +Default value: `master`. 
++ +.`app-config.yaml` fragment with optional `realm` field +[source,yaml] +---- +catalog: + providers: + keycloakOrg: + default: + realm: master +---- + +`loginRealm`:: +Realm used to authenticate. +Default value: `master`. ++ +.`app-config.yaml` fragment with optional `loginRealm` field +[source,yaml] +---- +catalog: + providers: + keycloakOrg: + default: + loginRealm: master +---- + +`userQuerySize`:: +User number to query simultaneously. +Default value: `100`. ++ +.`app-config.yaml` fragment with optional `userQuerySize` field +[source,yaml] +---- +catalog: + providers: + keycloakOrg: + default: + userQuerySize: 100 +---- + +`groupQuerySize`:: +Group number to query simultaneously. +Default value: `100`. ++ +.`app-config.yaml` fragment with optional `groupQuerySize` field +[source,yaml] +---- +catalog: + providers: + keycloakOrg: + default: + groupQuerySize: 100 +---- + +`schedule.frequency`:: +To specify custom schedule frequency. +Supports cron, ISO duration, and "human duration" as used in code. ++ +.`app-config.yaml` fragment with optional `schedule.frequency` field +[source,yaml] +---- +catalog: + providers: + keycloakOrg: + default: + schedule: + frequency: { hours: 1 } +---- + +`schedule.timeout`:: +To specify custom timeout. +Supports ISO duration and "human duration" as used in code. ++ +.`app-config.yaml` fragment with optional `schedule.timeout` field +[source,yaml] +---- +catalog: + providers: + keycloakOrg: + default: + schedule: + timeout: { minutes: 50 } +---- + +`schedule.initialDelay`:: +To specify custom initial delay. +Supports ISO duration and "human duration" as used in code. ++ +.`app-config.yaml` fragment with optional `schedule.initialDelay` field +[source,yaml] +---- +catalog: + providers: + keycloakOrg: + default: + schedule: + initialDelay: { seconds: 15} +---- +-- + +.Verification + +. Check the console logs to verify that the synchronization is completed. 
++ +.Successful synchronization example: +[source,json] +---- +{"class":"KeycloakOrgEntityProvider","level":"info","message":"Read 3 Keycloak users and 2 Keycloak groups in 1.5 seconds. Committing...","plugin":"catalog","service":"backstage","taskId":"KeycloakOrgEntityProvider:default:refresh","taskInstanceId":"bf0467ff-8ac4-4702-911c-380270e44dea","timestamp":"2024-09-25 13:58:04"} +{"class":"KeycloakOrgEntityProvider","level":"info","message":"Committed 3 Keycloak users and 2 Keycloak groups in 0.0 seconds.","plugin":"catalog","service":"backstage","taskId":"KeycloakOrgEntityProvider:default:refresh","taskInstanceId":"bf0467ff-8ac4-4702-911c-380270e44dea","timestamp":"2024-09-25 13:58:04"} +---- + +. Log in with an {rhsso} account. diff --git a/modules/authorization/con-rbac-conditional-policies-rhdh.adoc b/modules/authorization/con-rbac-conditional-policies-rhdh.adoc new file mode 100644 index 0000000000..637c36a6b7 --- /dev/null +++ b/modules/authorization/con-rbac-conditional-policies-rhdh.adoc @@ -0,0 +1,117 @@ +[id='con-rbac-conditional-policies-rhdh_{context}'] += Conditional policies in {product} + +The permission framework in {product} provides conditions, supported by the RBAC backend plugin (`backstage-plugin-rbac-backend`). The conditions work as content filters for the {product-short} resources that are provided by the RBAC backend plugin. + +The RBAC backend API stores conditions assigned to roles in the database. When you request to access the frontend resources, the RBAC backend API searches for the corresponding conditions and delegates them to the appropriate plugin using its plugin ID. If you are assigned to multiple roles with different conditions, then the RBAC backend merges the conditions using the `anyOf` criteria. + +Conditional criteria:: ++ +-- +A condition in {product-short} is a simple condition with a rule and parameters. However, a condition can also contain a parameter or an array of parameters combined by conditional criteria. 
The supported conditional criteria include: + +* `allOf`: Ensures that all conditions within the array must be true for the combined condition to be satisfied. + +* `anyOf`: Ensures that at least one of the conditions within the array must be true for the combined condition to be satisfied. + +* `not`: Ensures that the condition within it must not be true for the combined condition to be satisfied. +-- + +Conditional object:: ++ +-- +The plugin specifies the parameters supported for conditions. You can access the conditional object schema from the RBAC API endpoint to understand how to construct a conditional JSON object, which is then used by the RBAC backend plugin API. + +A conditional object contains the following parameters: + +.Conditional object parameters +[cols="30%,45%,25%", frame="all", options="header"] +|=== +|Parameter +|Type +|Description + +|`result` +|String +|Always has the value `CONDITIONAL` + +|`roleEntityRef` +|String +|String entity reference to the RBAC role, such as `role:default/dev` + +|`pluginId` +|String +|Corresponding plugin ID, such as `catalog` + +|`permissionMapping` +|String array +|Array of permission actions, such as `['read', 'update', 'delete']` + +|`resourceType` +|String +|Resource type provided by the plugin, such as `catalog-entity` + +|`conditions` +|JSON +|Condition JSON with parameters or array parameters joined by criteria + +|=== +-- + +Conditional policy aliases:: ++ +-- +The RBAC backend plugin (`backstage-plugin-rbac-backend`) supports the use of aliases in conditional policy rule parameters. The conditional policy aliases are dynamically replaced with the corresponding values during policy evaluation. Each alias in a conditional policy is prefixed with a `$` sign, indicating its special function. + +The supported conditional aliases include: + +* `$currentUser`: This alias is replaced with the user entity reference for the user who requests access to the resource.
For example, if user Tom from the default namespace requests access, `$currentUser` becomes `user:default/tom`. ++ +-- + +.Example conditional policy object with `$currentUser` alias +[source,json] +---- +{ + "result": "CONDITIONAL", + "roleEntityRef": "role:default/developer", + "pluginId": "catalog", + "resourceType": "catalog-entity", + "permissionMapping": ["delete"], + "conditions": { + "rule": "IS_ENTITY_OWNER", + "resourceType": "catalog-entity", + "params": { + "claims": ["$currentUser"] + } + } +} +---- +-- + +* `$ownerRefs`: This alias is replaced with ownership references, usually as an array that includes the user entity reference and the user's parent group entity reference. For example, for user Tom from team-a, `$ownerRefs` becomes `['user:default/tom', 'group:default/team-a']`. ++ +-- +.Example conditional policy object with `$ownerRefs` alias +[source,json] +---- +{ + "result": "CONDITIONAL", + "roleEntityRef": "role:default/developer", + "pluginId": "catalog", + "resourceType": "catalog-entity", + "permissionMapping": ["delete"], + "conditions": { + "rule": "IS_ENTITY_OWNER", + "resourceType": "catalog-entity", + "params": { + "claims": ["$ownerRefs"] + } + } +} +---- +-- +-- + + + diff --git a/modules/authorization/con-rbac-config-permission-policies-admin.adoc b/modules/authorization/con-rbac-config-permission-policies-admin.adoc new file mode 100644 index 0000000000..15e8f74adb --- /dev/null +++ b/modules/authorization/con-rbac-config-permission-policies-admin.adoc @@ -0,0 +1,36 @@ +[id='con-rbac-config-permission-policies-admin_{context}'] += Configuration of permission policies administrators + +The permission policies for users and groups in the {product-short} are managed by permission policy administrators. Only permission policy administrators can access the Role-Based Access Control REST API. + +The purpose of configuring policy administrators is to enable a specific, restricted number of authenticated users to access the RBAC REST API. 
The permission policies are defined in a `policy.csv` file, which is referenced in the `app-config-rhdh` ConfigMap. OpenShift platform administrators or cluster administrators can perform this task with access to the namespace where {product} is deployed. + +You can enable a permission policy administrator by configuring the `app-config.yaml` file as follows: + +[source,yaml] +---- +permission: + enabled: true + rbac: + admin: + users: + - name: user:default/joeuser +---- + +The permission policy role (`role:default/rbac_admin`) is a default role in {product-short} and includes some permissions upon creation, such as creating, reading, updating, and deleting permission policies/roles, as well as reading from the catalog. + +If the default permissions are not adequate for your requirements, you can define a new administrator role tailored to your requirements using relevant permission policies. Alternatively, you can use the optional `superUsers` configuration value, which grants unrestricted permissions across {product-short}. + +You can set the `superUsers` in the `app-config.yaml` file as follows: + +[source,yaml] +---- +# ... +permission: + enabled: true + rbac: + admin: + superUsers: + - name: user:default/joeuser + # ... +---- diff --git a/modules/authorization/con-rbac-config-permission-policies-external-file.adoc b/modules/authorization/con-rbac-config-permission-policies-external-file.adoc new file mode 100644 index 0000000000..6ef78a60c0 --- /dev/null +++ b/modules/authorization/con-rbac-config-permission-policies-external-file.adoc @@ -0,0 +1,66 @@ +[id='con-rbac-config-permission-policies-external-file_{context}'] += Configuration of permission policies defined in an external file + +You can configure the permission policies before starting the {product}. If permission policies are defined in an external file, then you can import the same file in the {product-short}. 
You must define the permission policies using the following Casbin rules format: + +[source,format] +---- +p, <entity_reference>, <permission>, <policy>, <effect> +---- + +You can define roles using the following Casbin rules format: + +[source,format] +---- +g, <member_entity_reference>, <role_entity_reference> +---- + +[NOTE] +==== +For information about the Casbin rules format, see https://casbin.org/docs/category/the-basics[Basics of Casbin rules]. +==== + +The following is an example of permission policies configuration: + +[source,csv] +---- +p, role:default/guests, catalog-entity, read, allow + +p, role:default/guests, catalog.entity.create, create, allow + +g, user:default/<user-name>, role:default/guests + +g, group:default/<group-name>, role:default/guests +---- + +If a defined permission does not contain an action associated with it, then add `use` as a policy. See the following example: + +[source,csv] +---- +p, role:default/guests, kubernetes.proxy, use, allow +---- + +You can define the `policy.csv` file path in the `app-config.yaml` file: + +[source,yaml] +---- +permission: + enabled: true + rbac: + policies-csv-file: /some/path/rbac-policy.csv +---- + +You can use an optional configuration value that enables reloading the CSV file without restarting the {product-short} instance. + +Set the value of the `policyFileReload` option in the `app-config.yaml` file: + +[source,yaml] +---- +# ... +permission: + enabled: true + rbac: + policies-csv-file: /some/path/rbac-policy.csv + policyFileReload: true + # ...
+---- diff --git a/modules/authorization/con-rbac-config-permission-policies.adoc b/modules/authorization/con-rbac-config-permission-policies.adoc new file mode 100644 index 0000000000..af03c52bba --- /dev/null +++ b/modules/authorization/con-rbac-config-permission-policies.adoc @@ -0,0 +1,7 @@ +[id='con-rbac-config-permission-policies_{context}'] += Permission policies configuration + +There are two approaches to configure the permission policies in {product}, including: + +* Configuration of permission policies administrators +* Configuration of permission policies defined in an external file diff --git a/modules/authorization/con-rbac-overview.adoc b/modules/authorization/con-rbac-overview.adoc new file mode 100644 index 0000000000..b050019444 --- /dev/null +++ b/modules/authorization/con-rbac-overview.adoc @@ -0,0 +1,6 @@ +[id='con-rbac-overview_{context}'] += Role-Based Access Control (RBAC) in {product} + +Role-Based Access Control is a security paradigm that restricts access to authorized users. This feature includes defining roles with specific permissions and then assigning those roles to the users. + +The {product} uses RBAC to improve the permission system within the platform. The RBAC feature in {product-short} introduces an administrator role and leverages the organizational structure including teams, groups, and users by facilitating efficient access control. diff --git a/modules/authorization/con-rbac-rest-api.adoc b/modules/authorization/con-rbac-rest-api.adoc new file mode 100644 index 0000000000..ea956895bc --- /dev/null +++ b/modules/authorization/con-rbac-rest-api.adoc @@ -0,0 +1,119 @@ +[id='con-rbac-rest-api_{context}'] += Role-based Access Control (RBAC) REST API + +{product} provides RBAC REST API that you can use to manage the permissions and roles in the {product-short}. This API supports you to facilitate and automate the maintenance of {product-short} permission policies and roles. 
+ +Using the RBAC REST API, you can perform the following actions: + +* Retrieve information about all permission policies or specific permission policies, or roles +* Create, update, or delete a permission policy or a role +* Retrieve permission policy information about static plugins + +The RBAC REST API requires the following components: + +Authorization:: ++ +-- + +The RBAC REST API requires Bearer token authorization for the permitted user role. For development purposes, you can access a web console in a browser. When you refresh a token request in the list of network requests, you find the token in the response JSON. + +`Authorization: Bearer $token` + +For example, on the {product-short} *Homepage*, navigate to the *Network* tab and search for the `query?term=` network call. Alternatively, you can go to the *Catalog* page and select any Catalog API network call to acquire the Bearer token. +-- + +HTTP methods:: ++ +-- + +The RBAC REST API supports the following HTTP methods for API requests: + +* `GET`: Retrieves specified information from a specified resource endpoint +* `POST`: Creates or updates a resource +* `PUT`: Updates a resource +* `DELETE`: Deletes a resource +-- + +Base URL:: ++ +-- + +The base URL for RBAC REST API requests is `pass:c[http://SERVER:PORT/api/permission/policies]`, such as `pass:c[http://localhost:7007/api/permission/policies]`. + +-- + +Endpoints:: ++ +-- + +RBAC REST API endpoints, such as `/api/permission/policies/[kind]/[namespace]/[name]` for specified `kind`, `namespace`, and `name`, are the URI that you append to the base URL to access the corresponding resource. 
+ +Example request URL for `/api/permission/policies/[kind]/[namespace]/[name]` endpoint is: + +`pass:c[http://localhost:7007/api/permission/policies/user/default/johndoe]` + +[NOTE] +==== +If at least one permission is assigned to `user:default/johndoe`, then the example request URL mentioned previously returns a result if sent in a `GET` request with a valid authorization token. However, if permission is only assigned to roles, then the example request URL does not return an output. +==== +-- + +Request data:: ++ +-- + +HTTP `POST` requests in the RBAC REST API may require a JSON request body with data to accompany the request. + +Example `POST` request URL and JSON request body data for +`pass:c[http://localhost:7007/api/permission/policies]`: + +[source,json] +---- +{ + "entityReference": "role:default/test", + "permission": "catalog-entity", + "policy": "delete", + "effect": "allow" +} +---- +-- + +HTTP status codes:: ++ +-- + +The RBAC REST API supports the following HTTP status codes to return as responses: + +* `200` OK: The request was successful. +* `201` Created: The request resulted in a new resource being successfully created. +* `204` No Content: The request was successful, but there is no additional content to send in the response payload. +* `400` Bad Request: The request contains an input error. +* `401` Unauthorized: The request lacks valid authentication for the requested resource. +* `403` Forbidden: The server refuses to authorize the request. +* `404` Not Found: The server cannot find the requested resource. +* `409` Conflict: The request conflicts with the current state of the target resource. +-- + +Source:: ++ +-- +Each permission policy and role created using the RBAC plugin is associated with a source to maintain data consistency within the plugin.
You can manipulate permission policies and roles based on the following designated source information: + +* CSV file +* Configuration file +* REST API +* Legacy + +Managing roles and permission policies originating from CSV files and REST API involves straightforward modification based on their initial source information. + +The Configuration file pertains to the default `role:default/rbac_admin` role provided by the RBAC plugin. The default role has limited permissions to create, read, update, and delete permission policies or roles, and to read catalog entities. + +[NOTE] +==== +In case the default permissions are insufficient for your administrative requirements, you can create a custom admin role with required permission policies. +==== + +The legacy source applies to policies and roles defined before RBAC backend plugin version `2.1.3`, and is the least restrictive among the source location options. You must update the permissions and roles in legacy source to use either REST API or the CSV file sources. + +You can use the `GET` requests to query roles and policies and determine the source information, if required. +-- diff --git a/modules/authorization/con-user-stats-rhdh.adoc b/modules/authorization/con-user-stats-rhdh.adoc new file mode 100644 index 0000000000..68dbaca4ee --- /dev/null +++ b/modules/authorization/con-user-stats-rhdh.adoc @@ -0,0 +1,8 @@ +[id='con-user-stats-rhdh_{context}'] += User statistics in {product} + +In {product}, the `licensed-users-info-backend` plugin provides statistical information about the logged-in users using the Web UI or REST API endpoints. + +The `licensed-users-info-backend` plugin enables administrators to monitor the number of active users on {product-short}. Using this feature, organizations can compare their actual usage with the number of licenses they have purchased. Additionally, you can share the user metrics with {company-name} for transparency and accurate licensing. 
+ +The `licensed-users-info-backend` plugin is enabled by default. This plugin enables a *Download User List* link at the bottom of the *Administration -> RBAC* tab. \ No newline at end of file diff --git a/modules/authorization/proc-download-user-stats-rhdh.adoc b/modules/authorization/proc-download-user-stats-rhdh.adoc new file mode 100644 index 0000000000..79a783df7f --- /dev/null +++ b/modules/authorization/proc-download-user-stats-rhdh.adoc @@ -0,0 +1,16 @@ +[id='proc-download-user-stats-rhdh_{context}'] += Downloading active users list in {product} + +You can download the list of users in CSV format using the {product-short} web interface. + +.Prerequisites + +* RBAC plugins (`@janus-idp/backstage-plugin-rbac` and `@janus-idp/backstage-plugin-rbac-backend`) must be enabled in {product}. +* An administrator role must be assigned. + +.Procedure + +. In {product}, navigate to *Administration* and select the *RBAC* tab. +. At the bottom of the *RBAC* page, click *Download User List*. +. Optional: Modify the file name in the *Save as* field and click *Save*. +. To access the downloaded users list, go to the *Downloads* folder on your local machine and open the CSV file. diff --git a/modules/authorization/proc-mounting-the-policy-csv-file-using-helm.adoc b/modules/authorization/proc-mounting-the-policy-csv-file-using-helm.adoc new file mode 100644 index 0000000000..1d75250ea7 --- /dev/null +++ b/modules/authorization/proc-mounting-the-policy-csv-file-using-helm.adoc @@ -0,0 +1,66 @@ +[id='proc-mounting-the-policy-csv-file-using-helm_{context}'] += Mounting `policy.csv` file to the {product-short} Helm chart + +When the {product} is deployed with the Helm chart, you must define the `policy.csv` file by mounting it to the {product-short} Helm chart. + +You can add your `policy.csv` file to the {product-short} Helm Chart by creating a `configMap` and mounting it. + +.Prerequisites + +* You are logged in to your {ocp-short} account using the {ocp-short} web console. 
+* {product} is installed and deployed using Helm Chart. ++ +//For more information about installing the {product} on {ocp-short} using Helm Chart, see xref:proc-install-rhdh-ocp-helm_{context}[]. +//replace with a link to the installation guide. + +.Procedure + +. In {ocp-short}, create a ConfigMap to hold the policies as shown in the following example: ++ +-- +.Example `ConfigMap` +[source,yaml] +---- +kind: ConfigMap +apiVersion: v1 +metadata: + name: rbac-policy + namespace: rhdh +data: + rbac-policy.csv: | + p, role:default/guests, catalog-entity, read, allow + p, role:default/guests, catalog.entity.create, create, allow + + g, user:default/, role:default/guests +---- +-- + +. In the {product-short} Helm Chart, go to *Root Schema -> Backstage chart schema -> Backstage parameters -> Backstage container additional volume mounts*. +. Select *Add Backstage container additional volume mounts* and add the following values: ++ +-- +* *mountPath*: `opt/app-root/src/rbac` +* *Name*: `rbac-policy` +-- + +. Add the RBAC policy to the *Backstage container additional volumes* in the {product-short} Helm Chart: ++ +-- +* *name*: `rbac-policy` +* *configMap* +** *defaultMode*: `420` +** *name*: `rbac-policy` +-- + +. 
Update the policy path in the `app-config.yaml` file as follows: ++ +-- +.Example `app-config.yaml` file +[source,yaml] +---- +permission: + enabled: true + rbac: + policies-csv-file: ./rbac/rbac-policy.csv +---- +-- diff --git a/modules/authorization/proc-mounting-the-policy-csv-file-using-the-operator.adoc b/modules/authorization/proc-mounting-the-policy-csv-file-using-the-operator.adoc new file mode 100644 index 0000000000..617d41a187 --- /dev/null +++ b/modules/authorization/proc-mounting-the-policy-csv-file-using-the-operator.adoc @@ -0,0 +1,84 @@ +[id='proc-mounting-the-policy-csv-file-using-the-operator_{context}'] += Mounting `policy.csv` file using the {product-short} Operator + +When the {product} is deployed with the Operator, you can add your `policy.csv` file using the {product-short} Operator by creating a `ConfigMap` and mounting it through your Custom Resource (CR). + +.Prerequisites + +* You are logged in to your {ocp-short} account using the {ocp-short} web console. +* {product} is installed and deployed using the Operator. +* You have added a custom configuration file to {ocp-short}. For more information, see link:{LinkAdminGuide}[Adding a custom configuration file to {ocp-short}]. ++ +//For more information about installing the {product} on {ocp-short} using the Operator, see xref:proc-install-rhdh-ocp-operator_{context}[]. +//replace with a link to the installation guide. + +.Procedure + +. In {ocp-short}, create a ConfigMap to hold the policies as shown in the following example: ++ +-- +.Example `ConfigMap` +[source,yaml] +---- +kind: ConfigMap +apiVersion: v1 +metadata: + name: rbac-policy +data: + rbac-policy.csv: | + p, role:default/guests, catalog-entity, read, allow + p, role:default/guests, catalog.entity.create, create, allow + + g, user:default/, role:default/guests +---- +-- + +. 
Update the policy path in your custom `app-config.yaml` ConfigMap as follows: ++ +-- +.Example `app-config.yaml` file +[source,yaml] +---- +permission: + enabled: true + rbac: + policies-csv-file: ./rbac-policy.csv +---- +-- + +. From the *Developer* perspective in the {ocp-short} web console, select the *Topology* view. +. Click the overflow menu for the {product} instance that you want to use and select *Edit Backstage* to load the YAML view of the {product} instance. +. In the CR, enter the name of the custom `rbac-policy` ConfigMap as the value for the `spec.application.extraFiles.configMaps` field. For example: ++ +.Example custom resource +[source, yaml] +---- +apiVersion: rhdh.redhat.com/v1alpha1 +kind: Backstage +metadata: + name: example +spec: + application: + appConfig: + mountPath: /opt/app-root/src + configMaps: + - name: app-config-rhdh + extraEnvs: + secrets: + - name: secrets-rhdh + extraFiles: + mountPath: /opt/app-root/src + configMaps: + - name: rbac-policy + replicas: 1 + route: + enabled: true + database: + enableLocalDb: true +---- +. Click *Save*. + +.Verification + +. Navigate back to the *Topology* view and wait for the {product} pod to start. +. Click the *Open URL* icon to access the {product} platform with the updated configuration settings. diff --git a/modules/authorization/proc-rbac-config-conditional-policy-file.adoc b/modules/authorization/proc-rbac-config-conditional-policy-file.adoc new file mode 100644 index 0000000000..07c611bb2d --- /dev/null +++ b/modules/authorization/proc-rbac-config-conditional-policy-file.adoc @@ -0,0 +1,166 @@ +[id='proc-rbac-config-conditional-policy-file_{context}'] += Configuring conditional policies defined in an external file + +You can configure and manage conditional policies that are defined in an external file. To define conditional policies, you can directly edit the configuration files and pass them to {product-short}, instead of using the {product-short} web UI or API. 
You can configure {product-short} to use these files instead of the default files. + +.Prerequisites +* You are logged in to your {ocp-short} account using the {ocp-short} web console. +* You have defined roles and associated policies in a CSV file that serves as a basis for creating roles and permissions. Ensure that you mount the CSV file to {product-short}. ++ +For more information, see xref:ref-rbac-conditional-policy-definition_title-authorization[Conditional policies definition] and xref:con-rbac-config-permission-policies-external-file_title-authorization[Configuration of permission policies defined in an external file]. + +.Procedure + +. Define conditional policies in a YAML file, which includes role references, permission mappings, and conditions. ++ +-- +The following is an example of a YAML file defining conditional policies: + +.Example YAML file defining conditional policies +[source,yaml] +---- +--- +result: CONDITIONAL +roleEntityRef: 'role:default/test' +pluginId: catalog +resourceType: catalog-entity +permissionMapping: + - read + - update +conditions: + rule: IS_ENTITY_OWNER + resourceType: catalog-entity + params: + claims: + - 'group:default/team-a' + - 'group:default/team-b' +--- +result: CONDITIONAL +roleEntityRef: 'role:default/test' +pluginId: catalog +resourceType: catalog-entity +permissionMapping: + - delete +conditions: + rule: IS_ENTITY_OWNER + resourceType: catalog-entity + params: + claims: + - 'group:default/team-a' +---- +-- +. 
In {ocp-short}, create a ConfigMap to hold the policies as shown in the following example: ++ +-- +.Example ConfigMap +[source, yaml] +---- +kind: ConfigMap +apiVersion: v1 +metadata: + name: rbac-conditional-policy + namespace: rhdh +data: + rbac-policy.yaml: | + p, role:default/guests, catalog-entity, read, allow + + result: CONDITIONAL + roleEntityRef: 'role:default/test' + pluginId: catalog + resourceType: catalog-entity + permissionMapping: + - read + - update + conditions: + rule: IS_ENTITY_OWNER + resourceType: catalog-entity + params: + claims: + - 'group:default/team-a' + - 'group:default/team-b' +---- +-- + +. Open `app-config.yaml` file and specify the path to `conditionalPoliciesFile` as shown in the following example: ++ +-- +.Example `app-config.yaml` file +[source,yaml] +---- +permission: + enabled: true + rbac: + conditionalPoliciesFile: /some/path/conditional-policies.yaml +---- +-- + +. To enable automatic reloading of the policy file without restarting the application, add the `policyFileReload` option and set it to `true`: ++ +-- +.Example `app-config.yaml` file +[source,yaml] +---- +permission: + enabled: true + rbac: + conditionalPoliciesFile: /some/path/conditional-policies.yaml + policies-csv-file: /some/path/rbac-policy.csv + policyFileReload: true +---- +-- + +. Optional: Define nested conditional policies in the YAML file as needed. 
++ +-- +.Example for nested conditional policies +[source,yaml] +---- +{ + "result": "CONDITIONAL", + "roleEntityRef": "role:default/developer", + "pluginId": "catalog", + "resourceType": "catalog-entity", + "permissionMapping": ["delete"], + "conditions": { + "allOf": [ + { + "anyOf": [ + { + "rule": "IS_ENTITY_KIND", + "resourceType": "catalog-entity", + "params": { + "kinds": [ + "group" + ] + } + }, + { + "rule": "IS_ENTITY_OWNER", + "resourceType": "catalog-entity", + "params": { + "claims": [ + "$ownerRefs" + ] + } + } + ] + }, + { + "not": { + "rule": "IS_ENTITY_KIND", + "resourceType": "catalog-entity", + "params": { + "kinds": [ + "api" + ] + } + } + } + ] +} +} +---- + +In the previous example, the `role:default/developer` is granted the condition to delete catalog entities only if they are the entity owner or if the catalog entity belongs to a group. However, this condition does not apply if the catalog entity is an API. +-- + diff --git a/modules/authorization/proc-rbac-send-request-rbac-rest-api.adoc b/modules/authorization/proc-rbac-send-request-rbac-rest-api.adoc new file mode 100644 index 0000000000..2721eb99d8 --- /dev/null +++ b/modules/authorization/proc-rbac-send-request-rbac-rest-api.adoc @@ -0,0 +1,53 @@ +[id='proc-rbac-send-request-rbac-rest-api_{context}'] += Sending requests with the RBAC REST API using a REST client or curl utility + +The RBAC REST API enables you to interact with the permission policies and roles in {product-short} without using the user interface. You can send RBAC REST API requests using any REST client or curl utility. + +.Prerequisites + +* {product} is installed and running. + +For more information about installing {product}, see link:{installing-on-ocp-book-url}#assembly-install-rhdh-ocp-helm[{installing-on-ocp-book-title} with the Helm chart]. + +* You have access to the {product-short}. + +.Procedure + +. Identify a relevant API endpoint to which you want to send a request, such as `POST /api/permission/policies`. 
Adjust any request details according to your use case. ++ +-- +*For REST client*: + +* Authorization: Enter the generated token from the web console. +* HTTP method: Set to `POST`. +* URL: Enter the RBAC REST API base URL and endpoint such as +`pass:c[http://localhost:7007/api/permission/policies]`. + + +*For curl utility*: + +* `-X`: Set to `POST` +* `-H`: Set the following header: ++ +`Content-type: application/json` ++ +`Authorization: Bearer $token` ++ +`$token` is the requested token from the web console in a browser. + +* URL: Enter the following RBAC REST API base URL endpoint, such as `pass:c[http://localhost:7007/api/permission/policies]` +* `-d`: Add a request JSON body + +*Example requests*: + +`curl -X POST "http://localhost:7007/api/permission/roles" -d '{"memberReferences": ["group:default/example"], "name": "role:default/test", "metadata": { "description": "This is a test role" } }' -H "Content-Type: application/json" -H "Authorization: Bearer $token" -v` + +`curl -X POST "http://localhost:7007/api/permission/policies" -d '[{"entityReference":"role:default/test", "permission": "catalog-entity", "policy": "read", "effect":"allow"}]' -H "Content-Type: application/json" -H "Authorization: Bearer $token" -v` + +`curl -X POST "http://localhost:7007/api/permission/roles/conditions" -d '{"result": "CONDITIONAL", "roleEntityRef": "role:default/test", "pluginId": "catalog", "resourceType": "catalog-entity", "permissionMapping": ["read"], "conditions": {"rule": "IS_ENTITY_OWNER", "resourceType": "catalog-entity", "params": {"claims": ["group:default/janus-authors"]}}}' -H "Content-Type: application/json" -H "Authorization: Bearer $token" -v` + +-- + +. Execute the request and review the response. 
+ + diff --git a/modules/authorization/proc-rbac-ui-create-role.adoc b/modules/authorization/proc-rbac-ui-create-role.adoc new file mode 100644 index 0000000000..f26abb6cd2 --- /dev/null +++ b/modules/authorization/proc-rbac-ui-create-role.adoc @@ -0,0 +1,32 @@ +[id='proc-rbac-ui-create-role_{context}'] += Creating a role in the {product} Web UI + +You can create a role in the {product} using the Web UI. + +.Prerequisites +* You have an administrator role in the {product-short}. +* You have installed the `@janus-idp/backstage-plugin-rbac` plugin in {product-short}. For more information, see link:{LinkPluginsGuide}[{NameOfPluginsGuide}]. +* You have configured the required permission policies. For more information, see xref:con-rbac-config-permission-policies_{context}[]. + +.Procedure + +. Go to *Administration* at the bottom of the sidebar in the {product-short}. ++ +-- +The *RBAC* tab appears, displaying all the created roles in the {product-short}. +-- + +. (Optional) Click any role to view the role information on the *OVERVIEW* page. +. Click *CREATE* to create a role. +. Enter the name and description of the role in the given fields and click *NEXT*. +. Add users and groups using the search field, and click *NEXT*. +. Select *Plugin* and *Permission* from the drop-downs in the *Add permission policies* section. +. Select or clear the *Policy* that you want to set in the *Add permission policies* section, and click *NEXT*. +. Review the added information in the *Review and create* section. +. Click *CREATE*. + +.Verification + +The created role appears in the list available in the *RBAC* tab. 
+ + diff --git a/modules/authorization/proc-rbac-ui-delete-role.adoc b/modules/authorization/proc-rbac-ui-delete-role.adoc new file mode 100644 index 0000000000..48746147af --- /dev/null +++ b/modules/authorization/proc-rbac-ui-delete-role.adoc @@ -0,0 +1,29 @@ +[id='proc-rbac-ui-delete-role_{context}'] += Deleting a role in the {product} Web UI + +You can delete a role in the {product} using the Web UI. + +[NOTE] +==== +The policies generated from a `policy.csv` or ConfigMap file cannot be edited or deleted using the {product-short} Web UI. +==== + +.Prerequisites +* You have an administrator role in the {product-short}. +* You have installed the `@janus-idp/backstage-plugin-rbac` plugin in {product-short}. For more information, see link:{LinkPluginsGuide}[{NameOfPluginsGuide}]. +* You have configured the required permission policies. For more information, see xref:con-rbac-config-permission-policies_{context}[]. +* The role that you want to delete is created in the {product-short}. + +.Procedure + +. Go to *Administration* at the bottom of the sidebar in the {product-short}. ++ +-- +The *RBAC* tab appears, displaying all the created roles in the {product-short}. +-- + +. (Optional) Click any role to view the role information on the *OVERVIEW* page. +. Select the delete icon from the *Actions* column for the role that you want to delete. ++ +*Delete this role?* pop-up appears on the screen. +. Click *DELETE*. \ No newline at end of file diff --git a/modules/authorization/proc-rbac-ui-edit-role.adoc b/modules/authorization/proc-rbac-ui-edit-role.adoc new file mode 100644 index 0000000000..58387edfa2 --- /dev/null +++ b/modules/authorization/proc-rbac-ui-edit-role.adoc @@ -0,0 +1,31 @@ +[id='proc-rbac-ui-edit-role_{context}'] += Editing a role in the {product} Web UI + +You can edit a role in the {product} using the Web UI. + +[NOTE] +==== +The policies generated from a `policy.csv` or ConfigMap file cannot be edited or deleted using the {product-short} Web UI. 
+==== + +.Prerequisites +* You have an administrator role in the {product-short}. +* You have installed the `@janus-idp/backstage-plugin-rbac` plugin in {product-short}. For more information, see link:{LinkPluginsGuide}[{NameOfPluginsGuide}]. +* You have configured the required permission policies. For more information, see xref:con-rbac-config-permission-policies_{context}[]. +* The role that you want to edit is created in the {product-short}. + +.Procedure + +. Go to *Administration* at the bottom of the sidebar in the {product-short}. ++ +-- +The *RBAC* tab appears, displaying all the created roles in the {product-short}. +-- + +. (Optional) Click any role to view the role information on the *OVERVIEW* page. +. Select the edit icon for the role that you want to edit. +. Edit the details of the role, such as name, description, users and groups, and permission policies, and click *NEXT*. +. Review the edited details of the role and click *SAVE*. + +After editing a role, you can view the edited details of a role on the *OVERVIEW* page of a role. You can also edit a role’s users and groups or permissions by using the edit icon on the respective cards on the *OVERVIEW* page. + diff --git a/modules/authorization/proc-rbac-ui-manage-roles.adoc b/modules/authorization/proc-rbac-ui-manage-roles.adoc new file mode 100644 index 0000000000..09ca3d11de --- /dev/null +++ b/modules/authorization/proc-rbac-ui-manage-roles.adoc @@ -0,0 +1,10 @@ +[id='proc-rbac-ui-manage-roles_{context}'] += Managing role-based access controls (RBAC) using the {product} Web UI + +Administrators can use the {product-short} web interface (Web UI) to allocate specific roles and permissions to individual users or groups. Allocating roles ensures that access to resources and functionalities is regulated across the {product-short}. 
+ +With the administrator role in {product-short}, you can assign permissions to users and groups, which allow users or groups to view, create, modify, and delete the roles using the {product-short} Web UI. + +To access the RBAC features in the Web UI, you must install and configure the `@janus-idp/backstage-plugin-rbac` plugin as a dynamic plugin. For more information about installing a dynamic plugin, see link:{LinkPluginsGuide}[{NameOfPluginsGuide}]. + +After you install the `@janus-idp/backstage-plugin-rbac` plugin, the *Administration* option appears at the bottom of the sidebar. When you click *Administration*, the RBAC tab appears by default, displaying all of the existing roles created in the {product-short}. In the RBAC tab, you can also view the total number of users and groups, and the total number of permission policies associated with a role. You can also edit or delete a role using the *Actions* column. \ No newline at end of file diff --git a/modules/authorization/ref-rbac-conditional-policy-definition.adoc b/modules/authorization/ref-rbac-conditional-policy-definition.adoc new file mode 100644 index 0000000000..464e6486f5 --- /dev/null +++ b/modules/authorization/ref-rbac-conditional-policy-definition.adoc @@ -0,0 +1,347 @@ +[id='ref-rbac-conditional-policy-definition_{context}'] += Conditional policies definition + +You can access API endpoints for conditional policies in {product}. For example, to retrieve the available conditional rules, which can help you define these policies, you can access the `GET [api/plugins/condition-rules]` endpoint. 
+ +The `api/plugins/condition-rules` returns the condition parameters schemas, for example: + +[source,json] +---- +[ + { + "pluginId": "catalog", + "rules": [ + { + "name": "HAS_ANNOTATION", + "description": "Allow entities with the specified annotation", + "resourceType": "catalog-entity", + "paramsSchema": { + "type": "object", + "properties": { + "annotation": { + "type": "string", + "description": "Name of the annotation to match on" + }, + "value": { + "type": "string", + "description": "Value of the annotation to match on" + } + }, + "required": [ + "annotation" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "HAS_LABEL", + "description": "Allow entities with the specified label", + "resourceType": "catalog-entity", + "paramsSchema": { + "type": "object", + "properties": { + "label": { + "type": "string", + "description": "Name of the label to match on" + } + }, + "required": [ + "label" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "HAS_METADATA", + "description": "Allow entities with the specified metadata subfield", + "resourceType": "catalog-entity", + "paramsSchema": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "Property within the entities metadata to match on" + }, + "value": { + "type": "string", + "description": "Value of the given property to match on" + } + }, + "required": [ + "key" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "HAS_SPEC", + "description": "Allow entities with the specified spec subfield", + "resourceType": "catalog-entity", + "paramsSchema": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "Property within the entities spec to match on" + }, + "value": { + "type": "string", + "description": "Value of the given property to match on" + } + }, + 
"required": [ + "key" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "IS_ENTITY_KIND", + "description": "Allow entities matching a specified kind", + "resourceType": "catalog-entity", + "paramsSchema": { + "type": "object", + "properties": { + "kinds": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of kinds to match at least one of" + } + }, + "required": [ + "kinds" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "IS_ENTITY_OWNER", + "description": "Allow entities owned by a specified claim", + "resourceType": "catalog-entity", + "paramsSchema": { + "type": "object", + "properties": { + "claims": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of claims to match at least one on within ownedBy" + } + }, + "required": [ + "claims" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + } + ] + } + ... +] +---- + +The RBAC backend API constructs a condition JSON object based on the previous condition schema. + +== Examples of conditional policies + +In {product}, you can define conditional policies with or without criteria. You can use the following examples to define the conditions based on your use case: + +A condition without criteria:: ++ +-- +Consider a condition without criteria displaying catalogs only if user is a member of the owner group. To add this condition, you can use the catalog plugin schema `IS_ENTITY_OWNER` as follows: + +.Example condition without criteria +[source,json] +---- +{ + "rule": "IS_ENTITY_OWNER", + "resourceType": "catalog-entity", + "params": { + "claims": ["group:default/team-a"] + } +} +---- + +In the previous example, the only conditional parameter used is `claims`, which contains a list of user or group entity references. 
+ +You can apply the previous example condition to the RBAC REST API by adding additional parameters as follows: + +[source,json] +---- +{ + "result": "CONDITIONAL", + "roleEntityRef": "role:default/test", + "pluginId": "catalog", + "resourceType": "catalog-entity", + "permissionMapping": ["read"], + "conditions": { + "rule": "IS_ENTITY_OWNER", + "resourceType": "catalog-entity", + "params": { + "claims": ["group:default/team-a"] + } + } +} +---- +-- + +A condition with criteria:: ++ +-- +Consider a condition with criteria, which displays catalogs only if user is a member of owner group OR displays list of all catalog user groups. + +To add the criteria, you can add another rule as `IS_ENTITY_KIND` in the condition as follows: + +.Example condition with criteria +[source,json] +---- +{ + "anyOf": [ + { + "rule": "IS_ENTITY_OWNER", + "resourceType": "catalog-entity", + "params": { + "claims": ["group:default/team-a"] + } + }, + { + "rule": "IS_ENTITY_KIND", + "resourceType": "catalog-entity", + "params": { + "kinds": ["Group"] + } + } + ] +} +---- + +[NOTE] +==== +Running conditions in parallel during creation is not supported. Therefore, consider defining nested conditional policies based on the available criteria. 
+==== + +.Example of nested conditions +[source,json] +---- +{ + "anyOf": [ + { + "rule": "IS_ENTITY_OWNER", + "resourceType": "catalog-entity", + "params": { + "claims": ["group:default/team-a"] + } + }, + { + "rule": "IS_ENTITY_KIND", + "resourceType": "catalog-entity", + "params": { + "kinds": ["Group"] + } + } + ], + "not": { + "rule": "IS_ENTITY_KIND", + "resourceType": "catalog-entity", + "params": { "kinds": ["Api"] } + } +} +---- + +You can apply the previous example condition to the RBAC REST API by adding additional parameters as follows: + +[source,json] +---- +{ + "result": "CONDITIONAL", + "roleEntityRef": "role:default/test", + "pluginId": "catalog", + "resourceType": "catalog-entity", + "permissionMapping": ["read"], + "conditions": { + "anyOf": [ + { + "rule": "IS_ENTITY_OWNER", + "resourceType": "catalog-entity", + "params": { + "claims": ["group:default/team-a"] + } + }, + { + "rule": "IS_ENTITY_KIND", + "resourceType": "catalog-entity", + "params": { + "kinds": ["Group"] + } + } + ] + } +} +---- +-- + +The following examples can be used with {product-short} plugins. These examples can help you determine how to define conditional policies: + +.Conditional policy defined for Keycloak plugin +[source,json] +---- +{ + "result": "CONDITIONAL", + "roleEntityRef": "role:default/developer", + "pluginId": "catalog", + "resourceType": "catalog-entity", + "permissionMapping": ["update", "delete"], + "conditions": { + "not": { + "rule": "HAS_ANNOTATION", + "resourceType": "catalog-entity", + "params": { "annotation": "keycloak.org/realm", "value": "" } + } + } +} +---- + +The previous example of Keycloak plugin prevents users in the `role:default/developer` from updating or deleting users that are ingested into the catalog from the Keycloak plugin. + +[NOTE] +==== +In the previous example, the annotation `keycloak.org/realm` requires the value of ``. 
+==== + +.Conditional policy defined for Quay plugin +[source,json] +---- +{ + "result": "CONDITIONAL", + "roleEntityRef": "role:default/developer", + "pluginId": "scaffolder", + "resourceType": "scaffolder-action", + "permissionMapping": ["use"], + "conditions": { + "not": { + "rule": "HAS_ACTION_ID", + "resourceType": "scaffolder-action", + "params": { "actionId": "quay:create-repository" } + } + } +} +---- + +The previous example of Quay plugin prevents the role `role:default/developer` from using the Quay scaffolder action. Note that `permissionMapping` contains `use`, signifying that `scaffolder-action` resource type permission does not have a permission policy. + +For more information about permissions in {product}, see xref:ref-rbac-permission-policies_{context}[]. + + + + + diff --git a/modules/authorization/ref-rbac-permission-policies.adoc b/modules/authorization/ref-rbac-permission-policies.adoc new file mode 100644 index 0000000000..f5c4a2b5a2 --- /dev/null +++ b/modules/authorization/ref-rbac-permission-policies.adoc @@ -0,0 +1,220 @@ +[id='ref-rbac-permission-policies_{context}'] += Permission policies in {product} + +Permission policies in {product} are a set of rules to govern access to resources or functionalities. These policies state the authorization level that is granted to users based on their roles. The permission policies are implemented to maintain security and confidentiality within a given environment. + +You can define the following types of permissions in {product-short}: + +* resource type +* basic + +The distinction between the two permission types depends on whether a permission includes a defined resource type. 
+ +You can define the resource type permission using either the associated resource type or the permission name as shown in the following example: + +.Example resource type permission definition +[source,csv] +---- +p, role:default/myrole, catalog.entity.read, read, allow +g, user:default/myuser, role:default/myrole + +p, role:default/another-role, catalog-entity, read, allow +g, user:default/another-user, role:default/another-role +---- + +You can define the basic permission in {product-short} using the permission name as shown in the following example: + +.Example basic permission definition +[source,csv] +---- +p, role:default/myrole, catalog.entity.create, create, allow +g, user:default/myuser, role:default/myrole +---- + +The following permission policies are supported in the {product-short}: + +Catalog permissions:: + +[cols="15%,25%,15%,45%", frame="all", options="header"] +|=== +|Name +|Resource type +|Policy +|Description + +|`catalog.entity.read` +|`catalog-entity` +|read +|Allows user or role to read from the catalog + +|`catalog.entity.create` +| +|create +|Allows user or role to create catalog entities, including registering an existing component in the catalog + +|`catalog.entity.refresh` +|`catalog-entity` +|update +|Allows user or role to refresh a single or multiple entities from the catalog + +|`catalog.entity.delete` +|`catalog-entity` +|delete +|Allows user or role to delete a single or multiple entities from the catalog + +|`catalog.location.read` +| +|read +|Allows user or role to read a single or multiple locations from the catalog + +|`catalog.location.create` +| +|create +|Allows user or role to create locations within the catalog + +|`catalog.location.delete` +| +|delete +|Allows user or role to delete locations from the catalog +|=== + +Bulk import permissions:: + +[cols="15%,25%,15%,45%", frame="all", options="header"] +|=== +|Name +|Resource type +|Policy +|Description + +|`bulk.import` +|`bulk-import` +| +|Allows the user to access the 
bulk import endpoints, such as listing all repositories and organizations accessible by all GitHub integrations and managing the import requests. + +|=== + +Scaffolder permissions:: + +[cols="15%,25%,15%,45%", frame="all", options="header"] +|=== +|Name +|Resource type +|Policy +|Description + +|`scaffolder.action.execute` +|`scaffolder-action` +| +|Allows the execution of an action from a template + +|`scaffolder.template.parameter.read` +|`scaffolder-template` +|read +|Allows user or role to read a single or multiple one parameters from a template + +|`scaffolder.template.step.read` +|`scaffolder-template` +|read +|Allows user or role to read a single or multiple steps from a template + +|`scaffolder.task.create` +| +|create +|Allows the user or role to trigger software templates which create new scaffolder tasks + +|`scaffolder.task.cancel` +| +| +|Allows the user or role to cancel currently running scaffolder tasks + +|`scaffolder.task.read` +| +|read +|Allows user or role to read all scaffolder tasks and their associated events and logs +|=== + +RBAC permissions:: + +[cols="15%,25%,15%,45%", frame="all", options="header"] +|=== +|Name +|Resource type +|Policy +|Description + +|`policy.entity.read` +|`policy-entity` +|read +|Allows user or role to read permission policies and roles + +|`policy.entity.create` +|`policy-entity` +|create +|Allows user or role to create a single or multiple permission policies and roles + +|`policy.entity.update` +|`policy-entity` +|update +|Allows user or role to update a single or multiple permission policies and roles + +|`policy.entity.delete` +|`policy-entity` +|delete +|Allows user or role to delete a single or multiple permission policies and roles +|=== + +Kubernetes permissions:: + +[cols="15%,25%,15%,45%", frame="all", options="header"] +|=== +|Name +|Resource type +|Policy +|Description + +|`kubernetes.proxy` +| +| +|Allows user or role to access the proxy endpoint +|=== + +OCM permissions:: + +[cols="15%,25%,15%,45%", 
frame="all", options="header"] +|=== +|Name +|Resource type +|Policy +|Description + +|`ocm.entity.read` +| +|read +|Allows user or role to read from the OCM plugin + +|`ocm.cluster.read` +| +|read +|Allows user or role to read the cluster information in the OCM plugin +|=== + +Topology permissions:: + +[cols="15%,25%,15%,45%", frame="all", options="header"] +|=== +|Name +|Resource type +|Policy +|Description + +|`topology.view.read` +| +|read +|Allows user or role to view the topology plugin + +|`kubernetes.proxy` +| +| +|Allows user or role to access the proxy endpoint, allowing them to read pod logs and events within {product-very-short} +|=== diff --git a/modules/authorization/ref-rbac-rest-api-endpoints.adoc b/modules/authorization/ref-rbac-rest-api-endpoints.adoc new file mode 100644 index 0000000000..1e78f2ac26 --- /dev/null +++ b/modules/authorization/ref-rbac-rest-api-endpoints.adoc @@ -0,0 +1,1054 @@ +[id='ref-rbac-rest-api-endpoints_{context}'] += Supported RBAC REST API endpoints + +The RBAC REST API provides endpoints for managing roles, permissions, and conditional policies in the {product-short} and for retrieving information about the roles and policies. + +== Roles + +The RBAC REST API supports the following endpoints for managing roles in the {product}. + +[GET] /api/permission/roles:: ++ +-- +Returns all roles in {product-short}. + +.Example response (JSON) +[source,json] +---- +[ + { + "memberReferences": ["user:default/username"], + "name": "role:default/guests" + }, + { + "memberReferences": [ + "group:default/groupname", + "user:default/username" + ], + "name": "role:default/rbac_admin" + } +] +---- +-- + +[GET] /api/permission/roles/____/____/____:: ++ +-- +Returns information for a single role in {product-short}. 
+ +.Example response (JSON) +[source,json] +---- +[ + { + "memberReferences": [ + "group:default/groupname", + "user:default/username" + ], + "name": "role:default/rbac_admin" + } +] +---- +-- + +[POST] /api/permission/roles/____/____/____:: ++ +-- +Creates a role in {product-short}. + +.Request parameters +[cols="15%,45%,15%,25%", frame="all", options="header"] +|=== +|Name +|Description +|Type +|Presence + +|`body` +|The `memberReferences`, `group`, `namespace`, and `name` the new role to be created. +|Request body +|Required +|=== + +.Example request body (JSON) +[source,json] +---- +{ + "memberReferences": ["group:default/test"], + "name": "role:default/test_admin" +} +---- + +.Example response +[source] +---- +201 Created +---- +-- + +[PUT] /api/permission/roles/____/____/____:: ++ +-- +Updates `memberReferences`, `kind`, `namespace`, or `name` for a role in {product-short}. + +.Request parameters +The request body contains the `oldRole` and `newRole` objects: +[cols="15%,45%,15%,25%", frame="all", options="header"] +|=== +|Name +|Description +|Type +|Presence + +|`body` +|The `memberReferences`, `group`, `namespace`, and `name` the new role to be created. +|Request body +|Required +|=== + +.Example request body (JSON) +[source,json] +---- +{ + "oldRole": { + "memberReferences": ["group:default/test"], + "name": "role:default/test_admin" + }, + "newRole": { + "memberReferences": ["group:default/test", "user:default/test2"], + "name": "role:default/test_admin" + } +} +---- + +.Example response +[source] +---- +200 OK +---- +-- + +[DELETE] /api/permission/roles/____/____/____?memberReferences=:: ++ +-- +Deletes the specified user or group from a role in {product-short}. 
+ +.Request parameters +[cols="15%,45%,15%,25%", frame="all", options="header"] +|=== +|Name +|Description +|Type +|Presence + +|`kind` +|Kind of the entity +|String +|Required + +|`namespace` +|Namespace of the entity +|String +|Required + +|`name` +|Name of the entity +|String +|Required + +|`memberReferences` +|Associated group information +|String +|Required +|=== + +.Example response +[source] +---- +204 +---- +-- + +[DELETE] /api/permission/roles/____/____/____:: ++ +-- +Deletes a specified role from {product-short}. + +.Request parameters +[cols="15%,45%,15%,25%", frame="all", options="header"] +|=== +|Name +|Description +|Type +|Presence + +|`kind` +|Kind of the entity +|String +|Required + +|`namespace` +|Namespace of the entity +|String +|Required + +|`name` +|Name of the entity +|String +|Required +|=== + +.Example response +[source] +---- +204 +---- +-- + +== Permission policies + +The RBAC REST API supports the following endpoints for managing permission policies in the {product}. + +[GET] /api/permission/policies:: ++ +-- +Returns permission policies list for all users. + +.Example response (JSON) +[source,json] +---- +[ + { + "entityReference": "role:default/test", + "permission": "catalog-entity", + "policy": "read", + "effect": "allow", + "metadata": { + "source": "csv-file" + } + }, + { + "entityReference": "role:default/test", + "permission": "catalog.entity.create", + "policy": "use", + "effect": "allow", + "metadata": { + "source": "csv-file" + } + }, +] +---- +-- + +[GET] /api/permission/policies/____/____/____:: ++ +-- +Returns permission policies related to the specified entity reference. 
+ +.Request parameters +[cols="15%,45%,15%,25%", frame="all", options="header"] +|=== +|Name +|Description +|Type +|Presence + +|`kind` +|Kind of the entity +|String +|Required + +|`namespace` +|Namespace of the entity +|String +|Required + +|`name` +|Name related to the entity +|String +|Required +|=== + +.Example response (JSON) +[source,json] +---- +[ + { + "entityReference": "role:default/test", + "permission": "catalog-entity", + "policy": "read", + "effect": "allow", + "metadata": { + "source": "csv-file" + } + }, + { + "entityReference": "role:default/test", + "permission": "catalog.entity.create", + "policy": "use", + "effect": "allow", + "metadata": { + "source": "csv-file" + } + } +] +---- +-- + +[POST] /api/permission/policies:: ++ +-- +Creates a permission policy for a specified entity. + +.Request parameters +[cols="15%,45%,15%,25%", frame="all", options="header"] +|=== +|Name +|Description +|Type +|Presence + +|`entityReference` +|Reference values of an entity including `kind`, `namespace`, and `name` +|String +|Required + +|`permission` +|Permission from a specific plugin, resource type, or name +|String +|Required + +|`policy` +|Policy action for the permission, such as `create`, `read`, `update`, `delete`, or `use` +|String +|Required + +|`effect` +|Indication of allowing or not allowing the policy +|String +|Required +|=== + +.Example request body (JSON) +[source,json] +---- +[ + { + "entityReference": "role:default/test", + "permission": "catalog-entity", + "policy": "read", + "effect": "allow" + } +] +---- + +.Example response +[source] +---- +201 Created +---- +-- + +[PUT] /api/permission/policies/____/____/____:: ++ +-- +Updates a permission policy for a specified entity. 
+ +.Request parameters +The request body contains the `oldPolicy` and `newPolicy` objects: +[cols="15%,45%,15%,25%", frame="all", options="header"] +|=== +|Name +|Description +|Type +|Presence + +|`permission` +|Permission from a specific plugin, resource type, or name +|String +|Required + +|`policy` +|Policy action for the permission, such as `create`, `read`, `update`, `delete`, or `use` +|String +|Required + +|`effect` +|Indication of allowing or not allowing the policy +|String +|Required +|=== + +.Example request body (JSON) +[source,json] +---- +{ + "oldPolicy": [ + { + "permission": "catalog-entity", + "policy": "read", + "effect": "allow" + }, + { + "permission": "catalog.entity.create", + "policy": "create", + "effect": "allow" + } + ], + "newPolicy": [ + { + "permission": "catalog-entity", + "policy": "read", + "effect": "deny" + }, + { + "permission": "policy-entity", + "policy": "read", + "effect": "allow" + } + ] +} +---- + +.Example response +[source] +---- +200 +---- +-- + +[DELETE] /api/permission/policies/____/____/____?permission={value1}&policy={value2}&effect={value3}:: ++ +-- +Deletes a permission policy added to the specified entity. + +.Request parameters +[cols="15%,45%,15%,25%", frame="all", options="header"] +|=== +|Name +|Description +|Type +|Presence + +|`kind` +|Kind of the entity +|String +|Required + +|`namespace` +|Namespace of the entity +|String +|Required + +|`name` +|Name related to the entity +|String +|Required + +|`permission` +|Permission from a specific plugin, resource type, or name +|String +|Required + +|`policy` +|Policy action for the permission, such as `create`, `read`, `update`, `delete`, or `use` +|String +|Required + +|`effect` +|Indication of allowing or not allowing the policy +|String +|Required +|=== + +.Example response +[source] +---- +204 No Content +---- +-- + +[DELETE] /api/permission/policies/____/____/____:: ++ +-- +Deletes all permission policies added to the specified entity. 
+ +.Request parameters +[cols="15%,45%,15%,25%", frame="all", options="header"] +|=== +|Name +|Description +|Type +|Presence + +|`kind` +|Kind of the entity +|String +|Required + +|`namespace` +|Namespace of the entity +|String +|Required + +|`name` +|Name related to the entity +|String +|Required +|=== + +.Example response +[source] +---- +204 No Content +---- +-- + +[GET] /api/permission/plugins/policies:: ++ +-- +Returns permission policies for all static plugins. + +.Example response (JSON) +[source,json] +---- +[ + { + "pluginId": "catalog", + "policies": [ + { + "isResourced": true, + "permission": "catalog-entity", + "policy": "read" + }, + { + "isResourced": false, + "permission": "catalog.entity.create", + "policy": "create" + }, + { + "isResourced": true, + "permission": "catalog-entity", + "policy": "delete" + }, + { + "isResourced": true, + "permission": "catalog-entity", + "policy": "update" + }, + { + "isResourced": false, + "permission": "catalog.location.read", + "policy": "read" + }, + { + "isResourced": false, + "permission": "catalog.location.create", + "policy": "create" + }, + { + "isResourced": false, + "permission": "catalog.location.delete", + "policy": "delete" + } + ] + }, + ... +] + +---- +-- + +== Conditional policies + +The RBAC REST API supports the following endpoints for managing conditional policies in the {product}. + +[GET] /api/permission/plugins/condition-rules:: ++ +-- +Returns available conditional rule parameter schemas for the available plugins that are enabled in {product-short}. 
+ +.Example response (JSON) +[source,json] +---- +[ + { + "pluginId": "catalog", + "rules": [ + { + "name": "HAS_ANNOTATION", + "description": "Allow entities with the specified annotation", + "resourceType": "catalog-entity", + "paramsSchema": { + "type": "object", + "properties": { + "annotation": { + "type": "string", + "description": "Name of the annotation to match on" + }, + "value": { + "type": "string", + "description": "Value of the annotation to match on" + } + }, + "required": [ + "annotation" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "HAS_LABEL", + "description": "Allow entities with the specified label", + "resourceType": "catalog-entity", + "paramsSchema": { + "type": "object", + "properties": { + "label": { + "type": "string", + "description": "Name of the label to match on" + } + }, + "required": [ + "label" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "HAS_METADATA", + "description": "Allow entities with the specified metadata subfield", + "resourceType": "catalog-entity", + "paramsSchema": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "Property within the entities metadata to match on" + }, + "value": { + "type": "string", + "description": "Value of the given property to match on" + } + }, + "required": [ + "key" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "HAS_SPEC", + "description": "Allow entities with the specified spec subfield", + "resourceType": "catalog-entity", + "paramsSchema": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "Property within the entities spec to match on" + }, + "value": { + "type": "string", + "description": "Value of the given property to match on" + } + }, + "required": [ + "key" + ], + "additionalProperties": false, + 
"$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "IS_ENTITY_KIND", + "description": "Allow entities matching a specified kind", + "resourceType": "catalog-entity", + "paramsSchema": { + "type": "object", + "properties": { + "kinds": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of kinds to match at least one of" + } + }, + "required": [ + "kinds" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "IS_ENTITY_OWNER", + "description": "Allow entities owned by a specified claim", + "resourceType": "catalog-entity", + "paramsSchema": { + "type": "object", + "properties": { + "claims": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of claims to match at least one on within ownedBy" + } + }, + "required": [ + "claims" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + } + ] + } + ... +] +---- +-- + +[GET] /api/permission/roles/conditions/:id:: ++ +-- +Returns conditions for the specified ID. + +.Example response (JSON) +[source,json] +---- +{ + "id": 1, + "result": "CONDITIONAL", + "roleEntityRef": "role:default/test", + "pluginId": "catalog", + "resourceType": "catalog-entity", + "permissionMapping": ["read"], + "conditions": { + "anyOf": [ + { + "rule": "IS_ENTITY_OWNER", + "resourceType": "catalog-entity", + "params": { + "claims": ["group:default/team-a"] + } + }, + { + "rule": "IS_ENTITY_KIND", + "resourceType": "catalog-entity", + "params": { + "kinds": ["Group"] + } + } + ] + } +} +---- +-- + +[GET] /api/permission/roles/conditions:: ++ +-- +Returns list of all conditions for all roles. 
+ +.Example response (JSON) +[source,json] +---- +[ + { + "id": 1, + "result": "CONDITIONAL", + "roleEntityRef": "role:default/test", + "pluginId": "catalog", + "resourceType": "catalog-entity", + "permissionMapping": ["read"], + "conditions": { + "anyOf": [ + { + "rule": "IS_ENTITY_OWNER", + "resourceType": "catalog-entity", + "params": { + "claims": ["group:default/team-a"] + } + }, + { + "rule": "IS_ENTITY_KIND", + "resourceType": "catalog-entity", + "params": { + "kinds": ["Group"] + } + } + ] + } + } +] +---- +-- + +[POST] /api/permission/roles/conditions:: ++ +-- +Creates a conditional policy for the specified role. + +.Request parameters +[cols="15%,45%,15%,25%", frame="all", options="header"] +|=== +|Name +|Description +|Type +|Presence + +|`result` +|Always has the value `CONDITIONAL` +|String +|Required + +|`roleEntityRef` +|String entity reference to the RBAC role, such as `role:default/dev` +|String +|Required + +|`pluginId` +|Corresponding plugin ID, such as `catalog` +|String +|Required + +|`permissionMapping` +|Array permission action, such as `['read', 'update', 'delete']` +|String array +|Required + +|`resourceType` +|Resource type provided by the plugin, such as `catalog-entity` +|String +|Required + +|`conditions` +|Condition JSON with parameters or array parameters joined by criteria +|JSON +|Required + +|`name` +|Name of the role +|String +|Required + +|`metadata.description` +|The description of the role +|String +|Optional +|=== + +.Example request body (JSON) +[source,json] +---- +{ + "result": "CONDITIONAL", + "roleEntityRef": "role:default/test", + "pluginId": "catalog", + "resourceType": "catalog-entity", + "permissionMapping": ["read"], + "conditions": { + "rule": "IS_ENTITY_OWNER", + "resourceType": "catalog-entity", + "params": { + "claims": ["group:default/team-a"] + } + } +} +---- + +.Example response (JSON) +[source,json] +---- +{ + "id": 1 +} +---- +-- + +[PUT] /permission/roles/conditions/:id:: ++ +-- +Updates a condition policy 
for a specified ID. + +.Request parameters +[cols="15%,45%,15%,25%", frame="all", options="header"] +|=== +|Name +|Description +|Type +|Presence + +|`result` +|Always has the value `CONDITIONAL` +|String +|Required + +|`roleEntityRef` +|String entity reference to the RBAC role, such as `role:default/dev` +|String +|Required + +|`pluginId` +|Corresponding plugin ID, such as `catalog` +|String +|Required + +|`permissionMapping` +|Array permission action, such as `['read', 'update', 'delete']` +|String array +|Required + +|`resourceType` +|Resource type provided by the plugin, such as `catalog-entity` +|String +|Required + +|`conditions` +|Condition JSON with parameters or array parameters joined by criteria +|JSON +|Required + +|`name` +|Name of the role +|String +|Required + +|`metadata.description` +|The description of the role +|String +|Optional +|=== + +.Example request body (JSON) +[source,json] +---- +{ + "result": "CONDITIONAL", + "roleEntityRef": "role:default/test", + "pluginId": "catalog", + "resourceType": "catalog-entity", + "permissionMapping": ["read"], + "conditions": { + "anyOf": [ + { + "rule": "IS_ENTITY_OWNER", + "resourceType": "catalog-entity", + "params": { + "claims": ["group:default/team-a"] + } + }, + { + "rule": "IS_ENTITY_KIND", + "resourceType": "catalog-entity", + "params": { + "kinds": ["Group"] + } + } + ] + } +} +---- + +.Example response +[source] +---- +200 +---- +-- + +[DELETE] /api/permission/roles/conditions/:id:: ++ +-- +Deletes a conditional policy for the specified ID. + +.Example response +[source] +---- +204 +---- +-- + +== User statistics + +The `licensed-users-info-backend` plugin exposes various REST API endpoints to retrieve data related to logged-in users. + +No additional configuration is required for the `licensed-users-info-backend` plugin. If the RBAC backend plugin is enabled, then an administrator role must be assigned to access the endpoints, as the endpoints are protected by the `policy.entity.read` permission. 
+ +The base URL for user statistics endpoints is `pass:c[http://SERVER:PORT/api/licensed-users-info]`, such as `pass:c[http://localhost:7007/api/licensed-users-info]`. + +[GET] /users/quantity:: ++ +-- +Returns the total number of logged-in users. + +.Example request +[source,bash] +---- +curl -X GET "http://localhost:7007/api/licensed-users-info/users/quantity" \ +-H "Content-Type: application/json" \ +-H "Authorization: Bearer $token" +---- + +.Example response +[source,json] +---- +{ "quantity": "2" } +---- +-- + +[GET] /users:: ++ +-- +Returns a list of logged-in users with their details. + +.Example request +[source,bash] +---- +curl -X GET "http://localhost:7007/api/licensed-users-info/users" \ +-H "Content-Type: application/json" \ +-H "Authorization: Bearer $token" +---- + +.Example response +[source,json] +---- +[ + { + "userEntityRef": "user:default/dev", + "lastTimeLogin": "Thu, 22 Aug 2024 16:27:41 GMT", + "displayName": "John Leavy", + "email": "dev@redhat.com" + } +] +---- +-- + +[GET] /users:: ++ +-- +Returns a list of logged-in users in CSV format. + +.Example request +[source,bash] +---- +curl -X GET "http://localhost:7007/api/licensed-users-info/users" \ +-H "Content-Type: text/csv" \ +-H "Authorization: Bearer $token" +---- + +.Example response +[source,csv] +---- +userEntityRef,displayName,email,lastTimeLogin +user:default/dev,John Leavy,dev@redhat.com,"Thu, 22 Aug 2024 16:27:41 GMT" +---- +-- + + diff --git a/modules/con-rhdh-plugins.adoc b/modules/con-rhdh-plugins.adoc new file mode 100644 index 0000000000..829e0b401b --- /dev/null +++ b/modules/con-rhdh-plugins.adoc @@ -0,0 +1,7 @@ +[id="con-rhdh-plugins"] + += Plugins in {product} + +The {product} application offers a unified platform with various plugins. Using the plugin ecosystem within the {product-short} application, you can access any kind of development infrastructure or software development tool. 
+ +The plugins in {product-short} maximize productivity and streamline development workflows by maintaining consistency in the overall user experience. \ No newline at end of file diff --git a/modules/conscious-language.adoc b/modules/conscious-language.adoc new file mode 100644 index 0000000000..5772b29a50 --- /dev/null +++ b/modules/conscious-language.adoc @@ -0,0 +1,12 @@ +//// +Conscious language note +Add this to the preface. +//// +[preface] +[discrete] +[id="making-open-source-more-inclusive"] += Making open source more inclusive +Red Hat is committed to replacing problematic language in our code, documentation, and web properties. +We are beginning with these four terms: master, slave, blacklist, and whitelist. +Because of the enormity of this endeavor, these changes will be implemented gradually over several upcoming releases. +For more details, see link:https://www.redhat.com/en/blog/making-open-source-more-inclusive-eradicating-problematic-language[our CTO Chris Wright's message]. 
diff --git a/modules/customization/proc-customize-rhdh-branding-logo.adoc b/modules/customization/proc-customize-rhdh-branding-logo.adoc new file mode 100644 index 0000000000..3f7fff0073 --- /dev/null +++ b/modules/customization/proc-customize-rhdh-branding-logo.adoc @@ -0,0 +1,34 @@ +// Module included in the following assemblies: +// assembly-customize-rhdh-theme.adoc +//restored module as per slack thread: https://redhat-internal.slack.com/archives/C04CUSD4JSG/p1726856912238529 +
+[id="proc-customize-rhdh-branding-logo_{context}"] += Customizing the branding logo of your {product-short} instance +
+You can customize the branding logo of your {product-short} instance by configuring the `branding` section of the `app-config-rhdh.yaml` file, as shown in the following example: +
+[source,yaml] +---- +app: + branding: + fullLogo: ${BASE64_EMBEDDED_FULL_LOGO} <1> + iconLogo: ${BASE64_EMBEDDED_ICON_LOGO} <2> +---- +
+where: +
+<1> `fullLogo` is the logo on the expanded (pinned) sidebar and expects a base64 encoded image.
+<2> `iconLogo` is the logo on the collapsed (unpinned) sidebar and expects a base64 encoded image.
+
+You can also customize the width of the branding logo by setting a value for the `fullLogoWidth` field in the `branding` section, as shown in the following example: +
+[source,yaml] +---- +app: + branding: + fullLogoWidth: 110px <1> +# ... +---- +
+<1> The default value for the logo width is `110px`. The following units are supported: `integer`, `px`, `em`, `rem`, percentage. 
+ diff --git a/modules/customization/proc-customize-rhdh-font.adoc b/modules/customization/proc-customize-rhdh-font.adoc new file mode 100644 index 0000000000..9ba0c69e82 --- /dev/null +++ b/modules/customization/proc-customize-rhdh-font.adoc @@ -0,0 +1,59 @@ +// Module included in the following assemblies: +// assembly-customize-rhdh-theme.adoc + +[id="proc-customize-rhdh-font_{context}"] += Customizing the font for your {product-short} instance + +You can configure the `typography` section of the `app-config-rhdh.yaml` file to change the default font family and size of the page text, as well as the font family and size of each heading level, as shown in the following example: + +[source,yaml] +---- +app: + branding: + theme: + light: + typography: + fontFamily: "Times New Roman" + htmlFontSize: 11 # smaller is bigger + h1: + fontFamily: "Times New Roman" + fontSize: 40 + h2: + fontFamily: "Times New Roman" + fontSize: 30 + h3: + fontFamily: "Times New Roman" + fontSize: 30 + h4: + fontFamily: "Times New Roman" + fontSize: 30 + h5: + fontFamily: "Times New Roman" + fontSize: 30 + h6: + fontFamily: "Times New Roman" + fontSize: 30 + dark: + typography: + fontFamily: "Times New Roman" + htmlFontSize: 11 # smaller is bigger + h1: + fontFamily: "Times New Roman" + fontSize: 40 + h2: + fontFamily: "Times New Roman" + fontSize: 30 + h3: + fontFamily: "Times New Roman" + fontSize: 30 + h4: + fontFamily: "Times New Roman" + fontSize: 30 + h5: + fontFamily: "Times New Roman" + fontSize: 30 + h6: + fontFamily: "Times New Roman" + fontSize: 30 +# ... 
+---- diff --git a/modules/customization/proc-customize-rhdh-page-theme.adoc b/modules/customization/proc-customize-rhdh-page-theme.adoc new file mode 100644 index 0000000000..45e17866d5 --- /dev/null +++ b/modules/customization/proc-customize-rhdh-page-theme.adoc @@ -0,0 +1,43 @@ +// Module included in the following assemblies: +// assembly-customize-rhdh-theme.adoc + +[id="proc-customize-rhdh-page-theme_{context}"] += Customizing the page theme header for your {product-short} instance + +You can customize the header color for the light and dark theme modes in your {product-short} instance by modifying the `branding.theme` section of the `app-config-rhdh.yaml` file. You can also customize the page headers for additional {product-short} pages, such as the *Home*, *Catalog*, and *APIs* pages. + +[source,yaml] +---- +app: + branding: + theme: + light: <1> + palette: {} + pageTheme: + default: <2> + backgroundColor: "" <3> + fontColor: "" <4> + shape: none <5> + apis: <6> + backgroundColor: "" + fontColor: "" + shape: none + dark: + palette: {} + pageTheme: + default: + backgroundColor: "" + fontColor: "" + shape: none +# ... +---- + +<1> The theme mode, for example, `light` or `dark` +<2> The `yaml` header for the default page theme configuration +<3> The color of the page header background, for example, `#ffffff` or `white` +<4> The color of the text in the page header, for example, `#000000` or `black` +<5> The pattern on the page header, for example, `wave`, `round`, or `none` +<6> The `yaml` header for a specific page theme configuration, for example, `apis`, `home` + +//The page theme name depends on the plugin that you are customizing the page header for. +//can include information about this topic in the future. 
diff --git a/modules/customization/proc-customize-rhdh-palette.adoc b/modules/customization/proc-customize-rhdh-palette.adoc new file mode 100644 index 0000000000..20f3b418d7 --- /dev/null +++ b/modules/customization/proc-customize-rhdh-palette.adoc @@ -0,0 +1,43 @@ +// Module included in the following assemblies: +// assembly-customize-rhdh-theme.adoc + +[id="proc-customize-rhdh-branding_{context}"] += Customizing the theme mode color palettes for your {product-short} instance + +You can customize the color palettes of the light and dark theme modes in your {product-short} instance by configuring the `light.palette` and `dark.palette` parameters in the `branding.theme` section of the `app-config-rhdh.yaml` file, as shown in the following example: + +[source,yaml] +---- +app: + branding: + theme: + light: + palette: + primary: + main: <1> + navigation: + indicator: <2> + pageTheme: + default: + backgroundColor: [, ] <3> + dark: + palette: + primary: + main: <4> + navigation: + indicator: <5> + pageTheme: + default: + backgroundColor: [, ] <6> +# ... 
+---- + +<1> The main primary color for the light color palette, for example, `#ffffff` or `white` +<2> The color of the navigation indicator for the light color palette, which is a vertical bar that indicates the selected tab in the navigation panel, for example, `#FF0000` or `red` +<3> The background color for the default page theme for the light color palette, for example, `#ffffff` or `white` +<4> The main primary color for the dark color palette, for example, `#000000` or `black` +<5> The color of the navigation indicator for the dark color palette, which is a vertical bar that indicates the selected tab in the navigation panel, for example, `#FF0000` or `red` +<6> The background color for the default page theme for the dark color palette, for example, `#000000` or `black` + +.Additional resources +* xref:proc-customizing-rhdh-theme-mode_{context}[] diff --git a/modules/customization/proc-customize-rhdh-sidebar-logo.adoc b/modules/customization/proc-customize-rhdh-sidebar-logo.adoc new file mode 100644 index 0000000000..55fc38a9f0 --- /dev/null +++ b/modules/customization/proc-customize-rhdh-sidebar-logo.adoc @@ -0,0 +1,17 @@ +// Module included in the following assemblies: +// None +//Archived since the section has been merged with proc-customize-rhdh-branding-logo.adoc in PR #514 + +[id="proc-customize-rhdh-sidebar-logo_{context}"] += Customizing the sidebar logo width for your {product-short} instance +You can customize the width of the sidebar logo by setting a value for the `fullLogoWidth` field in the `branding` section of the `app-config-rhdh.yaml` file, as shown in the following example: + +[source,yaml] +---- +app: + branding: + fullLogoWidth: 110px +# ... +---- + +The default value for the logo width is 110px. The following units are supported: integer, `px`, `em`, `rem`, percentage. 
diff --git a/modules/customization/proc-customize-rhdh-sidebar-menuitems.adoc b/modules/customization/proc-customize-rhdh-sidebar-menuitems.adoc new file mode 100644 index 0000000000..61895a6456 --- /dev/null +++ b/modules/customization/proc-customize-rhdh-sidebar-menuitems.adoc @@ -0,0 +1,128 @@ +[id='proc-customize-rhdh-sidebar-menuitems_{context}'] += Customizing the sidebar menu items for your {product-short} instance +
+The sidebar menu in {product} consists of two main parts: +
+* *Main menu items*: These items are the static menu items that form the core navigation structure of the sidebar. These menu items remain consistent and are predefined. +
+* *Dynamic plugin menu items*: These items are displayed beneath the main menu and can be customized based on the plugins installed. This section is dynamic and can change based on your preferences and installed plugins. +
+.Procedure +
+. Customize the main menu items using the following steps: ++ +-- +.. Open the `app-config-rhdh.yaml` file. +.. To customize the order and parent-child relationships for the main menu items, use the `dynamicPlugins.frontend.default.main-menu-items.menuItems` field. +.. For dynamic plugin menu items, use the `dynamicPlugins.frontend.<package_name>.menuItems` field. 
+ +.Example `app-config-rhdh.yaml` file +[source,yaml] +---- +dynamicPlugins: + frontend: + : # same as `scalprum.name` key in plugin's `package.json` + menuItems: # optional, allows you to configure plugin menu items in the main sidebar navigation + : # unique name in the plugin menu items list <1> + icon: home | group | category | extension | school | __ # <2> + title: My Plugin Page # optional, same as `menuItem.text` in `dynamicRoutes` <3> + priority: 10 # optional, defines the order of menu items in the sidebar <4> + parent: favorites # optional, defines parent-child relationships for nested menu items <5> +---- + +You can modify the fields in the previous example to configure the desired order and parent-child relationships of the sidebar menu items. + +<1> This attribute represents a unique name in the main sidebar navigation. It can denote either a standalone menu item or a parent menu item. If this attribute represents a plugin menu item, the name of the attribute must match with the corresponding path in `dynamicRoutes`. For example, if `dynamicRoutes` defines `path: /my-plugin`, then `menu_item_name` must be defined as `my-plugin`. ++ +For more complex, multi-segment paths such as `path: /metrics/users/info`, the `menu_item_name` must use dot notation to represent the full path, for example, `metrics.users.info`. Trailing and leading slashes in paths are ignored. For example, `path: /docs` results in `menu_item_name: docs`, and `path: /metrics/users` results in `menu_item_name: metrics.users`. + +<2> This optional attribute specifies the icon for the menu item. You can use default icons or extend the icon set with dynamic plugins. {product-short} also provides additional icons in its internal library, such as: ++ +.Home Icon in the internal library +[source, yaml] +---- +dynamicPlugins: + frontend: + : + menuItems: + : + icon: home +---- ++ +Similarly, the internal library includes icons for `group`, `category`, `extension`, and `school`. 
If the icon is already defined in the `dynamicRoutes` configuration under `menuItem.icon`, it can be removed from the `menuItems` configuration. Also, both SVG and HTML image icons are supported. For example: ++ +.Example SVG icon +[source,html] +---- +icon: ... +---- ++ +.Example image icon +[source,html] +---- +icon: https://img.icons8.com/ios-glyphs/20/FFFFFF/shop.png +---- +
+<3> This optional attribute specifies the title of the menu item. It can be removed if the title is already specified in the `dynamicRoutes` configuration under `menuItem.text`. +
+<4> This optional attribute sets the order in which menu items appear in the sidebar. The default priority is 0, which places the item at the bottom of the list. A higher priority value places the item higher in the sidebar. You can define this attribute for each section. +
+<5> This optional attribute specifies the parent menu item under which the current item is nested. If this attribute is used, the parent menu item must be defined elsewhere in the `menuItems` configuration of any enabled plugin. You can define this attribute for each section. +
+.Example `menuItems` configuration +[source,yaml] +---- +dynamicPlugins: + frontend: + <package_name>: + dynamicRoutes: + - path: /my-plugin + module: CustomModule + importName: FooPluginPage + menuItem: + icon: fooIcon + text: Foo Plugin Page + menuItems: + my-plugin: # matches `path` in `dynamicRoutes` + priority: 10 # controls order of plugins under the parent menu item + parent: favorites # nests this plugin under the `favorites` parent menu item + favorites: # configuration for the parent menu item + icon: favorite # icon from RHDH system icons + title: Favorites # title for the parent menu item + priority: 100 # controls the order of this top-level menu item +---- +-- +
+. To ensure that a menu item is identified as a main menu item, you must add the `default.` prefix to its key. 
For example: ++ +-- +.Example configuration of main menu items in sidebar navigation +[source,yaml] +---- +dynamicPlugins: + frontend: + default.main-menu-items: # key for configuring static main menu items + default.<menu_item_name>: # key of the menu item configuration. `default.` prefix is required for a main menu item key <1> + parent: my_menu_group # optional, specifies the parent menu item for this item + priority: 10 # optional, specifies the order of this menu item within its menu level + default.<menu_group_name>: # must be configured if it is specified as the parent of any menu items. `default.` prefix is required for a main menu item key + icon: my_menu_group_icon # required for parent menu items, defines the icon for the menu group + title: my_menu_group_title # required for parent menu items, defines the title for the menu group + priority: 100 # optional, specifies the order of the parent menu item in the sidebar +---- +
+
+<1> The `default.` prefix identifies an item as a main menu item. You can add the `default.` prefix to both individual menu item and parent menu group configurations, such as `default.<menu_group_name>` in the previous example. +
+[NOTE] +==== +The default priority of main menu items determines their order in the sidebar. You can customize the order of the static main menu items by adjusting their priority values. Ensure that the priority and title of each item are clear to facilitate easy reordering. 
+==== +-- + + + + + + + diff --git a/modules/customization/proc-customize-rhdh-tab-tooltip.adoc b/modules/customization/proc-customize-rhdh-tab-tooltip.adoc new file mode 100644 index 0000000000..66de517a91 --- /dev/null +++ b/modules/customization/proc-customize-rhdh-tab-tooltip.adoc @@ -0,0 +1,14 @@ +// Module included in the following assemblies: +// assembly-customize-rhdh-theme.adoc + +[id="proc-customizing-rhdh-tab-tooltip_{context}"] += Customizing the application title for your {product-short} instance + +You can customize the app title text by setting a value for the `title` field, as shown in the following example: + +[source,yaml] +---- +app: + title: My custom developer hub +# ... +---- diff --git a/modules/customization/proc-customize-rhdh-theme-mode.adoc b/modules/customization/proc-customize-rhdh-theme-mode.adoc new file mode 100644 index 0000000000..72c638017c --- /dev/null +++ b/modules/customization/proc-customize-rhdh-theme-mode.adoc @@ -0,0 +1,34 @@ +// Module included in the following assemblies: +// assembly-customize-rhdh-theme.adoc + +[id="proc-customizing-rhdh-theme-mode_{context}"] += Customizing the theme mode for your {product-short} instance + +[NOTE] +==== +In {product-short}, theme configurations are used to change the look and feel of different UI components. So, you might notice changes in different UI components, such as buttons, tabs, sidebars, cards, and tables along with some changes in background color and font used on the {product-very-short} pages. +==== + +You can choose one of the following theme modes for your {product-short} instance: + +* Light theme +* Dark theme +* Auto + +The default theme mode is Auto, which automatically sets the light or dark theme based on your system preferences. + +.Prerequisites + +* You are logged in to the {product-short} web console. + +.Procedure + +. From the {product-short} web console, click *Settings*. +. 
From the *Appearance* panel, click *LIGHT THEME*, *DARK THEME*, or *AUTO* to change the theme mode. ++ +image::user-guide/custom-theme-mode-1.png[] + +// [NOTE] +// ==== +// If you choose the *AUTO* theme mode, ... +// ==== diff --git a/modules/customization/ref-customize-rhdh-custom-components.adoc b/modules/customization/ref-customize-rhdh-custom-components.adoc new file mode 100644 index 0000000000..d7305ff86f --- /dev/null +++ b/modules/customization/ref-customize-rhdh-custom-components.adoc @@ -0,0 +1,51 @@ +// Module included in the following assemblies: +// assembly-customize-rhdh-theme.adoc + +[id="ref-customize-rhdh-custom-components_{context}"] += Custom component options for your {product-short} instance + +There are two component variants that you can use to customize various components of your {product-short} theme: + +* *Patternfly* +* *MUI* + +In addition to assigning a component variant to each parameter in the light or dark theme mode configurations, you can toggle the `rippleEffect` `on` or `off`. 
+ +The following code shows the options that you can use in the `app-config-rhdh.yaml` file to configure the theme components for your {product-short} instance: + +[source,yaml] +---- +app: + branding: + theme: + light: + options: + rippleEffect: off / on + paper: patternfly / mui + buttons: patternfly / mui + inputs: patternfly / mui + accordions: patternfly / mui + sidebars: patternfly / mui + pages: patternfly / mui + headers: patternfly / mui + toolbars: patternfly / mui + dialogs: patternfly / mui + cards: patternfly / mui + tables: patternfly / mui + tabs: patternfly / mui + dark: + options: + rippleEffect: off / on + paper: patternfly / mui + buttons: patternfly / mui + inputs: patternfly / mui + accordions: patternfly / mui + sidebars: patternfly / mui + pages: patternfly / mui + headers: patternfly / mui + toolbars: patternfly / mui + dialogs: patternfly / mui + cards: patternfly / mui + tables: patternfly / mui + tabs: patternfly / mui +---- diff --git a/modules/customization/ref-customize-rhdh-default-backstage.adoc b/modules/customization/ref-customize-rhdh-default-backstage.adoc new file mode 100644 index 0000000000..ecd6a48205 --- /dev/null +++ b/modules/customization/ref-customize-rhdh-default-backstage.adoc @@ -0,0 +1,254 @@ +// Module included in the following assemblies: +// assembly-customize-rhdh-theme.adoc + +[id="ref-customize-rhdh-default-backstage_{context}"] += Default Backstage theme + +You can use the default Backstage theme configurations to make your {product-short} instance look like a standard Backstage instance. You can also modify the `app-config-rhdh.yaml` file to customize or disable particular parameters. 
+ +== Default Backstage theme color palette + +The `app-config-rhdh.yaml` file uses the following configurations for the default Backstage color palette: + +[source,yaml] +---- +app: + branding: + theme: + light: + variant: "backstage" + mode: "light" + palette: + background: + default: "#F8F8F8" + paper: "#FFFFFF" + banner: + closeButtonColor: "#FFFFFF" + error: "#E22134" + info: "#2E77D0" + link: "#000000" + text: "#FFFFFF" + warning: "#FF9800" + border: "#E6E6E6" + bursts: + backgroundColor: + default: "#7C3699" + fontColor: "#FEFEFE" + gradient: + linear: "linear-gradient(-137deg, #4BB8A5 0%, #187656 100%)" + slackChannelText: "#ddd" + errorBackground: "#FFEBEE" + errorText: "#CA001B" + gold: "#FFD600" + highlight: "#FFFBCC" + infoBackground: "#ebf5ff" + infoText: "#004e8a" + link: "#0A6EBE" + linkHover: "#2196F3" + navigation: + background: "#171717" + color: "#b5b5b5" + indicator: "#9BF0E1" + navItem: + hoverBackground: "#404040" + selectedColor: "#FFF" + submenu: + background: "#404040" + pinSidebarButton: + background: "#BDBDBD" + icon: "#181818" + primary: + main: "#1F5493" + status: + aborted: "#757575" + error: "#E22134" + ok: "#1DB954" + pending: "#FFED51" + running: "#1F5493" + warning: "#FF9800" + tabbar: + indicator: "#9BF0E1" + textContrast: "#000000" + textSubtle: "#6E6E6E" + textVerySubtle: "#DDD" + warningBackground: "#F59B23" + warningText: "#000000" + + dark: + variant: "backstage" + mode: "dark" + palette: + background: + default: "#333333" + paper: "#424242" + banner: + closeButtonColor: "#FFFFFF" + error: "#E22134" + info: "#2E77D0" + link: "#000000" + text: "#FFFFFF" + warning: "#FF9800" + border: "#E6E6E6" + bursts: + backgroundColor: + default: "#7C3699" + fontColor: "#FEFEFE" + gradient: + linear: "linear-gradient(-137deg, #4BB8A5 0%, #187656 100%)" + slackChannelText: "#ddd" + errorBackground: "#FFEBEE" + errorText: "#CA001B" + gold: "#FFD600" + highlight: "#FFFBCC" + infoBackground: "#ebf5ff" + infoText: "#004e8a" + link: "#9CC9FF" + 
linkHover: "#82BAFD" + mode: "dark" + navigation: + background: "#424242" + color: "#b5b5b5" + indicator: "#9BF0E1" + navItem: + hoverBackground: "#404040" + selectedColor: "#FFF" + submenu: + background: "#404040" + pinSidebarButton: + background: "#BDBDBD" + icon: "#404040" + primary: + dark: "#82BAFD" + main: "#9CC9FF" + secondary: + main: "#FF88B2" + status: + aborted: "#9E9E9E" + error: "#F84C55" + ok: "#71CF88" + pending: "#FEF071" + running: "#3488E3" + warning: "#FFB84D" + tabbar: + indicator: "#9BF0E1" + textContrast: "#FFFFFF" + textSubtle: "#CCCCCC" + textVerySubtle: "#727272" + warningBackground: "#F59B23" + warningText: "#000000" +---- + +Alternatively, you can use the following `variant` and `mode` values in the `app-config-rhdh.yaml` file to apply the previous default configuration: + +[source,yaml] +---- +app: + branding: + theme: + light: + variant: "backstage" + mode: "light" + dark: + variant: "backstage" + mode: "dark" +---- + +== Default Backstage page themes + +The default Backstage header color is white in light mode and black in dark mode, as shown in the following `app-config-rhdh.yaml` file configuration: + +[source,yaml] +---- +app: + branding: + theme: + light: + palette: {} + defaultPageTheme: default + pageTheme: + default: + backgroundColor: ['#005B4B'] # teal + fontColor: '#ffffff' + shape: wave + documentation: + backgroundColor: ['#C8077A', '#C2297D'] # pinkSea + fontColor: '#ffffff' + shape: wave2 + tool: + backgroundColor: ['#8912CA', '#3E00EA'] # purpleSky + fontColor: '#ffffff' + shape: round + service: + backgroundColor: ['#006D8F', '#0049A1'] # marineBlue + fontColor: '#ffffff' + shape: wave + website: + backgroundColor: ['#0027AF', '#270094'] # veryBlue + fontColor: '#ffffff' + shape: wave + library: + backgroundColor: ['#98002B', '#8D1134'] # rubyRed + fontColor: '#ffffff' + shape: wave + other: + backgroundColor: ['#171717', '#383838'] # darkGrey + fontColor: '#ffffff' + shape: wave + app: + backgroundColor: ['#BE2200', 
'#A41D00'] # toastyOrange + fontColor: '#ffffff' + shape: shapes.wave + apis: + backgroundColor: ['#005B4B'] # teal + fontColor: '#ffffff' + shape: wave2 + card: + backgroundColor: ['#4BB8A5', '#187656'] # greens + fontColor: '#ffffff' + shape: wave + + dark: + palette: {} + defaultPageTheme: default + pageTheme: + default: + backgroundColor: ['#005B4B'] # teal + fontColor: '#ffffff' + shape: wave + documentation: + backgroundColor: ['#C8077A', '#C2297D'] # pinkSea + fontColor: '#ffffff' + shape: wave2 + tool: + backgroundColor: ['#8912CA', '#3E00EA'] # purpleSky + fontColor: '#ffffff' + shape: round + service: + backgroundColor: ['#006D8F', '#0049A1'] # marineBlue + fontColor: '#ffffff' + shape: wave + website: + backgroundColor: ['#0027AF', '#270094'] # veryBlue + fontColor: '#ffffff' + shape: wave + library: + backgroundColor: ['#98002B', '#8D1134'] # rubyRed + fontColor: '#ffffff' + shape: wave + other: + backgroundColor: ['#171717', '#383838'] # darkGrey + fontColor: '#ffffff' + shape: wave + app: + backgroundColor: ['#BE2200', '#A41D00'] # toastyOrange + fontColor: '#ffffff' + shape: shapes.wave + apis: + backgroundColor: ['#005B4B'] # teal + fontColor: '#ffffff' + shape: wave2 + card: + backgroundColor: ['#4BB8A5', '#187656'] # greens + fontColor: '#ffffff' + shape: wave +---- diff --git a/modules/customization/ref-customize-rhdh-default-rhdh.adoc b/modules/customization/ref-customize-rhdh-default-rhdh.adoc new file mode 100644 index 0000000000..df5d7029d3 --- /dev/null +++ b/modules/customization/ref-customize-rhdh-default-rhdh.adoc @@ -0,0 +1,241 @@ +// Module included in the following assemblies: +// assembly-customize-rhdh-theme.adoc + +[id="ref-customize-rhdh-default-rhdh_{context}"] += Default {product} theme + +You can use the default {product} theme configurations to make your {product-short} instance look like a standard {product} instance. You can also modify the `app-config-rhdh.yaml` file to customize or disable particular parameters. 
+ +== Default {product} theme color palette + +The `app-config-rhdh.yaml` file uses the following configurations for the default {product} color palette: + +[source,yaml] +---- +app: + branding: + theme: + light: + variant: "rhdh" + mode: "light" + palette: + background: + default: "#F8F8F8" + paper: "#FFFFFF" + banner: + closeButtonColor: "#FFFFFF" + error: "#E22134" + info: "#2E77D0" + link: "#000000" + text: "#FFFFFF" + warning: "#FF9800" + border: "#E6E6E6" + bursts: + backgroundColor: + default: "#7C3699" + fontColor: "#FEFEFE" + gradient: + linear: "linear-gradient(-137deg, #4BB8A5 0%, #187656 100%)" + slackChannelText: "#ddd" + errorBackground: "#FFEBEE" + errorText: "#CA001B" + gold: "#FFD600" + highlight: "#FFFBCC" + infoBackground: "#ebf5ff" + infoText: "#004e8a" + link: "#0A6EBE" + linkHover: "#2196F3" + mode: "light" + navigation: + background: "#222427" + indicator: "#0066CC" + color: "#ffffff" + selectedColor: "#ffffff" + navItem: + hoverBackground: "#3c3f42" + submenu: + background: "#222427" + pinSidebarButton: + background: "#BDBDBD" + icon: "#181818" + primary: + main: "#0066CC" + secondary: + main: "#8476D1" + status: + aborted: "#757575" + error: "#E22134" + ok: "#1DB954" + pending: "#FFED51" + running: "#1F5493" + warning: "#FF9800" + tabbar: + indicator: "#9BF0E1" + textContrast: "#000000" + textSubtle: "#6E6E6E" + textVerySubtle: "#DDD" + warningBackground: "#F59B23" + warningText: "#000000" + text: + primary: "#151515" + secondary: "#757575" + rhdh: + general: + disabledBackground: "#D2D2D2" + disabled: "#6A6E73" + searchBarBorderColor: "#E4E4E4" + formControlBackgroundColor: "#FFF" + mainSectionBackgroundColor: "#FFF" + headerBottomBorderColor: "#C7C7C7" + cardBackgroundColor: "#FFF" + sideBarBackgroundColor: "#212427" + cardBorderColor: "#C7C7C7" + tableTitleColor: "#181818" + tableSubtitleColor: "#616161" + tableColumnTitleColor: "#151515" + tableRowHover: "#F5F5F5" + tableBorderColor: "#E0E0E0" + tableBackgroundColor: "#FFF" + 
tabsBottomBorderColor: "#D2D2D2" + contrastText: "#FFF" + primary: + main: "#0066CC" + focusVisibleBorder: "#0066CC" + secondary: + main: "#8476D1" + focusVisibleBorder: "#8476D1" + cards: + headerTextColor: "#151515" + headerBackgroundColor: "#FFF" + headerBackgroundImage: "none" + + dark: + variant: "rhdh" + mode: "dark" + palette: + background: + default: "#333333" + paper: "#424242" + banner: + closeButtonColor: "#FFFFFF" + error: "#E22134" + info: "#2E77D0" + link: "#000000" + text: "#FFFFFF" + warning: "#FF9800" + border: "#E6E6E6" + bursts: + backgroundColor: + default: "#7C3699" + fontColor: "#FEFEFE" + gradient: + linear: "linear-gradient(-137deg, #4BB8A5 0%, #187656 100%)" + slackChannelText: "#ddd" + errorBackground: "#FFEBEE" + errorText: "#CA001B" + gold: "#FFD600" + highlight: "#FFFBCC" + infoBackground: "#ebf5ff" + infoText: "#004e8a" + link: "#9CC9FF" + linkHover: "#82BAFD" + mode: "dark" + navigation: + background: "#0f1214" + indicator: "#0066CC" + color: "#ffffff" + selectedColor: "#ffffff" + navItem: + hoverBackground: "#3c3f42" + submenu: + background: "#0f1214" + pinSidebarButton: + background: "#BDBDBD" + icon: "#404040" + primary: + main: "#1FA7F8" + secondary: + main: "#B2A3FF" + status: + aborted: "#9E9E9E" + error: "#F84C55" + ok: "#71CF88" + pending: "#FEF071" + running: "#3488E3" + warning: "#FFB84D" + tabbar: + indicator: "#9BF0E1" + textContrast: "#FFFFFF" + textSubtle: "#CCCCCC" + textVerySubtle: "#727272" + warningBackground: "#F59B23" + warningText: "#000000" + + rhdh: + general: + disabledBackground: "#444548" + disabled: "#AAABAC" + searchBarBorderColor: "#57585a" + formControlBackgroundColor: "#36373A" + mainSectionBackgroundColor: "#0f1214" + headerBottomBorderColor: "#A3A3A3" + cardBackgroundColor: "#292929" + sideBarBackgroundColor: "#1b1d21" + cardBorderColor: "#A3A3A3" + tableTitleColor: "#E0E0E0" + tableSubtitleColor: "#E0E0E0" + tableColumnTitleColor: "#E0E0E0" + tableRowHover: "#0f1214" + tableBorderColor: "#515151" + 
tableBackgroundColor: "#1b1d21" + tabsBottomBorderColor: "#444548" + contrastText: "#FFF" + primary: + main: "#1FA7F8" + focusVisibleBorder: "#ADD6FF" + secondary: + main: "#B2A3FF" + focusVisibleBorder: "#D0C7FF" + cards: + headerTextColor: "#FFF" + headerBackgroundColor: "#0f1214" + headerBackgroundImage: "none" +---- + +Alternatively, you can use the following `variant` and `mode` values in the `app-config-rhdh.yaml` file to apply the previous default configuration: + +[source,yaml] +---- +app: + branding: + theme: + light: + variant: "rhdh" + mode: "light" + dark: + variant: "rhdh" + mode: "dark" +---- + +== Default {product} page themes + +The default {product-short} header color is white in light mode and black in dark mode, as shown in the following `app-config-rhdh.yaml` file configuration: + +[source,yaml] +---- +app: + branding: + theme: + light: + palette: {} + defaultPageTheme: default + pageTheme: + default: + backgroundColor: "#ffffff" + dark: + palette: {} + defaultPageTheme: default + pageTheme: + default: + backgroundColor: "#0f1214" +---- diff --git a/modules/discover/con_benefits-of-rhdh.adoc b/modules/discover/con_benefits-of-rhdh.adoc new file mode 100644 index 0000000000..752fefd83c --- /dev/null +++ b/modules/discover/con_benefits-of-rhdh.adoc @@ -0,0 +1,28 @@ +:_newdoc-version: 2.18.3 +:_template-generated: 2024-10-18 + +:_mod-docs-content-type: CONCEPT + +[id="benefits-of-rhdh_{context}"] += Benefits of {product} + +{product} is designed to streamline developer workflow by providing a consistent self-service experience and can simplify application development through pre-built resources and enhanced collaboration tools. + +The following are the benefits of {product}: + +* Increased developer productivity. Increases productivity by eliminating common organizational challenges, enabling seamless collaboration, and providing clear guidelines for creating, developing, and deploying applications. + +* Unified self-service dashboard. 
Provides development teams with a unified dashboard covering various aspects such as Git, CI/CD, SAST/DAST, Supply Chain, OpenShift/Kubernetes cluster, JIRA, monitoring, API, documentation, and more, facilitated by over 150 plugins. All curated by a platform engineering team, aligning with the company’s best practices. + +* Best practices through software templates. Automates organizational best practices by encoding common tasks such as creating new applications, running Ansible jobs, and establishing CI/CD pipelines for production deployment in Git. + +* Scalable technical documentation. Code and documentation resides in the same repository, eliminating dependencies on proprietary document systems. + +* Efficient onboarding for new developers. New developers can adapt quickly and become productive within a short timeframe. + +* Robust enterprise Role-Based Access Control (RBAC). Empowers administrators to create roles, assign users or groups to roles, and implement robust security policies for enhanced access control. + + +[role="_additional-resources"] +.Additional resources +* For more information about the different features of {product} and how you can extend the upstream Backstage product by providing additional features such as integration with OpenShift, enterprise role-based access control (RBAC), and dynamic plugins, see link:https://developers.redhat.com/rhdh/overview?intcmp=7015Y000003swEuQAI&source=sso[{product}]. \ No newline at end of file diff --git a/modules/dynamic-plugins/con-ansible-plugin-admin.adoc b/modules/dynamic-plugins/con-ansible-plugin-admin.adoc new file mode 100644 index 0000000000..2572a6bbed --- /dev/null +++ b/modules/dynamic-plugins/con-ansible-plugin-admin.adoc @@ -0,0 +1,19 @@ += Installing Ansible plug-ins for {product} + +Ansible plug-ins for {product} deliver an Ansible-specific portal experience with curated learning paths, +push-button content creation, integrated development tools, and other opinionated resources. 
+ +[IMPORTANT] +==== +The Ansible plug-ins are a Technology Preview feature only. + +Technology Preview features are not supported with Red Hat production service level agreements (SLAs), might not be functionally complete, and Red Hat does not recommend using them for production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. + +For more information on Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Scope]. + +Additional detail on how Red Hat provides support for bundled community dynamic plugins is available on the https://access.redhat.com/policy/developerhub-support-policy[Red Hat Developer Support Policy] page. +==== + +To install and configure the Ansible plugins, see +link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.4/html/installing_ansible_plug-ins_for_red_hat_developer_hub/index[_Installing Ansible plug-ins for Red Hat Developer Hub_]. + diff --git a/modules/dynamic-plugins/con-ansible-plugin-user.adoc b/modules/dynamic-plugins/con-ansible-plugin-user.adoc new file mode 100644 index 0000000000..d8a4c67a91 --- /dev/null +++ b/modules/dynamic-plugins/con-ansible-plugin-user.adoc @@ -0,0 +1,19 @@ += Using Ansible plug-ins for {product} + +Ansible plug-ins for {product} deliver an Ansible-specific portal experience with curated learning paths, +push-button content creation, integrated development tools, and other opinionated resources. + +[IMPORTANT] +==== +The Ansible plug-ins are a Technology Preview feature only. + +Technology Preview features are not supported with Red Hat production service level agreements (SLAs), might not be functionally complete, and Red Hat does not recommend using them for production. 
These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. + +For more information on Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Scope]. + +Additional detail on how Red Hat provides support for bundled community dynamic plugins is available on the https://access.redhat.com/policy/developerhub-support-policy[Red Hat Developer Support Policy] page. +==== + +To use the Ansible plugins, see +link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.4/html/using_ansible_plug-ins_for_red_hat_developer_hub/index[_Using Ansible plug-ins for Red Hat Developer Hub_]. + diff --git a/modules/dynamic-plugins/con-ansible-plugin.adoc b/modules/dynamic-plugins/con-ansible-plugin.adoc new file mode 100644 index 0000000000..6b38258a64 --- /dev/null +++ b/modules/dynamic-plugins/con-ansible-plugin.adoc @@ -0,0 +1,26 @@ += Installing and using Ansible plug-ins for {product} + +Ansible plug-ins for {product} deliver an Ansible-specific portal experience with curated learning paths, +push-button content creation, integrated development tools, and other opinionated resources. + +[IMPORTANT] +==== +The Ansible plug-ins are a Technology Preview feature only. + +Technology Preview features are not supported with Red Hat production service level agreements (SLAs), might not be functionally complete, and Red Hat does not recommend using them for production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. + +For more information on Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Scope]. 
+ +Additional detail on how Red Hat provides support for bundled community dynamic plugins is available on the https://access.redhat.com/policy/developerhub-support-policy[Red Hat Developer Support Policy] page. +==== + +== For administrators + +To install and configure the Ansible plugins, see +link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.4/html/installing_ansible_plug-ins_for_red_hat_developer_hub/index[_Installing Ansible plug-ins for Red Hat Developer Hub_]. + +== For users + +To use the Ansible plugins, see +link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.4/html/using_ansible_plug-ins_for_red_hat_developer_hub/index[_Using Ansible plug-ins for Red Hat Developer Hub_]. + diff --git a/modules/dynamic-plugins/con-basic-config-dynamic-plugins.adoc b/modules/dynamic-plugins/con-basic-config-dynamic-plugins.adoc new file mode 100644 index 0000000000..149135fe03 --- /dev/null +++ b/modules/dynamic-plugins/con-basic-config-dynamic-plugins.adoc @@ -0,0 +1,15 @@ +[id="con-basic-config-dynamic-plugins"] + += Basic configuration of dynamic plugins + +Some dynamic plugins require environment variables to be set. If a mandatory environment variable is not set, and the plugin is enabled, then the application might fail at startup. + +//The mandatory environment variables for each plugin are listed in the xref:rhdh-supported-plugins[Dynamic plugins support matrix]. + +[NOTE] +==== +Zib-bomb detection +When installing some dynamic plugin containing large files, if the installation script considers the package archive to be a Zib-Bomb, the installation fails. + +To increase the maximum permitted size of a file inside a package archive, you can increase the `MAX_ENTRY_SIZE` environment value of the deployment `install-dynamic-plugins initContainer` from the default size of `20000000` bytes. 
+==== diff --git a/modules/dynamic-plugins/con-dynamic-plugins-cache.adoc b/modules/dynamic-plugins/con-dynamic-plugins-cache.adoc new file mode 100644 index 0000000000..141d0819c5 --- /dev/null +++ b/modules/dynamic-plugins/con-dynamic-plugins-cache.adoc @@ -0,0 +1,68 @@ +[id="con-dynamic-plugin-cache_{context}"] + += Using the dynamic plugins cache +The dynamic plugins cache in {product} ({product-very-short}) enhances the installation process and reduces platform boot time by storing previously installed plugins. If the configuration remains unchanged, this feature prevents the need to re-download plugins on subsequent boots. + +When you enable dynamic plugins cache: + +* The system calculates a checksum of each plugin's YAML configuration (excluding `pluginConfig`). +* The checksum is stored in a file named `dynamic-plugin-config.hash` within the plugin's directory. +* During boot, if a plugin's package reference matches the previous installation and the checksum is unchanged, the download is skipped. +* Plugins that are disabled since the previous boot are automatically removed. + +== Enabling the dynamic plugins cache +To enable the dynamic plugins cache in {product-very-short}, the plugins directory `dynamic-plugins-root` must be a persistent volume. + +For Helm chart installations, a persistent volume named `dynamic-plugins-root` is automatically created. 
+ +For operator-based installations, you must manually create the PersistentVolumeClaim (PVC) as follows: + +[source,yaml] +---- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: dynamic-plugins-root +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + +--- + +apiVersion: rhdh.redhat.com/v1alpha2 +kind: Backstage +metadata: + name: developer-hub +spec: + deployment: + patch: + spec: + template: + spec: + volumes: + - $patch: replace + name: dynamic-plugins-root + persistentVolumeClaim: + claimName: dynamic-plugins-root +---- + +[NOTE] +==== +Future versions of the {product-very-short} operator are planned to automatically create the PVC. +==== + +== Configuring the dynamic plugins cache +You can set the following optional dynamic plugin cache parameters: + +* `forceDownload`: Set to `true` to force a reinstall of the plugin, bypassing the cache. Default is `false`. For example, modify your `dynamic-plugins.yaml` file as follows: ++ +[source,yaml] +---- +plugins: + - disabled: false + forceDownload: true + package: 'oci://quay.io/example-org/example-plugin:v1.0.0!internal-backstage-plugin-example' +---- diff --git a/modules/dynamic-plugins/con-install-dynamic-plugin-helm.adoc b/modules/dynamic-plugins/con-install-dynamic-plugin-helm.adoc new file mode 100644 index 0000000000..23d624207c --- /dev/null +++ b/modules/dynamic-plugins/con-install-dynamic-plugin-helm.adoc @@ -0,0 +1,24 @@ +// Module included in the following assemblies: +// +// * assemblies/assembly-rhdh-installing-dynamic-plugins.adoc +// * assemblies/dynamic-plugins/assembly-rhdh-installing-dynamic-plugins.adoc + +:_mod-docs-content-type: CONCEPT +[id="con-install-dynamic-plugin-helm_{context}"] += Installing dynamic plugins using the Helm chart + +You can deploy a {product-short} instance using a Helm chart, which is a flexible installation method. 
With the Helm chart, you can sideload dynamic plugins into your {product-short} instance without having to recompile your code or rebuild the container. + +To install dynamic plugins in {product-short} using Helm, add the following `global.dynamic` parameters in your Helm chart: + +* `plugins`: the dynamic plugins list intended for installation. By default, the list is empty. You can populate the plugins list with the following fields: +** `package`: a package specification for the dynamic plugin package that you want to install. You can use a package for either a local or an external dynamic plugin installation. For a local installation, use a path to the local folder containing the dynamic plugin. For an external installation, use a package specification from a public NPM repository. +** `integrity` (required for external packages): an integrity checksum in the form of `-` specific to the package. Supported algorithms include `sha256`, `sha384` and `sha512`. +** `pluginConfig`: an optional plugin-specific `app-config` YAML fragment. See plugin configuration for more information. +** `disabled`: disables the dynamic plugin if set to `true`. Default: `false`. +* `includes`: a list of YAML files utilizing the same syntax. + +[NOTE] +==== +The `plugins` list in the `includes` file is merged with the `plugins` list in the main Helm values. If a plugin package is mentioned in both `plugins` lists, the `plugins` fields in the main Helm values override the `plugins` fields in the `includes` file. The default configuration includes the `dynamic-plugins.default.yaml` file, which contains all of the dynamic plugins preinstalled in {product-short}, whether enabled or disabled by default. 
+==== diff --git a/modules/dynamic-plugins/con-preinstalled-dynamic-plugins.adoc b/modules/dynamic-plugins/con-preinstalled-dynamic-plugins.adoc new file mode 100644 index 0000000000..5d3d275937 --- /dev/null +++ b/modules/dynamic-plugins/con-preinstalled-dynamic-plugins.adoc @@ -0,0 +1,36 @@ +[id="con-preinstalled-dynamic-plugins"] += Preinstalled dynamic plugins + +{product} is preinstalled with a selection of dynamic plugins. +//For a complete list of dynamic plugins that are included in this release of {product-short}, see the xref:rhdh-supported-plugins[Dynamic plugins support matrix]. + +The following preinstalled dynamic plugins are enabled by default: + +* `@janus-idp/backstage-plugin-analytics-provider-segment` +* `@janus-idp/backstage-scaffolder-backend-module-quay` +* `@janus-idp/backstage-scaffolder-backend-module-regex` +* `@backstage/plugin-techdocs-backend` +* `@backstage/plugin-techdocs` + +Upon application startup, for each plugin that is disabled by default, the `install-dynamic-plugins init container` within the {product-short} pod log displays a message similar to the following: + +[source,yaml] +---- +======= Skipping disabled dynamic plugin ./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-github-dynamic +---- + +To enable this plugin, add a package with the same name to the Helm chart and change the value in the `disabled` field to ‘false’. For example: + +[source,java] +---- +global: + dynamic: + includes: + - dynamic-plugins.default.yaml + plugins: + - package: ./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-github-dynamic + disabled: false +---- + +[NOTE] +The default configuration for a plugin is extracted from the `dynamic-plugins.default.yaml` file, however, you can use a `pluginConfig` entry to override the default configuration. 
diff --git a/modules/dynamic-plugins/con-preinstalled-dynamic-plugins.template.adoc b/modules/dynamic-plugins/con-preinstalled-dynamic-plugins.template.adoc new file mode 100644 index 0000000000..8f125e6475 --- /dev/null +++ b/modules/dynamic-plugins/con-preinstalled-dynamic-plugins.template.adoc @@ -0,0 +1,34 @@ +[id="con-preinstalled-dynamic-plugins"] + += Preinstalled dynamic plugins + +{product} is preinstalled with a selection of dynamic plugins. For a complete list of dynamic plugins that are included in this release of {product-short}, see the xref:rhdh-supported-plugins[Dynamic plugins support matrix]. + +The following preinstalled dynamic plugins are enabled by default: + +%%ENABLED_PLUGINS%% + +The dynamic plugins that require custom configuration are disabled by default. + +Upon application startup, for each plugin that is disabled by default, the `install-dynamic-plugins init container` within the {product-short} pod log displays a message similar to the following: + +[source,yaml] +---- +======= Skipping disabled dynamic plugin ./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-github-dynamic +---- + +To enable this plugin, add a package with the same name to the Helm chart and change the value in the `disabled` field to ‘false’. For example: + +[source,java] +---- +global: + dynamic: + includes: + - dynamic-plugins.default.yaml + plugins: + - package: ./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-github-dynamic + disabled: false +---- + +[NOTE] +The default configuration for a plugin is extracted from the `dynamic-plugins.default.yaml` file, however, you can use a `pluginConfig` entry to override the default configuration. 
diff --git a/modules/dynamic-plugins/con-rhdh-plugins.adoc b/modules/dynamic-plugins/con-rhdh-plugins.adoc new file mode 100644 index 0000000000..a52508f8b2 --- /dev/null +++ b/modules/dynamic-plugins/con-rhdh-plugins.adoc @@ -0,0 +1,71 @@ +[id="con-rhdh-plugins"] + += Plugins in {product} + +// The {product} application offers a unified platform with various plugins. Using the plugin ecosystem within the {product-short} application, you can access any kind of development infrastructure or software development tool. + +// The plugins in {product-short} maximize the productivity and streamline the development workflows by maintaining the consistency in the overall user experience. + +The {product} ({product-very-short}) application offers a unified platform with various plugins. Using the plugin ecosystem within the {product-very-short} application, you can access any kind of development infrastructure or software development tool. + +Plugins are modular extensions for {product-very-short} that extend functionality, streamline development workflows, and improve the developer experience. You can add and configure plugins in {product-very-short} to access various software development tools. + +Each plugin is designed as a self-contained application and can incorporate any type of content. The plugins utilize a shared set of platform APIs and reusable UI components. Plugins can also retrieve data from external sources through APIs or by relying on external modules to perform the tasks. + +{product-very-short} provides both static and dynamic plugins that enhance its functionality. Static plugins are integrated into the core of the {product-very-short} application, while dynamic plugins can be sideloaded into your {product-short} instance without the need to recompile your code or rebuild the container. + +To install or update a static plugin you must update your {product-very-short} application source code and rebuild the application and container image. 
To install or update a dynamic plugin, you must restart your {product-very-short} application after installing the plugin.
This community can contribute to plugins that cater to different needs, thereby enhancing the platform. + +Security and compliance:: +You can develop plugins with specific security and compliance requirements in mind, ensuring that {product-very-short} installations meet the necessary standards without compromising the core application. + +Overall, the use of dynamic plugins in {product-very-short} promotes a flexible, adaptable, and sustainable approach to managing and scaling development infrastructure. + +== Comparing dynamic plugins to static plugins +Static plugins are built into the core of the {product-very-short} application. Installing or updating a static plugin requires a restart of the application after installing the plugin. + +The following table provides a comparison between static and dynamic plugins in {product-very-short}. + +[%header,cols=3*] +|=== +|*Feature* |*Static plugins* |*Dynamic plugins* +|Integration |Built into the core application. |Loaded at runtime, separate from the core. +|Flexibility |Requires core changes to add or update features. |Add or update features without core changes. +|Development speed |Slower, requires a complete rebuild for new +features. |Faster, deploy new functionalities quickly. +|Customization |Limited to predefined options. |Easy to tailor platform by loading specific plugins. +|Maintenance |More complex due to tightly coupled features. |Enhanced by modular architecture. +|Resource use |All features loaded at startup. |Only necessary plugins loaded dynamically. +|Innovation |Slower experimentation due to rebuild cycles. |Quick experimentation with new plugins. 
+|=== \ No newline at end of file diff --git a/modules/dynamic-plugins/proc-config-dynamic-plugins-rhdh-operator.adoc b/modules/dynamic-plugins/proc-config-dynamic-plugins-rhdh-operator.adoc new file mode 100644 index 0000000000..08cbbf40c6 --- /dev/null +++ b/modules/dynamic-plugins/proc-config-dynamic-plugins-rhdh-operator.adoc @@ -0,0 +1,98 @@ +// Module included in the following assemblies: +// +// * assemblies/assembly-install-rhdh-ocp.adoc + +:_mod-docs-content-type: PROCEDURE +[id="proc-config-dynamic-plugins-rhdh-operator_{context}"] += Installing dynamic plugins with the {product} Operator + +You can store the configuration for dynamic plugins in a `ConfigMap` object that your `Backstage` custom resource (CR) can reference. + +[NOTE] +==== +If the `pluginConfig` field references environment variables, you must define the variables in your `secrets-rhdh` secret. +==== + +.Procedure + +. From the {ocp-short} web console, select the *ConfigMaps* tab. +. Click *Create ConfigMap*. +. From the *Create ConfigMap* page, select the *YAML view* option in *Configure via* and edit the file, if needed. ++ +.Example `ConfigMap` object using the GitHub dynamic plugin +[source, yaml] +---- +kind: ConfigMap +apiVersion: v1 +metadata: + name: dynamic-plugins-rhdh +data: + dynamic-plugins.yaml: | + includes: + - dynamic-plugins.default.yaml + plugins: + - package: './dynamic-plugins/dist/backstage-plugin-catalog-backend-module-github-dynamic' + disabled: false + pluginConfig: + catalog: + providers: + github: + organization: "${GITHUB_ORG}" + schedule: + frequency: { minutes: 1 } + timeout: { minutes: 1 } + initialDelay: { seconds: 100 } +---- + +. Click *Create*. +. Go to the *Topology* view. +. Click on the overflow menu for the {product} instance that you want to use and select *Edit Backstage* to load the YAML view of the {product} instance. ++ +image::rhdh/operator-install-2.png[] + +. Add the `dynamicPluginsConfigMapName` field to your `Backstage` CR. 
For example: ++ +[source,yaml] +---- +apiVersion: rhdh.redhat.com/v1alpha1 +kind: Backstage +metadata: + name: my-rhdh +spec: + application: +# ... + dynamicPluginsConfigMapName: dynamic-plugins-rhdh +# ... +---- +. Click *Save*. +. Navigate back to the *Topology* view and wait for the {product} pod to start. +. Click the *Open URL* icon to start using the {product} platform with the new configuration changes. + +.Verification + +* Ensure that the dynamic plugins configuration has been loaded, by appending `/api/dynamic-plugins-info/loaded-plugins` to your {product} root URL and checking the list of plugins: ++ +.Example list of plugins +[source,json] +---- +[ + { + "name": "backstage-plugin-catalog-backend-module-github-dynamic", + "version": "0.5.2", + "platform": "node", + "role": "backend-plugin-module" + }, + { + "name": "backstage-plugin-techdocs", + "version": "1.10.0", + "role": "frontend-plugin", + "platform": "web" + }, + { + "name": "backstage-plugin-techdocs-backend-dynamic", + "version": "1.9.5", + "platform": "node", + "role": "backend-plugin" + }, +] +---- diff --git a/modules/dynamic-plugins/proc-installing-and-configuring-redis-cache.adoc b/modules/dynamic-plugins/proc-installing-and-configuring-redis-cache.adoc new file mode 100644 index 0000000000..f0434f2487 --- /dev/null +++ b/modules/dynamic-plugins/proc-installing-and-configuring-redis-cache.adoc @@ -0,0 +1,37 @@ +[id="proc-installing-and-configuring-redis-cache_{context}"] += Using Redis Cache with dynamic plugins +You can use the Redis cache store to improve {product-very-short} performance and reliability. Plugins in {product-very-short} receive dedicated cache connections, which are powered by Keyv. + +== Installing Redis Cache in {product} + +.Prerequisites +* You have installed Red Hat Developer Hub by using either the Operator or Helm chart. +* You have an active Redis server. 
For more information on setting up an external Redis server, see the link:https://www.redis.io/docs/latest/[`Redis official documentation`]. + +.Procedure +Add the following code to your `app-config.yaml` file: +[source, yaml] +---- +backend: + cache: + store: redis + connection: redis://user:pass@cache.example.com:6379 + useRedisSets: true +---- + +== Configuring Redis Cache in {product} +=== useRedisSets +The `useRedisSets` option lets you decide whether to use Redis sets for key management. By default, this option is set to `true`. + +When `useRedisSets` is enabled (`true`): + +* A namespace for the Redis sets is created, and all generated keys are added to that namespace, enabling group management of the keys. + +* When a key is deleted, it's removed from the main storage and the Redis set. + +* When using the clear function to delete all keys, every key in the Redis set is checked for deletion, and the set itself is also removed. + +[NOTE] +In high-performance scenarios, enabling `useRedisSets` can result in memory leaks. If you are running a high-performance application or service, you must set `useRedisSets` to `false`. + +When you set `useRedisSets` to `false`, the keys are handled individually and Redis sets are not utilized. This configuration might lead to performance issues in production when using the `clear` function, as it requires iterating over all keys for deletion. 
\ No newline at end of file diff --git a/modules/dynamic-plugins/proc-obtaining-integrity-checksum.adoc b/modules/dynamic-plugins/proc-obtaining-integrity-checksum.adoc new file mode 100644 index 0000000000..3b35ad2a07 --- /dev/null +++ b/modules/dynamic-plugins/proc-obtaining-integrity-checksum.adoc @@ -0,0 +1,9 @@ +[id="proc-obtaining-integrity-checksum"] + += Obtaining the integrity checksum + +To obtain the integrity checksum, enter the following command: + +---- +npm view @ dist.integrity +---- diff --git a/modules/dynamic-plugins/proc-rhdh-example-external-dynamic-plugins.adoc b/modules/dynamic-plugins/proc-rhdh-example-external-dynamic-plugins.adoc new file mode 100644 index 0000000000..d0c65e9abc --- /dev/null +++ b/modules/dynamic-plugins/proc-rhdh-example-external-dynamic-plugins.adoc @@ -0,0 +1,53 @@ +[id="proc-install-external-dynamic-plugins-helm"] + += Installing external dynamic plugins using a Helm chart + +The NPM registry contains external dynamic plugins that you can use for demonstration purposes. 
For example, the following community plugins are available in the `janus-idp` organization in the NPMJS repository: + +* Notifications (frontend and backend) +* Kubernetes actions (scaffolder actions) + +To install the Notifications and Kubernetes actions plugins, include them in the Helm chart values in the `global.dynamic.plugins` list as shown in the following example: + +[source,yaml] +---- +global: + dynamic: + plugins: + - package: '@janus-idp/plugin-notifications-backend-dynamic@1.3.6' + # Integrity can be found at https://registry.npmjs.org/@janus-idp/plugin-notifications-backend-dynamic + integrity: 'sha512-Qd8pniy1yRx+x7LnwjzQ6k9zP+C1yex24MaCcx7dGDPT/XbTokwoSZr4baSSn8jUA6P45NUUevu1d629mG4JGQ==' + - package: '@janus-idp/plugin-notifications@1.1.12' + # https://registry.npmjs.org/@janus-idp/plugin-notifications + integrity: 'sha512-GCdEuHRQek3ay428C8C4wWgxjNpNwCXgIdFbUUFGCLLkBFSaOEw+XaBvWaBGtQ5BLgE3jQEUxa+422uzSYC5oQ==' + pluginConfig: + dynamicPlugins: + frontend: + janus-idp.backstage-plugin-notifications: + appIcons: + - name: notificationsIcon + module: NotificationsPlugin + importName: NotificationsActiveIcon + dynamicRoutes: + - path: /notifications + importName: NotificationsPage + module: NotificationsPlugin + menuItem: + icon: notificationsIcon + text: Notifications + config: + pollingIntervalMs: 5000 + - package: '@janus-idp/backstage-scaffolder-backend-module-kubernetes-dynamic@1.3.5' + # https://registry.npmjs.org/@janus-idp/backstage-scaffolder-backend-module-kubernetes-dynamic + integrity: 'sha512-19ie+FM3QHxWYPyYzE0uNdI5K8M4vGZ0SPeeTw85XPROY1DrIY7rMm2G0XT85L0ZmntHVwc9qW+SbHolPg/qRA==' + proxy: + endpoints: + /explore-backend-completed: + target: 'http://localhost:7017' + - package: '@dfatwork-pkgs/search-backend-module-explore-wrapped-dynamic@0.1.3-next.1' + # https://registry.npmjs.org/@dfatwork-pkgs/search-backend-module-explore-wrapped-dynamic + integrity: 
'sha512-mv6LS8UOve+eumoMCVypGcd7b/L36lH2z11tGKVrt+m65VzQI4FgAJr9kNCrjUZPMyh36KVGIjYqsu9+kgzH5A==' + - package: '@dfatwork-pkgs/plugin-catalog-backend-module-test-dynamic@0.0.0' + # https://registry.npmjs.org/@dfatwork-pkgs/plugin-catalog-backend-module-test-dynamic + integrity: 'sha512-YsrZMThxJk7cYJU9FtAcsTCx9lCChpytK254TfGb3iMAYQyVcZnr5AA/AU+hezFnXLsr6gj8PP7z/mCZieuuDA==' +---- \ No newline at end of file diff --git a/modules/dynamic-plugins/proc-rhdh-installing-external-dynamic-plugins-airgapped.adoc b/modules/dynamic-plugins/proc-rhdh-installing-external-dynamic-plugins-airgapped.adoc new file mode 100644 index 0000000000..3ddbcfc54c --- /dev/null +++ b/modules/dynamic-plugins/proc-rhdh-installing-external-dynamic-plugins-airgapped.adoc @@ -0,0 +1,5 @@ +[id="proc-rhdh-installing-external-dynamic-plugins-airgapped"] + += Installing external plugins in an air-gapped environment + +You can install external plugins in an air-gapped environment by setting up a custom NPM registry. To configure the NPM registry URL and authentication information for dynamic plugin packages, see xref:proc-using-custom-npm-registry[Using a custom NPM registry for dynamic plugin packages]. diff --git a/modules/dynamic-plugins/proc-topology-configure.adoc b/modules/dynamic-plugins/proc-topology-configure.adoc new file mode 100644 index 0000000000..eee7de31a5 --- /dev/null +++ b/modules/dynamic-plugins/proc-topology-configure.adoc @@ -0,0 +1,333 @@ += Configuration + +== Viewing OpenShift routes +To view OpenShift routes, you must grant read access to the routes resource in the Cluster Role: + +[source,yaml] +---- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: backstage-read-only + rules: + ... + - apiGroups: + - route.openshift.io + resources: + - routes + verbs: + - get + - list +---- + +You must also add the following in `kubernetes.customResources` property in your `app-config.yaml` file: + +[source,yaml] +---- +kubernetes: + ... 
+ customResources: + - group: 'route.openshift.io' + apiVersion: 'v1' + plural: 'routes' +---- + +== Viewing pod logs +To view pod logs, you must grant the following permission to the `ClusterRole`: + +[source,yaml] +---- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: backstage-read-only + rules: + ... + - apiGroups: + - '' + resources: + - pods + - pods/log + verbs: + - get + - list + - watch +---- + +== Viewing Tekton PipelineRuns +To view the Tekton PipelineRuns, you must grant read access to the `pipelines`, `pipelineruns`, and `taskruns` resources in the `ClusterRole`: + +[source,yaml] +---- + ... + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: backstage-read-only + rules: + ... + - apiGroups: + - tekton.dev + resources: + - pipelines + - pipelineruns + - taskruns + verbs: + - get + - list +---- + +To view the Tekton PipelineRuns list in the side panel and the latest PipelineRuns status in the Topology node decorator, you must add the following code to the `kubernetes.customResources` property in your `app-config.yaml` file: + +[source,yaml] +---- +kubernetes: + ... + customResources: + - group: 'tekton.dev' + apiVersion: 'v1' + plural: 'pipelines' + - group: 'tekton.dev' + apiVersion: 'v1' + plural: 'pipelineruns' + - group: 'tekton.dev' + apiVersion: 'v1' + plural: 'taskruns' +---- + +== Viewing virtual machines +To view virtual machines, the OpenShift Virtualization operator must be installed and configured on a Kubernetes cluster. +You must also grant read access to the `VirtualMachines` resource in the `ClusterRole`: + +[source,yaml] +---- + ... + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: backstage-read-only + rules: + ...
+ - apiGroups: + - kubevirt.io + resources: + - virtualmachines + - virtualmachineinstances + verbs: + - get + - list +---- + +To view the virtual machine nodes on the topology plugin, you must add the following code to the `kubernetes.customResources` property in the `app-config.yaml` file: + +[source,yaml] +---- +kubernetes: + ... + customResources: + - group: 'kubevirt.io' + apiVersion: 'v1' + plural: 'virtualmachines' + - group: 'kubevirt.io' + apiVersion: 'v1' + plural: 'virtualmachineinstances' +---- + +== Enabling the source code editor +To enable the source code editor, you must grant read access to the CheClusters resource in the `ClusterRole` as shown in the following example code: + +[source,yaml] +---- + ... + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: backstage-read-only + rules: + ... + - apiGroups: + - org.eclipse.che + resources: + - checlusters + verbs: + - get + - list +---- + +To use the source code editor, you must add the following configuration to the `kubernetes.customResources` property in your `app-config.yaml` file: + +[source,yaml] +---- + kubernetes: + ... + customResources: + - group: 'org.eclipse.che' + apiVersion: 'v2' + plural: 'checlusters' +---- + +== Labels and annotations +=== Linking to the source code editor or the source +Add the following annotations to workload resources, such as Deployments to navigate to the Git repository of the associated application using the source code editor: + +[source,yaml] +---- +annotations: + app.openshift.io/vcs-uri: +---- + +Add the following annotation to navigate to a specific branch: + +[source,yaml] +---- +annotations: + app.openshift.io/vcs-ref: +---- + +[NOTE] +==== +If Red Hat OpenShift Dev Spaces is installed and configured and git URL annotations are also added to the workload YAML file, then clicking on the edit code decorator redirects you to the Red Hat OpenShift Dev Spaces instance. 
+==== + +[NOTE] +==== +When you deploy your application using the OCP Git import flows, then you do not need to add the labels as import flows do that. Otherwise, you need to add the labels manually to the workload YAML file. +==== + +//The labels are not similar to `backstage.io/edit-url` annotations as it points to the catalog entity metadata source file and is applied to RHDH catalog entity metadata YAML file, but not Kubernetes resources. + +You can also add the `app.openshift.io/edit-url` annotation with the edit URL that you want to access using the decorator. + +=== Entity annotation/label +For RHDH to detect that an entity has Kubernetes components, add the following annotation to the entity's `catalog-info.yaml`: + +[source,yaml] +---- +annotations: + backstage.io/kubernetes-id: +---- + +Add the following label to the resources so that the Kubernetes plugin gets the Kubernetes resources for the requested entity: + +[source,yaml] +---- +labels: + backstage.io/kubernetes-id: +---- + +[NOTE] +==== +When using the label selector, the mentioned labels must be present on the resource. +==== + +=== Namespace annotation +To identify the Kubernetes resources using the defined namespace, add the `backstage.io/kubernetes-namespace` annotation: + +[source,yaml] +---- +annotations: + backstage.io/kubernetes-namespace: +---- + +The Red Hat OpenShift Dev Spaces instance is not accessible using the source code editor if the `backstage.io/kubernetes-namespace` annotation is added to the `catalog-info.yaml` file. + +To retrieve the instance URL, you require the CheCluster Custom Resource (CR). As the CheCluster CR is created in the `openshift-devspaces` namespace, the instance URL is not retrieved if the namespace annotation value is not `openshift-devspaces`. + +=== Label selector query annotation +You can write your own custom label, which RHDH uses to find the Kubernetes resources.
The label selector takes precedence over the ID annotations: + +[source,yaml] +---- +annotations: + backstage.io/kubernetes-label-selector: 'app=my-app,component=front-end' +---- + +If you have multiple entities while Red Hat OpenShift Dev Spaces is configured and want multiple entities to support the edit code decorator that redirects to the Red Hat OpenShift Dev Spaces instance, you can add the `backstage.io/kubernetes-label-selector` annotation to the `catalog-info.yaml` file for each entity. + +[source,yaml] +---- +annotations: + backstage.io/kubernetes-label-selector: 'component in (,che)' +---- + +If you are using the previous label selector, you must add the following labels to your resources so that the Kubernetes plugin gets the Kubernetes resources from the requested entity: + +[source,yaml] +---- +labels: + component: che # add this label to your che cluster instance +labels: + component: # add this label to the other resources associated with your entity +---- + +You can also write your own custom query for the label selector with unique labels to differentiate your entities. However, you need to ensure that you add those labels to the resources associated with your entities including your CheCluster instance. + +=== Icon displayed in the node +To display a runtime icon in the topology nodes, add the following label to workload resources, such as Deployments: + +[source,yaml] +---- +labels: + app.openshift.io/runtime: +---- +Alternatively, you can include the following label to display the runtime icon: + +[source,yaml] +---- +labels: + app.kubernetes.io/name: +---- + +Supported values of `` include: + +* django +* dotnet +* drupal +* go-gopher +* golang +* grails +* jboss +* jruby +* js +* nginx +* nodejs +* openjdk +* perl +* phalcon +* php +* python +* quarkus +* rails +* redis +* rh-spring-boot +* rust +* java +* rh-openjdk +* ruby +* spring +* spring-boot + +[NOTE] +==== +Other values result in icons not being rendered for the node.
+==== + +=== App grouping +To display workload resources such as deployments or pods in a visual group, add the following label: + +[source,yaml] +---- +labels: + app.kubernetes.io/part-of: +---- + +=== Node connector +To display the workload resources such as deployments or pods with a visual connector, add the following annotation: + +[source,yaml] +---- +annotations: + app.openshift.io/connects-to: '[{"apiVersion": ,"kind": ,"name": }]' +---- + +For more information about the labels and annotations, see _Guidelines for labels and annotations for OpenShift applications_. \ No newline at end of file diff --git a/modules/dynamic-plugins/proc-topology-install.adoc b/modules/dynamic-plugins/proc-topology-install.adoc new file mode 100644 index 0000000000..dbb099975d --- /dev/null +++ b/modules/dynamic-plugins/proc-topology-install.adoc @@ -0,0 +1,25 @@ += Installation +The Topology plugin enables you to visualize the workloads such as Deployment, Job, Daemonset, Statefulset, CronJob, Pods and Virtual Machines powering any service on your Kubernetes cluster. + +.Prerequisites +* You have installed and configured the @backstage/plugin-kubernetes-backend dynamic plugins. +* You have configured the Kubernetes plugin to connect to the cluster using a ServiceAccount. +* The `ClusterRole` must be granted to ServiceAccount accessing the cluster. +[NOTE] +If you have the {product-short} Kubernetes plugin configured, then the `ClusterRole` is already granted. + +.Procedure +* The Topology plugin is pre-loaded in {product-short} with basic configuration properties. 
To enable it, set the `disabled` property to `false` as follows: ++ +.`app-config.yaml` fragment +[source,yaml] +---- +global: + dynamic: + includes: + - dynamic-plugins.default.yaml + plugins: + - package: ./dynamic-plugins/dist/janus-idp-backstage-plugin-topology + disabled: false +---- \ No newline at end of file diff --git a/modules/dynamic-plugins/proc-using-custom-npm-registry.adoc b/modules/dynamic-plugins/proc-using-custom-npm-registry.adoc new file mode 100644 index 0000000000..16aefcddb4 --- /dev/null +++ b/modules/dynamic-plugins/proc-using-custom-npm-registry.adoc @@ -0,0 +1,24 @@ +[id="proc-using-custom-npm-registry"] + +//= Using a custom NPM registry for dynamic plugin packages += Installing dynamic plugins in an air-gapped environment + +You can install external plugins in an air-gapped environment by setting up a custom NPM registry. + +You can configure the NPM registry URL and authentication information for dynamic plugin packages using a Helm chart. For dynamic plugin packages obtained through `npm pack`, you can use a `.npmrc` file. + +Using the Helm chart, add the `.npmrc` file to the NPM registry by creating a secret named `dynamic-plugins-npmrc` with the following content: + +[source,yaml] +---- +apiVersion: v1 +kind: Secret +metadata: + name: dynamic-plugins-npmrc +type: Opaque +stringData: + .npmrc: | + registry= + //:_authToken= + ... +---- diff --git a/modules/dynamic-plugins/proc-using-topology-plugin.adoc b/modules/dynamic-plugins/proc-using-topology-plugin.adoc new file mode 100644 index 0000000000..ab33ae1699 --- /dev/null +++ b/modules/dynamic-plugins/proc-using-topology-plugin.adoc @@ -0,0 +1,36 @@ += Using the Topology plugin +Topology is a front-end plugin that enables you to view the workloads as nodes that power any service on the Kubernetes cluster. + +.Prerequisites +* You have installed the Red Hat Developer Hub (RHDH). +* You have installed the Topology plugin. +//For the installation process, see Installation.
+* If the RBAC permission framework is enabled, ensure that you add the following permission policies in an external permission policies configuration file named `rbac-policy.csv` to allow the RBAC admins or your desired user(s)/group(s) to access the Topology plugin: ++ +[source,bash] +---- +g, user:default/, role:default/topology-viewer +p, role:default/topology-viewer, topology.view.read, read, allow +p, role:default/topology-viewer, kubernetes.proxy, use, allow +p, role:default/topology-viewer, catalog-entity, read, allow +---- ++ +The `topology.view.read` permission policy grants the user the ability to see the Topology panel, the `kubernetes.proxy` permission policy grants the ability to view the pod logs, and the `catalog-entity` permission policy grants the ability to see the catalog item. + +.Procedure + +. Open your {product-very-short} application and select a component from the *Catalog* page. +. Go to the *TOPOLOGY* tab and you can view the workloads such as deployments or pods as nodes. ++ +image::rhdh-plugins-reference/topology-tab-user1.png[topology-user-1] + +. Select a node and a pop-up appears on the right side, which contains two tabs: *Details* and *Resources*. + +. The *Details* and *Resources* tabs contain the associated information and resources of the node. ++ +image::rhdh-plugins-reference/topology-tab-user2.png[topology-user-2] + +. Click on the *Open URL* button on the top of a node. ++ +image::rhdh-plugins-reference/topology-tab-user3.png[topology-user-3] ++ +When you click on the *Open URL* button, it allows you to access the associated *Ingresses* and runs your application in a new tab.
diff --git a/modules/dynamic-plugins/proc-viewing-installed-plugins.adoc b/modules/dynamic-plugins/proc-viewing-installed-plugins.adoc new file mode 100644 index 0000000000..9894dbf9c0 --- /dev/null +++ b/modules/dynamic-plugins/proc-viewing-installed-plugins.adoc @@ -0,0 +1,15 @@ +// Module included in the following assemblies: +// +// assemblies/assembly-rhdh-installing-dynamic-plugins.adoc +// assemblies/dynamic-plugins/assembly-rhdh-installing-dynamic-plugins.adoc + +:_mod-docs-content-type: PROCEDURE +[id="proc-viewing-installed-plugins_{context}"] += Viewing installed plugins + +Using the Dynamic Plugins Info front-end plugin, you can view plugins that are currently installed in your {product} application. This plugin is enabled by default. + +.Procedure + +. Open your {product-short} application and click *Administration*. +. Go to the *Plugins* tab to view a list of installed plugins and related information. diff --git a/modules/dynamic-plugins/ref-community-plugins.adoc b/modules/dynamic-plugins/ref-community-plugins.adoc new file mode 100644 index 0000000000..50241388f2 --- /dev/null +++ b/modules/dynamic-plugins/ref-community-plugins.adoc @@ -0,0 +1,236 @@ +// This page is generated! Do not edit the .adoc file, but instead run rhdh-supported-plugins.sh to regen this page from the latest plugin metadata. +// cd /path/to/rhdh-documentation; ./modules/dynamic-plugins/rhdh-supported-plugins.sh; ./build/scripts/build.sh; google-chrome titles-generated/main/plugin-rhdh/index.html + += Community plugins + +[IMPORTANT] +==== +{product} ({product-very-short}) includes a select number of community-supported plugins, available for customers to enable and configure. These community plugins are augmented by {company-name} to be dynamic plugin capable, and are provided with support scoped per Technical Preview terms. 
+ +Details on how {company-name} provides support for bundled community dynamic plugins are available on the https://access.redhat.com/policy/developerhub-support-policy[Red Hat Developer Support Policy] page. +==== + +{product-very-short} includes the following 43 community plugins: + +[%header,cols=4*] +|=== +|*Name* |*Plugin* |*Version* |*Path and required variables* +|3scale |`https://npmjs.com/package/@backstage-community/plugin-3scale-backend/v/1.8.2[@backstage-community/plugin-3scale-backend]` |1.8.2 +|`./dynamic-plugins/dist/backstage-community-plugin-3scale-backend-dynamic` + +`THREESCALE_BASE_URL` + +`THREESCALE_ACCESS_TOKEN` + + +|Argo CD |`https://npmjs.com/package/@roadiehq/scaffolder-backend-argocd/v/1.1.27[@roadiehq/scaffolder-backend-argocd]` |1.1.27 +|`./dynamic-plugins/dist/roadiehq-scaffolder-backend-argocd-dynamic` + +`ARGOCD_USERNAME` + +`ARGOCD_PASSWORD` + +`ARGOCD_INSTANCE1_URL` + +`ARGOCD_AUTH_TOKEN` + +`ARGOCD_INSTANCE2_URL` + +`ARGOCD_AUTH_TOKEN2` + + +|Argo CD (Red Hat) |`https://npmjs.com/package/@backstage-community/plugin-redhat-argocd/v/1.8.3[@backstage-community/plugin-redhat-argocd]` |1.8.3 +|`./dynamic-plugins/dist/backstage-community-plugin-redhat-argocd` + + +|Azure |`https://npmjs.com/package/@backstage/plugin-scaffolder-backend-module-azure/v/0.1.14[@backstage/plugin-scaffolder-backend-module-azure]` |0.1.14 +|`./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-azure-dynamic` + + +|Azure Devops |`https://npmjs.com/package/@backstage/plugin-azure-devops/v/0.4.4[@backstage/plugin-azure-devops]` |0.4.4 +|`./dynamic-plugins/dist/backstage-plugin-azure-devops` + + +|Azure Devops |`https://npmjs.com/package/@backstage/plugin-azure-devops-backend/v/0.6.5[@backstage/plugin-azure-devops-backend]` |0.6.5 +|`./dynamic-plugins/dist/backstage-plugin-azure-devops-backend-dynamic` + +`AZURE_TOKEN` + +`AZURE_ORG` + + +|Azure Repositories 
|`https://npmjs.com/package/@parfuemerie/douglas-scaffolder-backend-module-azure-repositories/v/0.3.0[@parfuemerie/douglas-scaffolder-backend-module-azure-repositories]` |0.3.0 +|`./dynamic-plugins/dist/parfuemerie-douglas-scaffolder-backend-module-azure-repositories` + + +|Bitbucket Cloud |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-bitbucket-cloud/v/0.2.9[@backstage/plugin-catalog-backend-module-bitbucket-cloud]` |0.2.9 +|`./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-bitbucket-cloud-dynamic` + +`BITBUCKET_WORKSPACE` + + +|Bitbucket Cloud |`https://npmjs.com/package/@backstage/plugin-scaffolder-backend-module-bitbucket-cloud/v/0.1.12[@backstage/plugin-scaffolder-backend-module-bitbucket-cloud]` |0.1.12 +|`./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-bitbucket-cloud-dynamic` + + +|Bitbucket Server |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-bitbucket-server/v/0.1.36[@backstage/plugin-catalog-backend-module-bitbucket-server]` |0.1.36 +|`./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-bitbucket-server-dynamic` + +`BITBUCKET_HOST` + + +|Bitbucket Server |`https://npmjs.com/package/@backstage/plugin-scaffolder-backend-module-bitbucket-server/v/0.1.12[@backstage/plugin-scaffolder-backend-module-bitbucket-server]` |0.1.12 +|`./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-bitbucket-server-dynamic` + + +|Bulk Import |`https://npmjs.com/package/@janus-idp/backstage-plugin-bulk-import-backend/v/1.5.4[@janus-idp/backstage-plugin-bulk-import-backend]` |1.5.4 +|`./dynamic-plugins/dist/janus-idp-backstage-plugin-bulk-import-backend-dynamic` + + +|Datadog |`https://npmjs.com/package/@roadiehq/backstage-plugin-datadog/v/2.3.0[@roadiehq/backstage-plugin-datadog]` |2.3.0 +|`./dynamic-plugins/dist/roadiehq-backstage-plugin-datadog` + + +|Dynatrace |`https://npmjs.com/package/@backstage/plugin-dynatrace/v/10.0.4[@backstage/plugin-dynatrace]` |10.0.4 
+|`./dynamic-plugins/dist/backstage-plugin-dynatrace` + + +|Gerrit |`https://npmjs.com/package/@backstage/plugin-scaffolder-backend-module-gerrit/v/0.1.14[@backstage/plugin-scaffolder-backend-module-gerrit]` |0.1.14 +|`./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-gerrit-dynamic` + + +|GitHub |`https://npmjs.com/package/@backstage/plugin-scaffolder-backend-module-github/v/0.4.0[@backstage/plugin-scaffolder-backend-module-github]` |0.4.0 +|`./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-github-dynamic` + + +|GitHub Actions |`https://npmjs.com/package/@backstage/plugin-github-actions/v/0.6.16[@backstage/plugin-github-actions]` |0.6.16 +|`./dynamic-plugins/dist/backstage-plugin-github-actions` + + +|GitHub Insights |`https://npmjs.com/package/@roadiehq/backstage-plugin-github-insights/v/2.3.31[@roadiehq/backstage-plugin-github-insights]` |2.3.31 +|`./dynamic-plugins/dist/roadiehq-backstage-plugin-github-insights` + + +|GitHub Issues |`https://npmjs.com/package/@backstage/plugin-github-issues/v/0.4.2[@backstage/plugin-github-issues]` |0.4.2 +|`./dynamic-plugins/dist/backstage-plugin-github-issues` + + +|GitHub Pull Requests |`https://npmjs.com/package/@roadiehq/backstage-plugin-github-pull-requests/v/2.5.29[@roadiehq/backstage-plugin-github-pull-requests]` |2.5.29 +|`./dynamic-plugins/dist/roadiehq-backstage-plugin-github-pull-requests` + + +|GitLab |`https://npmjs.com/package/@immobiliarelabs/backstage-plugin-gitlab/v/6.6.0[@immobiliarelabs/backstage-plugin-gitlab]` |6.6.0 +|`./dynamic-plugins/dist/immobiliarelabs-backstage-plugin-gitlab` + + +|GitLab |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-gitlab/v/0.3.21[@backstage/plugin-catalog-backend-module-gitlab]` |0.3.21 +|`./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-gitlab-dynamic` + + +|GitLab |`https://npmjs.com/package/@immobiliarelabs/backstage-plugin-gitlab-backend/v/6.6.0[@immobiliarelabs/backstage-plugin-gitlab-backend]` |6.6.0 
+|`./dynamic-plugins/dist/immobiliarelabs-backstage-plugin-gitlab-backend-dynamic` + +`GITLAB_HOST` + +`GITLAB_TOKEN` + + +|GitLab |`https://npmjs.com/package/@backstage/plugin-scaffolder-backend-module-gitlab/v/0.4.4[@backstage/plugin-scaffolder-backend-module-gitlab]` |0.4.4 +|`./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-gitlab-dynamic` + + +|GitLab Org |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-gitlab-org/v/0.0.5[@backstage/plugin-catalog-backend-module-gitlab-org]` |0.0.5 +|`./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-gitlab-org-dynamic` + + +|Http Request |`https://npmjs.com/package/@roadiehq/scaffolder-backend-module-http-request/v/4.3.2[@roadiehq/scaffolder-backend-module-http-request]` |4.3.2 +|`./dynamic-plugins/dist/roadiehq-scaffolder-backend-module-http-request-dynamic` + + +|Jenkins |`https://npmjs.com/package/@backstage/plugin-jenkins/v/0.9.10[@backstage/plugin-jenkins]` |0.9.10 +|`./dynamic-plugins/dist/backstage-plugin-jenkins` + + +|Jenkins |`https://npmjs.com/package/@backstage/plugin-jenkins-backend/v/0.4.5[@backstage/plugin-jenkins-backend]` |0.4.5 +|`./dynamic-plugins/dist/backstage-plugin-jenkins-backend-dynamic` + +`JENKINS_URL` + +`JENKINS_USERNAME` + +`JENKINS_TOKEN` + + +|Jira |`https://npmjs.com/package/@roadiehq/backstage-plugin-jira/v/2.5.8[@roadiehq/backstage-plugin-jira]` |2.5.8 +|`./dynamic-plugins/dist/roadiehq-backstage-plugin-jira` + + +|Kubernetes |`https://npmjs.com/package/@backstage/plugin-kubernetes/v/0.11.12[@backstage/plugin-kubernetes]` |0.11.12 +|`./dynamic-plugins/dist/backstage-plugin-kubernetes` + + +|Ldap |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-ldap/v/0.7.0[@backstage/plugin-catalog-backend-module-ldap]` |0.7.0 +|`./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-ldap-dynamic` + + +|Lighthouse |`https://npmjs.com/package/@backstage/plugin-lighthouse/v/0.4.20[@backstage/plugin-lighthouse]` |0.4.20 
+|`./dynamic-plugins/dist/backstage-plugin-lighthouse` + + +|Logs |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-logs/v/0.0.1[@backstage/plugin-catalog-backend-module-logs]` |0.0.1 +|`./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-logs-dynamic` + + +|MS Graph |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-msgraph/v/0.5.30[@backstage/plugin-catalog-backend-module-msgraph]` |0.5.30 +|`./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-msgraph-dynamic` + + +|PagerDuty |`https://npmjs.com/package/@pagerduty/backstage-plugin/v/0.15.0[@pagerduty/backstage-plugin]` |0.15.0 +|`./dynamic-plugins/dist/pagerduty-backstage-plugin` + + +|PagerDuty |`https://npmjs.com/package/@pagerduty/backstage-plugin-backend/v/0.6.1[@pagerduty/backstage-plugin-backend]` |0.6.1 +|`./dynamic-plugins/dist/pagerduty-backstage-plugin-backend-dynamic` + +`PAGERDUTY_API_BASE` + +`PAGERDUTY_CLIENT_ID` + +`PAGERDUTY_CLIENT_SECRET` + +`PAGERDUTY_SUBDOMAIN` + + +|Pingidentity |`https://npmjs.com/package/@backstage-community/plugin-catalog-backend-module-pingidentity/v/0.1.2[@backstage-community/plugin-catalog-backend-module-pingidentity]` |0.1.2 +|`./dynamic-plugins/dist/backstage-community-plugin-catalog-backend-module-pingidentity-dynamic` + + +|Scaffolder Relation Processor |`https://npmjs.com/package/@backstage-community/plugin-catalog-backend-module-scaffolder-relation-processor/v/1.2.6[@backstage-community/plugin-catalog-backend-module-scaffolder-relation-processor]` |1.2.6 +|`./dynamic-plugins/dist/backstage-community-plugin-catalog-backend-module-scaffolder-relation-processor-dynamic` + + +|Security Insights |`https://npmjs.com/package/@roadiehq/backstage-plugin-security-insights/v/2.3.19[@roadiehq/backstage-plugin-security-insights]` |2.3.19 +|`./dynamic-plugins/dist/roadiehq-backstage-plugin-security-insights` + + +|SonarQube |`https://npmjs.com/package/@backstage/plugin-sonarqube/v/0.7.17[@backstage/plugin-sonarqube]` 
|0.7.17 +|`./dynamic-plugins/dist/backstage-plugin-sonarqube` + + +|SonarQube |`https://npmjs.com/package/@backstage/plugin-sonarqube-backend/v/0.2.20[@backstage/plugin-sonarqube-backend]` |0.2.20 +|`./dynamic-plugins/dist/backstage-plugin-sonarqube-backend-dynamic` + +`SONARQUBE_URL` + +`SONARQUBE_TOKEN` + + +|Tech Radar |`https://npmjs.com/package/@backstage/plugin-tech-radar/v/0.7.4[@backstage/plugin-tech-radar]` |0.7.4 +|`./dynamic-plugins/dist/backstage-plugin-tech-radar` + + +|Utils |`https://npmjs.com/package/@roadiehq/scaffolder-backend-module-utils/v/1.17.1[@roadiehq/scaffolder-backend-module-utils]` |1.17.1 +|`./dynamic-plugins/dist/roadiehq-scaffolder-backend-module-utils-dynamic` + + +|=== diff --git a/modules/dynamic-plugins/ref-community-plugins.template.adoc b/modules/dynamic-plugins/ref-community-plugins.template.adoc new file mode 100644 index 0000000000..2ae02c8325 --- /dev/null +++ b/modules/dynamic-plugins/ref-community-plugins.template.adoc @@ -0,0 +1,19 @@ +// This page is generated! Do not edit the .adoc file, but instead run rhdh-supported-plugins.sh to regen this page from the latest plugin metadata. +// cd /path/to/rhdh-documentation; ./modules/dynamic-plugins/rhdh-supported-plugins.sh; ./build/scripts/build.sh; google-chrome titles-generated/main/plugin-rhdh/index.html + += Community plugins + +[IMPORTANT] +==== +{product} ({product-very-short}) includes a select number of community-supported plugins, available for customers to enable and configure. These community plugins are augmented by {company-name} to be dynamic plugin capable, and are provided with support scoped per Technical Preview terms. + +Details on how {company-name} provides support for bundled community dynamic plugins are available on the https://access.redhat.com/policy/developerhub-support-policy[Red Hat Developer Support Policy] page. 
+==== + +{product-very-short} includes the following %%COUNT_3%% community plugins: + +[%header,cols=4*] +|=== +|*Name* |*Plugin* |*Version* |*Path and required variables* +%%TABLE_CONTENT_3%% +|=== diff --git a/modules/dynamic-plugins/ref-example-dynamic-plugin-helm-installations.adoc b/modules/dynamic-plugins/ref-example-dynamic-plugin-helm-installations.adoc new file mode 100644 index 0000000000..773d51f359 --- /dev/null +++ b/modules/dynamic-plugins/ref-example-dynamic-plugin-helm-installations.adoc @@ -0,0 +1,53 @@ +[id="ref-example-dynamic-plugin-helm-installations"] + += Example Helm chart configurations for dynamic plugin installations + +The following examples demonstrate how to configure the Helm chart for specific types of dynamic plugin installations. + +.Configuring a local plugin and an external plugin when the external plugin requires a specific app-config +[source,yaml] +---- +global: + dynamic: + plugins: + - package: + - package: + integrity: sha512- + pluginConfig: ... +---- + +.Disabling a plugin from an included file +[source,yaml] +---- +global: + dynamic: + includes: + - dynamic-plugins.default.yaml + plugins: + - package: + disabled: true +---- + +.Enabling a plugin from an included file +[source,yaml] +---- +global: + dynamic: + includes: + - dynamic-plugins.default.yaml + plugins: + - package: + disabled: false +---- + +.Enabling a plugin that is disabled in an included file +[source,yaml] +---- +global: + dynamic: + includes: + - dynamic-plugins.default.yaml + plugins: + - package: + disabled: false +---- diff --git a/modules/dynamic-plugins/ref-rh-compatible-plugins.adoc b/modules/dynamic-plugins/ref-rh-compatible-plugins.adoc new file mode 100644 index 0000000000..197587fb04 --- /dev/null +++ b/modules/dynamic-plugins/ref-rh-compatible-plugins.adoc @@ -0,0 +1,66 @@ +// This page is generated! Do not edit the .adoc file, but instead run rhdh-supported-plugins.sh to regen this page from the latest plugin metadata. 
+// cd /path/to/rhdh-documentation; ./modules/dynamic-plugins/rhdh-supported-plugins.sh; ./build/scripts/build.sh; google-chrome titles-generated/main/plugin-rhdh/index.html + += Other installable plugins + +The following Technology Preview plugins are not preinstalled and must be installed from an external source: + +[%header,cols=4*] +|=== +|*Name* |*Plugin*|*Version* |*Installation Details* + +|Ansible Automation Platform Frontend|`https://access.redhat.com/downloads/content/480/ver=2.4/rhel---9/2.4/x86_64/product-software[@ansible/plugin-backstage-rhaap]` |1.0.0 +| https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.4/html/installing_ansible_plug-ins_for_red_hat_developer_hub[Learn more] + +|Ansible Automation Platform +| `https://access.redhat.com/downloads/content/480/ver=2.4/rhel---9/2.4/x86_64/product-software[@ansible/plugin-backstage-rhaap-backend]` |1.0.0 | + +|Ansible Automation Platform Scaffolder Backend +|`https://access.redhat.com/downloads/content/480/ver=2.4/rhel---9/2.4/x86_64/product-software[@ansible/plugin-scaffolder-backend-module-backstage-rhaap]` |1.0.0 | + + +|Orchestrator Frontend|`https://www.npmjs.com/package/@janus-idp/backstage-plugin-orchestrator[@janus-idp/backstage-plugin-orchestrator]` |1.22.6| https://www.parodos.dev/1.2-rc/docs/[Learn more] + +|Orchestrator Backend +|`https://www.npmjs.com/package/@janus-idp/backstage-plugin-orchestrator-backend-dynamic[@janus-idp/backstage-plugin-orchestrator-backend-dynamic]` |1.22.9 | + + +|=== + +// Without description - for consistency i.e. no descriptions in other table and we provide a 'Learn more' link to plugin documentation for users' convenience. 
+//// +[%header,cols=5*] +|=== +|*Name* |*Description*|*Plugin*|*Version* |*Installation Details* + +|Ansible Automation Platform Frontend +.3+|Ansible plug-ins for RHDH delivers an Ansible-specific portal experience with curated learning paths, push-button content creation, integrated development tools, and other opinionated resources. +|`https://access.redhat.com/downloads/content/480/ver=2.4/rhel---9/2.4/x86_64/product-software[@ansible/plugin-backstage-rhaap]` |1.0.0 +.3+| https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.4/html/installing_ansible_plug-ins_for_red_hat_developer_hub[Learn more] + +|Ansible Automation Platform +| `https://access.redhat.com/downloads/content/480/ver=2.4/rhel---9/2.4/x86_64/product-software[@ansible/plugin-backstage-rhaap-backend]` |1.0.0 + +|Ansible Automation Platform Scaffolder Backend +|`https://access.redhat.com/downloads/content/480/ver=2.4/rhel---9/2.4/x86_64/product-software[@ansible/plugin-scaffolder-backend-module-backstage-rhaap]` |1.0.0 + + +|Orchestrator Frontend +.2+|Orchestrator brings serverless workflows into Red Hat Developer Hub, focusing on the journey for application migration to the cloud, on boarding developers, and user-made workflows of Backstage actions or external systems. +|`https://www.npmjs.com/package/@janus-idp/backstage-plugin-orchestrator[@janus-idp/backstage-plugin-orchestrator]` |1.22.6 +.2+| https://www.parodos.dev/1.2-rc/docs/[Learn more] + +|Orchestrator Backend +|`https://www.npmjs.com/package/@janus-idp/backstage-plugin-orchestrator-backend-dynamic[@janus-idp/backstage-plugin-orchestrator-backend-dynamic]` |1.22.9 + + +|=== +//// + + +[NOTE] +==== + +* The above Red Hat Ansible Automation Platform (RHAAP) plugins, can be used as a replacement for the older plugin listed in the link:{LinkPluginsGuide}#rhdh-tech-preview-plugins[Technology Preview plugins] section of the _{NameOfPluginsGuide} guide_. 
+==== + diff --git a/modules/dynamic-plugins/ref-rh-supported-plugins.adoc b/modules/dynamic-plugins/ref-rh-supported-plugins.adoc new file mode 100644 index 0000000000..5219891521 --- /dev/null +++ b/modules/dynamic-plugins/ref-rh-supported-plugins.adoc @@ -0,0 +1,123 @@ +// This page is generated! Do not edit the .adoc file, but instead run rhdh-supported-plugins.sh to regen this page from the latest plugin metadata. +// cd /path/to/rhdh-documentation; ./modules/dynamic-plugins/rhdh-supported-plugins.sh; ./build/scripts/build.sh; google-chrome titles-generated/main/plugin-rhdh/index.html + += {company-name} supported plugins + +{company-name} supports the following 17 plugins: + +[%header,cols=4*] +|=== +|*Name* |*Plugin* |*Version* |*Path and required variables* +|Analytics Provider Segment |`https://npmjs.com/package/@janus-idp/backstage-plugin-analytics-provider-segment/v/1.7.2[@janus-idp/backstage-plugin-analytics-provider-segment]` |1.7.2 +|`./dynamic-plugins/dist/janus-idp-backstage-plugin-analytics-provider-segment` + +`SEGMENT_WRITE_KEY` + +`SEGMENT_TEST_MODE` + + +|Argo CD |`https://npmjs.com/package/@roadiehq/backstage-plugin-argo-cd/v/2.6.5[@roadiehq/backstage-plugin-argo-cd]` |2.6.5 +|`./dynamic-plugins/dist/roadiehq-backstage-plugin-argo-cd` + + +|Argo CD |`https://npmjs.com/package/@roadiehq/backstage-plugin-argo-cd-backend/v/3.0.3[@roadiehq/backstage-plugin-argo-cd-backend]` |3.0.3 +|`./dynamic-plugins/dist/roadiehq-backstage-plugin-argo-cd-backend-dynamic` + +`ARGOCD_USERNAME` + +`ARGOCD_PASSWORD` + +`ARGOCD_INSTANCE1_URL` + +`ARGOCD_AUTH_TOKEN` + +`ARGOCD_INSTANCE2_URL` + +`ARGOCD_AUTH_TOKEN2` + + +|GitHub |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-github/v/0.6.5[@backstage/plugin-catalog-backend-module-github]` |0.6.5 +|`./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-github-dynamic` + +`GITHUB_ORG` + + +|GitHub Org 
|`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-github-org/v/0.1.17[@backstage/plugin-catalog-backend-module-github-org]` |0.1.17 +|`./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-github-org-dynamic` + +`GITHUB_URL` + +`GITHUB_ORG` + + +|Keycloak |`https://npmjs.com/package/@janus-idp/backstage-plugin-keycloak-backend/v/1.13.2[@janus-idp/backstage-plugin-keycloak-backend]` |1.13.2 +|`./dynamic-plugins/dist/janus-idp-backstage-plugin-keycloak-backend-dynamic` + +`KEYCLOAK_BASE_URL` + +`KEYCLOAK_LOGIN_REALM` + +`KEYCLOAK_REALM` + +`KEYCLOAK_CLIENT_ID` + +`KEYCLOAK_CLIENT_SECRET` + + +|Kubernetes |`https://npmjs.com/package/@backstage/plugin-kubernetes-backend/v/0.18.3[@backstage/plugin-kubernetes-backend]` |0.18.3 +|`./dynamic-plugins/dist/backstage-plugin-kubernetes-backend-dynamic` + +`K8S_CLUSTER_NAME` + +`K8S_CLUSTER_URL` + +`K8S_CLUSTER_TOKEN` + + +|OCM |`https://npmjs.com/package/@janus-idp/backstage-plugin-ocm/v/4.4.4[@janus-idp/backstage-plugin-ocm]` |4.4.4 +|`./dynamic-plugins/dist/janus-idp-backstage-plugin-ocm` + + +|OCM |`https://npmjs.com/package/@janus-idp/backstage-plugin-ocm-backend/v/4.4.2[@janus-idp/backstage-plugin-ocm-backend]` |4.4.2 +|`./dynamic-plugins/dist/janus-idp-backstage-plugin-ocm-backend-dynamic` + +`OCM_HUB_NAME` + +`OCM_HUB_URL` + +`OCM_SA_TOKEN` + + +|Quay |`https://npmjs.com/package/@janus-idp/backstage-plugin-quay/v/1.11.7[@janus-idp/backstage-plugin-quay]` |1.11.7 +|`./dynamic-plugins/dist/janus-idp-backstage-plugin-quay` + + +|Quay |`https://npmjs.com/package/@janus-idp/backstage-scaffolder-backend-module-quay/v/1.7.1[@janus-idp/backstage-scaffolder-backend-module-quay]` |1.7.1 +|`./dynamic-plugins/dist/janus-idp-backstage-scaffolder-backend-module-quay-dynamic` + + +|RBAC |`https://npmjs.com/package/@janus-idp/backstage-plugin-rbac/v/1.29.5[@janus-idp/backstage-plugin-rbac]` |1.29.5 +|`./dynamic-plugins/dist/janus-idp-backstage-plugin-rbac` + + +|Regex 
|`https://npmjs.com/package/@janus-idp/backstage-scaffolder-backend-module-regex/v/1.7.1[@janus-idp/backstage-scaffolder-backend-module-regex]` |1.7.1 +|`./dynamic-plugins/dist/janus-idp-backstage-scaffolder-backend-module-regex-dynamic` + + +|TechDocs |`https://npmjs.com/package/@backstage/plugin-techdocs/v/1.10.7[@backstage/plugin-techdocs]` |1.10.7 +|`./dynamic-plugins/dist/backstage-plugin-techdocs` + + +|TechDocs |`https://npmjs.com/package/@backstage/plugin-techdocs-backend/v/1.10.9[@backstage/plugin-techdocs-backend]` |1.10.9 +|`./dynamic-plugins/dist/backstage-plugin-techdocs-backend-dynamic` + + +|Tekton |`https://npmjs.com/package/@janus-idp/backstage-plugin-tekton/v/3.12.7[@janus-idp/backstage-plugin-tekton]` |3.12.7 +|`./dynamic-plugins/dist/janus-idp-backstage-plugin-tekton` + + +|Topology |`https://npmjs.com/package/@janus-idp/backstage-plugin-topology/v/1.27.5[@janus-idp/backstage-plugin-topology]` |1.27.5 +|`./dynamic-plugins/dist/janus-idp-backstage-plugin-topology` + + +|=== diff --git a/modules/dynamic-plugins/ref-rh-supported-plugins.template.adoc b/modules/dynamic-plugins/ref-rh-supported-plugins.template.adoc new file mode 100644 index 0000000000..76faed5e61 --- /dev/null +++ b/modules/dynamic-plugins/ref-rh-supported-plugins.template.adoc @@ -0,0 +1,12 @@ +// This page is generated! Do not edit the .adoc file, but instead run rhdh-supported-plugins.sh to regen this page from the latest plugin metadata. 
+// cd /path/to/rhdh-documentation; ./modules/dynamic-plugins/rhdh-supported-plugins.sh; ./build/scripts/build.sh; google-chrome titles-generated/main/plugin-rhdh/index.html + += {company-name} supported plugins + +{company-name} supports the following %%COUNT_1%% plugins: + +[%header,cols=4*] +|=== +|*Name* |*Plugin* |*Version* |*Path and required variables* +%%TABLE_CONTENT_1%% +|=== diff --git a/modules/dynamic-plugins/ref-rh-tech-preview-plugins.adoc b/modules/dynamic-plugins/ref-rh-tech-preview-plugins.adoc new file mode 100644 index 0000000000..35fef4029c --- /dev/null +++ b/modules/dynamic-plugins/ref-rh-tech-preview-plugins.adoc @@ -0,0 +1,49 @@ +// This page is generated! Do not edit the .adoc file, but instead run rhdh-supported-plugins.sh to regen this page from the latest plugin metadata. +// cd /path/to/rhdh-documentation; ./modules/dynamic-plugins/rhdh-supported-plugins.sh; ./build/scripts/build.sh; google-chrome titles-generated/main/plugin-rhdh/index.html + += {company-name} Technology Preview plugins + +{company-name} provides Technology Preview support for the following 7 plugins: + +[%header,cols=4*] +|=== +|*Name* |*Plugin* |*Version* |*Path and required variables* +|Ansible Automation Platform (AAP) |`https://npmjs.com/package/@janus-idp/backstage-plugin-aap-backend/v/1.9.3[@janus-idp/backstage-plugin-aap-backend]` |1.9.3 +|`./dynamic-plugins/dist/janus-idp-backstage-plugin-aap-backend-dynamic` + +`AAP_BASE_URL` + +`AAP_AUTH_TOKEN` + + +|ACR |`https://npmjs.com/package/@janus-idp/backstage-plugin-acr/v/1.7.11[@janus-idp/backstage-plugin-acr]` |1.7.11 +|`./dynamic-plugins/dist/janus-idp-backstage-plugin-acr` + + +|Bulk Import |`https://npmjs.com/package/@janus-idp/backstage-plugin-bulk-import/v/1.4.9[@janus-idp/backstage-plugin-bulk-import]` |1.4.9 +|`./dynamic-plugins/dist/janus-idp-backstage-plugin-bulk-import` + + +|JFrog Artifactory 
|`https://npmjs.com/package/@janus-idp/backstage-plugin-jfrog-artifactory/v/1.7.5[@janus-idp/backstage-plugin-jfrog-artifactory]` |1.7.5 +|`./dynamic-plugins/dist/janus-idp-backstage-plugin-jfrog-artifactory` + + +|Nexus Repository Manager |`https://npmjs.com/package/@janus-idp/backstage-plugin-nexus-repository-manager/v/1.9.5[@janus-idp/backstage-plugin-nexus-repository-manager]` |1.9.5 +|`./dynamic-plugins/dist/janus-idp-backstage-plugin-nexus-repository-manager` + + +|ServiceNow |`https://npmjs.com/package/@janus-idp/backstage-scaffolder-backend-module-servicenow/v/1.7.2[@janus-idp/backstage-scaffolder-backend-module-servicenow]` |1.7.2 +|`./dynamic-plugins/dist/janus-idp-backstage-scaffolder-backend-module-servicenow-dynamic` + +`SERVICENOW_BASE_URL` + +`SERVICENOW_USERNAME` + +`SERVICENOW_PASSWORD` + + +|SonarQube |`https://npmjs.com/package/@janus-idp/backstage-scaffolder-backend-module-sonarqube/v/1.7.1[@janus-idp/backstage-scaffolder-backend-module-sonarqube]` |1.7.1 +|`./dynamic-plugins/dist/janus-idp-backstage-scaffolder-backend-module-sonarqube-dynamic` + + +|=== diff --git a/modules/dynamic-plugins/ref-rh-tech-preview-plugins.template.adoc b/modules/dynamic-plugins/ref-rh-tech-preview-plugins.template.adoc new file mode 100644 index 0000000000..7a429a71f5 --- /dev/null +++ b/modules/dynamic-plugins/ref-rh-tech-preview-plugins.template.adoc @@ -0,0 +1,12 @@ +// This page is generated! Do not edit the .adoc file, but instead run rhdh-supported-plugins.sh to regen this page from the latest plugin metadata. 
+// cd /path/to/rhdh-documentation; ./modules/dynamic-plugins/rhdh-supported-plugins.sh; ./build/scripts/build.sh; google-chrome titles-generated/main/plugin-rhdh/index.html + += {company-name} Technology Preview plugins + +{company-name} provides Technology Preview support for the following %%COUNT_2%% plugins: + +[%header,cols=4*] +|=== +|*Name* |*Plugin* |*Version* |*Path and required variables* +%%TABLE_CONTENT_2%% +|=== diff --git a/modules/dynamic-plugins/rhdh-supported-plugins.csv b/modules/dynamic-plugins/rhdh-supported-plugins.csv new file mode 100644 index 0000000000..198acac55e --- /dev/null +++ b/modules/dynamic-plugins/rhdh-supported-plugins.csv @@ -0,0 +1,66 @@ +"Name","Plugin","Role","Version","Support Level","Path","Required Variables","Default" +"Analytics Provider Segment ","@janus-idp/backstage-plugin-analytics-provider-segment","Frontend","1.7.2","Production","./dynamic-plugins/dist/janus-idp-backstage-plugin-analytics-provider-segment","`SEGMENT_WRITE_KEY`;`SEGMENT_TEST_MODE`;","Enabled" +"Argo CD ","@roadiehq/backstage-plugin-argo-cd","Frontend","2.6.5","Production","./dynamic-plugins/dist/roadiehq-backstage-plugin-argo-cd",";","Disabled" +"Argo CD ","@roadiehq/backstage-plugin-argo-cd-backend","Backend","3.0.3","Production","./dynamic-plugins/dist/roadiehq-backstage-plugin-argo-cd-backend-dynamic","`ARGOCD_USERNAME`;`ARGOCD_PASSWORD`;`ARGOCD_INSTANCE1_URL`;`ARGOCD_AUTH_TOKEN`;`ARGOCD_INSTANCE2_URL`;`ARGOCD_AUTH_TOKEN2`;","Disabled" +"GitHub ","@backstage/plugin-catalog-backend-module-github","Backend","0.6.5","Production","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-github-dynamic","`GITHUB_ORG`;","Disabled" +"GitHub Org ","@backstage/plugin-catalog-backend-module-github-org","Backend","0.1.17","Production","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-github-org-dynamic","`GITHUB_URL`;`GITHUB_ORG`;","Disabled" +"Keycloak 
","@janus-idp/backstage-plugin-keycloak-backend","Backend","1.13.2","Production","./dynamic-plugins/dist/janus-idp-backstage-plugin-keycloak-backend-dynamic","`KEYCLOAK_BASE_URL`;`KEYCLOAK_LOGIN_REALM`;`KEYCLOAK_REALM`;`KEYCLOAK_CLIENT_ID`;`KEYCLOAK_CLIENT_SECRET`;","Disabled" +"Kubernetes ","@backstage/plugin-kubernetes-backend","Backend","0.18.3","Production","./dynamic-plugins/dist/backstage-plugin-kubernetes-backend-dynamic","`K8S_CLUSTER_NAME`;`K8S_CLUSTER_URL`;`K8S_CLUSTER_TOKEN`;","Disabled" +"OCM ","@janus-idp/backstage-plugin-ocm","Frontend","4.4.4","Production","./dynamic-plugins/dist/janus-idp-backstage-plugin-ocm",";","Disabled" +"OCM ","@janus-idp/backstage-plugin-ocm-backend","Backend","4.4.2","Production","./dynamic-plugins/dist/janus-idp-backstage-plugin-ocm-backend-dynamic","`OCM_HUB_NAME`;`OCM_HUB_URL`;`OCM_SA_TOKEN`;","Disabled" +"Quay ","@janus-idp/backstage-plugin-quay","Frontend","1.11.7","Production","./dynamic-plugins/dist/janus-idp-backstage-plugin-quay",";","Disabled" +"Quay ","@janus-idp/backstage-scaffolder-backend-module-quay","Backend","1.7.1","Production","./dynamic-plugins/dist/janus-idp-backstage-scaffolder-backend-module-quay-dynamic",";","Enabled" +"RBAC ","@janus-idp/backstage-plugin-rbac","Frontend","1.29.5","Production","./dynamic-plugins/dist/janus-idp-backstage-plugin-rbac",";","Disabled" +"Regex ","@janus-idp/backstage-scaffolder-backend-module-regex","Backend","1.7.1","Production","./dynamic-plugins/dist/janus-idp-backstage-scaffolder-backend-module-regex-dynamic",";","Enabled" +"Tekton ","@janus-idp/backstage-plugin-tekton","Frontend","3.12.7","Production","./dynamic-plugins/dist/janus-idp-backstage-plugin-tekton",";","Disabled" +"Topology ","@janus-idp/backstage-plugin-topology","Frontend","1.27.5","Production","./dynamic-plugins/dist/janus-idp-backstage-plugin-topology",";","Disabled" +"Ansible Automation Platform (AAP) ","@janus-idp/backstage-plugin-aap-backend","Backend","1.9.3","Red Hat Tech 
Preview","./dynamic-plugins/dist/janus-idp-backstage-plugin-aap-backend-dynamic","`AAP_BASE_URL`;`AAP_AUTH_TOKEN`;","Disabled" +"ACR ","@janus-idp/backstage-plugin-acr","Frontend","1.7.11","Red Hat Tech Preview","./dynamic-plugins/dist/janus-idp-backstage-plugin-acr",";","Disabled" +"Bulk Import ","@janus-idp/backstage-plugin-bulk-import","Frontend","1.4.9","Red Hat Tech Preview","./dynamic-plugins/dist/janus-idp-backstage-plugin-bulk-import",";","Disabled" +"JFrog Artifactory ","@janus-idp/backstage-plugin-jfrog-artifactory","Frontend","1.7.5","Red Hat Tech Preview","./dynamic-plugins/dist/janus-idp-backstage-plugin-jfrog-artifactory",";","Disabled" +"Nexus Repository Manager ","@janus-idp/backstage-plugin-nexus-repository-manager","Frontend","1.9.5","Red Hat Tech Preview","./dynamic-plugins/dist/janus-idp-backstage-plugin-nexus-repository-manager",";","Disabled" +"ServiceNow ","@janus-idp/backstage-scaffolder-backend-module-servicenow","Backend","1.7.2","Red Hat Tech Preview","./dynamic-plugins/dist/janus-idp-backstage-scaffolder-backend-module-servicenow-dynamic","`SERVICENOW_BASE_URL`;`SERVICENOW_USERNAME`;`SERVICENOW_PASSWORD`;","Disabled" +"SonarQube ","@janus-idp/backstage-scaffolder-backend-module-sonarqube","Backend","1.7.1","Red Hat Tech Preview","./dynamic-plugins/dist/janus-idp-backstage-scaffolder-backend-module-sonarqube-dynamic",";","Disabled" +"3scale ","@backstage-community/plugin-3scale-backend","Backend","1.8.2","Community Support","./dynamic-plugins/dist/backstage-community-plugin-3scale-backend-dynamic","`THREESCALE_BASE_URL`;`THREESCALE_ACCESS_TOKEN`;","Disabled" +"Argo CD ","@roadiehq/scaffolder-backend-argocd","Backend","1.1.27","Community Support","./dynamic-plugins/dist/roadiehq-scaffolder-backend-argocd-dynamic","`ARGOCD_USERNAME`;`ARGOCD_PASSWORD`;`ARGOCD_INSTANCE1_URL`;`ARGOCD_AUTH_TOKEN`;`ARGOCD_INSTANCE2_URL`;`ARGOCD_AUTH_TOKEN2`;","Disabled" +"Argo CD (Red Hat) 
","@backstage-community/plugin-redhat-argocd","Frontend","1.8.3","Community Support","./dynamic-plugins/dist/backstage-community-plugin-redhat-argocd",";","Disabled" +"Azure ","@backstage/plugin-scaffolder-backend-module-azure","Backend","0.1.14","Community Support","./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-azure-dynamic",";","Disabled" +"Azure Devops ","@backstage/plugin-azure-devops","Frontend","0.4.4","Community Support","./dynamic-plugins/dist/backstage-plugin-azure-devops",";","Disabled" +"Azure Devops ","@backstage/plugin-azure-devops-backend","Backend","0.6.5","Community Support","./dynamic-plugins/dist/backstage-plugin-azure-devops-backend-dynamic","`AZURE_TOKEN`;`AZURE_ORG`;","Disabled" +"Azure Repositories ","@parfuemerie/douglas-scaffolder-backend-module-azure-repositories","Backend","0.3.0","Community Support","./dynamic-plugins/dist/parfuemerie-douglas-scaffolder-backend-module-azure-repositories",";","Disabled" +"Bitbucket Cloud ","@backstage/plugin-catalog-backend-module-bitbucket-cloud","Backend","0.2.9","Community Support","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-bitbucket-cloud-dynamic","`BITBUCKET_WORKSPACE`;","Disabled" +"Bitbucket Cloud ","@backstage/plugin-scaffolder-backend-module-bitbucket-cloud","Backend","0.1.12","Community Support","./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-bitbucket-cloud-dynamic",";","Disabled" +"Bitbucket Server ","@backstage/plugin-catalog-backend-module-bitbucket-server","Backend","0.1.36","Community Support","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-bitbucket-server-dynamic","`BITBUCKET_HOST`;","Disabled" +"Bitbucket Server ","@backstage/plugin-scaffolder-backend-module-bitbucket-server","Backend","0.1.12","Community Support","./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-bitbucket-server-dynamic",";","Disabled" +"Bulk Import 
","@janus-idp/backstage-plugin-bulk-import-backend","Backend","1.5.4","Community Support","./dynamic-plugins/dist/janus-idp-backstage-plugin-bulk-import-backend-dynamic",";","Disabled" +"Datadog ","@roadiehq/backstage-plugin-datadog","Frontend","2.3.0","Community Support","./dynamic-plugins/dist/roadiehq-backstage-plugin-datadog",";","Disabled" +"Dynatrace ","@backstage/plugin-dynatrace","Frontend","10.0.4","Community Support","./dynamic-plugins/dist/backstage-plugin-dynatrace",";","Disabled" +"Gerrit ","@backstage/plugin-scaffolder-backend-module-gerrit","Backend","0.1.14","Community Support","./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-gerrit-dynamic",";","Disabled" +"GitHub ","@backstage/plugin-scaffolder-backend-module-github","Backend","0.4.0","Community Support","./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-github-dynamic",";","Disabled" +"GitHub Actions ","@backstage/plugin-github-actions","Frontend","0.6.16","Community Support","./dynamic-plugins/dist/backstage-plugin-github-actions",";","Disabled" +"GitHub Insights ","@roadiehq/backstage-plugin-github-insights","Frontend","2.3.31","Community Support","./dynamic-plugins/dist/roadiehq-backstage-plugin-github-insights",";","Disabled" +"GitHub Issues ","@backstage/plugin-github-issues","Frontend","0.4.2","Community Support","./dynamic-plugins/dist/backstage-plugin-github-issues",";","Disabled" +"GitHub Pull Requests ","@roadiehq/backstage-plugin-github-pull-requests","Frontend","2.5.29","Community Support","./dynamic-plugins/dist/roadiehq-backstage-plugin-github-pull-requests",";","Disabled" +"GitLab ","@immobiliarelabs/backstage-plugin-gitlab","Frontend","6.6.0","Community Support","./dynamic-plugins/dist/immobiliarelabs-backstage-plugin-gitlab",";","Disabled" +"GitLab ","@backstage/plugin-catalog-backend-module-gitlab","Backend","0.3.21","Community Support","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-gitlab-dynamic",";","Disabled" +"GitLab 
","@immobiliarelabs/backstage-plugin-gitlab-backend","Backend","6.6.0","Community Support","./dynamic-plugins/dist/immobiliarelabs-backstage-plugin-gitlab-backend-dynamic","`GITLAB_HOST`;`GITLAB_TOKEN`;","Disabled" +"GitLab ","@backstage/plugin-scaffolder-backend-module-gitlab","Backend","0.4.4","Community Support","./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-gitlab-dynamic",";","Disabled" +"GitLab Org ","@backstage/plugin-catalog-backend-module-gitlab-org","Backend","0.0.5","Community Support","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-gitlab-org-dynamic",";","Disabled" +"Http Request ","@roadiehq/scaffolder-backend-module-http-request","Backend","4.3.2","Community Support","./dynamic-plugins/dist/roadiehq-scaffolder-backend-module-http-request-dynamic",";","Disabled" +"Jenkins ","@backstage/plugin-jenkins","Frontend","0.9.10","Community Support","./dynamic-plugins/dist/backstage-plugin-jenkins",";","Disabled" +"Jenkins ","@backstage/plugin-jenkins-backend","Backend","0.4.5","Community Support","./dynamic-plugins/dist/backstage-plugin-jenkins-backend-dynamic","`JENKINS_URL`;`JENKINS_USERNAME`;`JENKINS_TOKEN`;","Disabled" +"Jira ","@roadiehq/backstage-plugin-jira","Frontend","2.5.8","Community Support","./dynamic-plugins/dist/roadiehq-backstage-plugin-jira",";","Disabled" +"Kubernetes ","@backstage/plugin-kubernetes","Frontend","0.11.12","Community Support","./dynamic-plugins/dist/backstage-plugin-kubernetes",";","Disabled" +"Ldap ","@backstage/plugin-catalog-backend-module-ldap","Backend","0.7.0","Community Support","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-ldap-dynamic",";","Disabled" +"Lighthouse ","@backstage/plugin-lighthouse","Frontend","0.4.20","Community Support","./dynamic-plugins/dist/backstage-plugin-lighthouse",";","Disabled" +"Logs ","@backstage/plugin-catalog-backend-module-logs","Backend","0.0.1","Community 
Support","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-logs-dynamic",";","Disabled" +"MS Graph ","@backstage/plugin-catalog-backend-module-msgraph","Backend","0.5.30","Community Support","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-msgraph-dynamic",";","Disabled" +"PagerDuty ","@pagerduty/backstage-plugin","Frontend","0.15.0","Community Support","./dynamic-plugins/dist/pagerduty-backstage-plugin",";","Disabled" +"PagerDuty ","@pagerduty/backstage-plugin-backend","Backend","0.6.1","Community Support","./dynamic-plugins/dist/pagerduty-backstage-plugin-backend-dynamic","`PAGERDUTY_API_BASE`;`PAGERDUTY_CLIENT_ID`;`PAGERDUTY_CLIENT_SECRET`;`PAGERDUTY_SUBDOMAIN`;","Disabled" +"Pingidentity ","@backstage-community/plugin-catalog-backend-module-pingidentity","Backend","0.1.2","Community Support","./dynamic-plugins/dist/backstage-community-plugin-catalog-backend-module-pingidentity-dynamic",";","Disabled" +"Scaffolder Relation Processor ","@backstage-community/plugin-catalog-backend-module-scaffolder-relation-processor","Backend","1.2.6","Community Support","./dynamic-plugins/dist/backstage-community-plugin-catalog-backend-module-scaffolder-relation-processor-dynamic",";","Disabled" +"Security Insights ","@roadiehq/backstage-plugin-security-insights","Frontend","2.3.19","Community Support","./dynamic-plugins/dist/roadiehq-backstage-plugin-security-insights",";","Disabled" +"SonarQube ","@backstage/plugin-sonarqube","Frontend","0.7.17","Community Support","./dynamic-plugins/dist/backstage-plugin-sonarqube",";","Disabled" +"SonarQube ","@backstage/plugin-sonarqube-backend","Backend","0.2.20","Community Support","./dynamic-plugins/dist/backstage-plugin-sonarqube-backend-dynamic","`SONARQUBE_URL`;`SONARQUBE_TOKEN`;","Disabled" +"Tech Radar ","@backstage/plugin-tech-radar","Frontend","0.7.4","Community Support","./dynamic-plugins/dist/backstage-plugin-tech-radar",";","Disabled" +"Utils 
","@roadiehq/scaffolder-backend-module-utils","Backend","1.17.1","Community Support","./dynamic-plugins/dist/roadiehq-scaffolder-backend-module-utils-dynamic",";","Disabled" diff --git a/modules/dynamic-plugins/rhdh-supported-plugins.sh b/modules/dynamic-plugins/rhdh-supported-plugins.sh new file mode 100755 index 0000000000..22a7b2191b --- /dev/null +++ b/modules/dynamic-plugins/rhdh-supported-plugins.sh @@ -0,0 +1,322 @@ +#!/bin/bash + +# script to generate rhdh-supported-plugins.adoc from content in +# https://github.com/janus-idp/backstage-plugins/tree/main/plugins/ */package.json +# https://github.com/janus-idp/backstage-showcase/tree/main/dynamic-plugins/wrappers/ */json + +SCRIPT_DIR=$(cd "$(dirname "$0")" || exit; pwd) + +usage() { + cat </dev/null || exit + git clone https://github.com/janus-idp/backstage-plugins --depth 1 -b "$BRANCH" + popd >/dev/null || exit +fi + +# TODO switch this to redhat-developer/rhdh +if [[ ! -d /tmp/backstage-showcase ]]; then + pushd /tmp >/dev/null || exit + git clone https://github.com/janus-idp/backstage-showcase --depth 1 -b "$BRANCH" + popd >/dev/null || exit +fi + +# thanks to https://stackoverflow.com/questions/42925485/making-a-script-that-transforms-sentences-to-title-case +# shellcheck disable=SC2048 disable=SC2086 +titlecase() { + for f in ${*} ; do \ + case $f in + aap) echo -n "Ansible Automation Platform (AAP) ";; + # UPPERCASE these exceptions + acr|cd|ocm|rbac) echo -n "${f^^} ";; + # MixedCase exceptions + argocd) echo -n "Argo CD ";; + github) echo -n "GitHub ";; + gitlab) echo -n "GitLab ";; + jfrog) echo -n "JFrog ";; + msgraph) echo -n "MS Graph ";; + pagerduty) echo -n "PagerDuty ";; + servicenow) echo -n "ServiceNow ";; + sonarqube) echo -n "SonarQube ";; + techdocs) echo -n "TechDocs ";; + # Uppercase the first letter + *) echo -n "${f^} " ;; + esac; + done; echo; +} + +# generate a list of plugin:version mapping from the following files + # * dynamic-plugins/imports/package.json#.peerDependencies or 
.dependencies + # * packages/app/package.json#.dependencies + # * packages/backend/package.json#.dependencies + pluginVersFile=/tmp/plugin-versions.txt + jq -r '.peerDependencies' /tmp/backstage-showcase/dynamic-plugins/imports/package.json | grep -E -v "\"\*\"|\{|\}" | grep "@" | tr -d "," > $pluginVersFile + jq -r '.dependencies' /tmp/backstage-showcase/packages/{app,backend}/package.json | grep -E -v "\"\*\"|\{|\}" | grep "@" | tr -d "," >> $pluginVersFile + cat $pluginVersFile | sort -uV > $pluginVersFile.out; mv -f $pluginVersFile.out $pluginVersFile + +# create arrays of adoc and csv content +declare -A adoc1 +declare -A adoc2 +declare -A adoc3 +declare -A csv + +# process 2 folders of json files +jsons=$(find /tmp/backstage-showcase/dynamic-plugins/wrappers/ /tmp/backstage-plugins/plugins/ -maxdepth 2 -name package.json | sort -V) +c=0 +tot=0 +for j in $jsons; do + (( tot++ )) || true +done + +# string listing the enabled-by-default plugins to add to con-preinstalled-dynamic-plugins.template.adoc +ENABLED_PLUGINS="/tmp/ENABLED_PLUGINS.txt"; rm -f $ENABLED_PLUGINS; touch $ENABLED_PLUGINS + +for j in $jsons; do + (( c++ )) || true + echo "[$c/$tot] Processing $j ..." 
+ Required_Variables="" + Required_Variables_="" + + # extract content + Name=$(jq -r '.name' "$j") + + # backstage-plugin-catalog-backend-module-bitbucket-cloud => @backstage/plugin-catalog-backend-module-bitbucket-cloud + Plugin="${Name}" + if [[ $Plugin != "@"* ]]; then # don't update janus-idp/backstage-plugins plugin names + Plugin="$(echo "${Plugin}" | sed -r -e 's/([^-]+)-(.+)/\@\1\/\2/' -e 's|backstage/community-|backstage-community/|')" + fi + + # "dynamic-plugins/wrappers/backstage-plugin-catalog-backend-module-bitbucket-cloud" ==> ./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-bitbucket-cloud-dynamic + Path=$(jq -r '.repository.directory' "$j") + if [[ $Path == *"/wrappers/"* ]]; then + Path="./${Path/wrappers/dist}-dynamic" + else + Path="$(echo "${Plugin/@/}" | tr "/" "-")" + Path="./dynamic-plugins/dist/${Path}-dynamic" + fi + # remove dupe suffixes + Path="${Path/-dynamic-dynamic/-dynamic}" + + # echo "Path = $Path" + # shellcheck disable=SC2016 + found_in_default_config1=$(yq -r --arg Path "${Path/-dynamic/}" '.plugins[] | select(.package == $Path)' /tmp/backstage-showcase/dynamic-plugins.default.yaml) + # shellcheck disable=SC2016 + found_in_default_config2=$(yq -r --arg Path "${Path}" '.plugins[] | select(.package == $Path)' /tmp/backstage-showcase/dynamic-plugins.default.yaml) + # echo "[DEBUG] default configs:" + # echo " $found_in_default_config2" | jq -r '.package' + # echo " $found_in_default_config1" | jq -r '.package' + # echo " /wrappers/ == $j" + + Path2=$(echo "$found_in_default_config2" | jq -r '.package') # with -dynamic suffix + if [[ $Path2 ]]; then + Path=$Path2 + # echo "[DEBUG] check path - $Name :: got $Path2" + else + Path=$(echo "$found_in_default_config1" | jq -r '.package') # without -dynamic suffix + # echo "[DEBUG] check path - $Name :: got $Path" + fi + if [[ ! 
$Path ]]; then + continue + elif [[ $Path ]] || [[ "$j" == *"/wrappers/"* ]]; then + + # RHIDP-3203 just use the .package value from /tmp/backstage-showcase/dynamic-plugins.default.yaml as the Path + + + Role=$(jq -r '.backstage.role' "$j") + + Version=$(jq -r '.version' "$j") + # check this version against other references to the plugin in + # * dynamic-plugins/imports/package.json#.peerDependencies or .dependencies + # * packages/app/package.json#.dependencies + # * packages/backend/package.json#.dependencies + echo "[DEBUG] Check version of $Name is really $Version ..." + match=$(grep "\"$Name\": \"" $pluginVersFile || true) + if [[ $match ]]; then + Version=$(echo "${match}" | sed -r -e "s/.+\": \"([0-9.]+)\"/\1/") + echo "[DEBUG] Updated version = $Version" + fi + + # default to community unless it's a RH-authored plugin + Support_Level="Community Support" + keywords=$(jq -r '.keywords' "$j") + author=$(jq -r '.author' "$j") + if [[ $author == "Red Hat" ]]; then + if [[ $keywords == *"support:production"* ]]; then + Support_Level="Production" + elif [[ $keywords == *"support:tech-preview"* ]]; then + # mark Tech Preview wrappers as Community Supported + if [[ "$j" != *"/wrappers/"* ]]; then + Support_Level="Red Hat Tech Preview" + fi + fi + fi + + # compute Default from dynamic-plugins.default.yaml + # shellcheck disable=SC2016 + disabled=$(yq -r --arg Path "${Path/-dynamic/}" '.plugins[] | select(.package == $Path) | .disabled' /tmp/backstage-showcase/dynamic-plugins.default.yaml) + # shellcheck disable=SC2016 + if [[ ! 
$disabled ]]; then disabled=$(yq -r --arg Path "${Path}" '.plugins[] | select(.package == $Path) | .disabled' /tmp/backstage-showcase/dynamic-plugins.default.yaml); fi + # echo "Using Path = $Path got disabled = $disabled" + # null or false == enabled by default + Default="Enabled" + if [[ $disabled == "true" ]]; then + Default="Disabled" + else + # see https://issues.redhat.com/browse/RHIDP-3187 - only Production-level support (GA) plugins should be enabled by default + if [[ $Support_Level == "Production" ]]; then + echo "* \`${Plugin}\`" >> "$ENABLED_PLUGINS" + else + echo "[ERROR]: $Plugin should not be enabled by default as its support level is $Support_Level!" | tee -a ${ENABLED_PLUGINS}.errors + fi + fi + + # compute Required_Variables from dynamic-plugins.default.yaml - look for all caps params + # shellcheck disable=SC2016 + Required_Variables="$(yq -r --arg Path "${Path/-dynamic/}" '.plugins[] | select(.package == $Path)' /tmp/backstage-showcase/dynamic-plugins.default.yaml | grep "\${" | sed -r -e 's/.+: "\$\{(.+)\}".*/\1/')" + if [[ ! 
$Required_Variables ]]; then Required_Variables="$(yq -r --arg Path "${Path}" '.plugins[] | select(.package == $Path)' /tmp/backstage-showcase/dynamic-plugins.default.yaml | grep "\${" | sed -r -e 's/.+: "\$\{(.+)\}".*/\1/')"; fi + for RV in $Required_Variables; do + this_RV="$(echo "${RV}" | tr -d "\$\{\}\"")" + Required_Variables_="${Required_Variables_}\`$this_RV\`\n\n" + done + Required_Variables="${Required_Variables_}" + Required_Variables_CSV=$(echo -e "$Required_Variables" | tr -s "\n" ";") + # not currently used due to policy and support concern with upstream content linked from downstream doc + # URL="https://www.npmjs.com/package/$Plugin" + + # echo -n "Converting $Name" + Name="$(echo "${Name}" | sed -r \ + -e "s@(pagerduty)-.+@\1@g" \ + -e "s@.+(-plugin-scaffolder-backend-module|backstage-scaffolder-backend-module)-(.+)@\2@g" \ + -e "s@.+(-plugin-catalog-module|-plugin-catalog-backend-module)-(.+)@\2@g" \ + -e "s@.+(-scaffolder-backend-module|-plugin-catalog-backend-module)-(.+)@\2@g" \ + -e "s@.+(-scaffolder-backend-module|-scaffolder-backend|backstage-plugin)-(.+)@\2@g" \ + -e "s@(backstage-community-plugin-)@@g" \ + -e "s@(backstage-plugin)-(.+)@\2@g" \ + -e "s@(.+)(-backstage-plugin)@\1@g" \ + -e "s@-backend@@g" \ + )" + Name="$(echo "${Name}" | sed -r -e "s/redhat-(.+)/\1-\(Red-Hat\)/")" + PrettyName="$(titlecase "${Name//-/ }")" + # echo " to $Name and $PrettyName" + + # useful console output + for col in Name PrettyName Role Plugin Version Support_Level Path Required_Variables Default; do + echo "Got $col = ${!col}" + done + + # save in an array sorted by name, then role, with frontend before backend plugins (for consistency with 1.1 markup) + RoleSort=1; if [[ $Role != *"front"* ]]; then RoleSort=2; Role="Backend"; else Role="Frontend"; fi + if [[ $Plugin == *"scaffolder"* ]]; then RoleSort=3; fi + + # TODO include missing data fields for Provider and Description - see https://issues.redhat.com/browse/RHIDP-3496 and 
https://issues.redhat.com/browse/RHIDP-3440 + + # split into three tables based on support level + if [[ ${Support_Level} == "Production" ]]; then + adoc1["$Name-$RoleSort-$Role-$Plugin"]="|$PrettyName |\`https://npmjs.com/package/$Plugin/v/$Version[$Plugin]\` |$Version \n|\`$Path\`\n\n$Required_Variables" + elif [[ ${Support_Level} == "Red Hat Tech Preview" ]]; then + adoc2["$Name-$RoleSort-$Role-$Plugin"]="|$PrettyName |\`https://npmjs.com/package/$Plugin/v/$Version[$Plugin]\` |$Version \n|\`$Path\`\n\n$Required_Variables" + else + adoc3["$Name-$RoleSort-$Role-$Plugin"]="|$PrettyName |\`https://npmjs.com/package/$Plugin/v/$Version[$Plugin]\` |$Version \n|\`$Path\`\n\n$Required_Variables" + fi + + # NOTE: csv is not split into separate tables at this point + csv["$Name-$RoleSort-$Role-$Plugin"]="\"$PrettyName\",\"$Plugin\",\"$Role\",\"$Version\",\"$Support_Level\",\"$Path\",\"${Required_Variables_CSV}\",\"$Default\"" + else + (( tot-- )) || true + echo " Skip: not in backstage-showcase/dynamic-plugins.default.yaml !" 
+ fi + echo +done + +# create .csv file with header +echo -e "\"Name\",\"Plugin\",\"Role\",\"Version\",\"Support Level\",\"Path\",\"Required Variables\",\"Default\"" > "${0/.sh/.csv}" + +num_plugins=() +# append to .csv and .adocN files +rm -f "${0/.sh/.adoc1}" +sorted=(); while IFS= read -rd '' key; do sorted+=( "$key" ); done < <(printf '%s\0' "${!adoc1[@]}" | sort -z) +for key in "${sorted[@]}"; do + echo -e "${adoc1[$key]}" >> "${0/.sh/.ref-rh-supported-plugins}" + if [[ $key != *"techdocs"* ]]; then + echo -e "${csv[$key]}" >> "${0/.sh/.csv}" + fi +done +num_plugins+=(${#adoc1[@]}) + +rm -f "${0/.sh/.adoc2}" +sorted=(); while IFS= read -rd '' key; do sorted+=( "$key" ); done < <(printf '%s\0' "${!adoc2[@]}" | sort -z) +for key in "${sorted[@]}"; do echo -e "${adoc2[$key]}" >> "${0/.sh/.ref-rh-tech-preview-plugins}"; echo -e "${csv[$key]}" >> "${0/.sh/.csv}"; done +num_plugins+=(${#adoc2[@]}) + +rm -f "${0/.sh/.adoc3}" +sorted=(); while IFS= read -rd '' key; do sorted+=( "$key" ); done < <(printf '%s\0' "${!adoc3[@]}" | sort -z) +for key in "${sorted[@]}"; do echo -e "${adoc3[$key]}" >> "${0/.sh/.ref-community-plugins}"; echo -e "${csv[$key]}" >> "${0/.sh/.csv}"; done +num_plugins+=(${#adoc3[@]}) + +# merge the content from the three .adocX files into the .template.adoc file, replacing the TABLE_CONTENT markers +count=0 +for d in ref-rh-supported-plugins ref-rh-tech-preview-plugins ref-community-plugins; do + this_num_plugins=${num_plugins[$count]} + (( count = count + 1 )) + echo "[$count] Processing $d ..." 
+ adocfile="${0/.sh/.${d}}" + sed -e "/%%TABLE_CONTENT_${count}%%/{r $adocfile" -e 'd}' \ + -e "s/\%\%COUNT_${count}\%\%/$this_num_plugins/" \ + "${0/rhdh-supported-plugins.sh/${d}.template.adoc}" > "${0/rhdh-supported-plugins.sh/${d}.adoc}" + rm -f "$adocfile" +done + +# inject ENABLED_PLUGINS into con-preinstalled-dynamic-plugins.template.adoc +sed -e "/%%ENABLED_PLUGINS%%/{r $ENABLED_PLUGINS" -e 'd}' \ + "${0/rhdh-supported-plugins.sh/con-preinstalled-dynamic-plugins.template.adoc}" > "${0/rhdh-supported-plugins.sh/con-preinstalled-dynamic-plugins.adoc}" + +# summary of changes since last time +SCRIPT_DIR=$(cd "$(dirname "$0")" || exit; pwd) +pushd "$SCRIPT_DIR" >/dev/null || exit + updates=$(git diff "ref*plugins.adoc"| grep -E -v "\+\+|@@" | grep "+") + if [[ $updates ]]; then + echo "$(echo "$updates" | wc -l) Changes include:"; echo "$updates" + fi +popd >/dev/null || exit + +# see https://issues.redhat.com/browse/RHIDP-3187 - only GA plugins should be enabled by default +if [[ -f "${ENABLED_PLUGINS}.errors" ]]; then cat "${ENABLED_PLUGINS}.errors"; fi + +# cleanup +rm -f "$ENABLED_PLUGINS" "${ENABLED_PLUGINS}.errors" +# rm -fr /tmp/backstage-plugins /tmp/backstage-showcase diff --git a/modules/getting-started/con-audit-log-config.adoc b/modules/getting-started/con-audit-log-config.adoc new file mode 100644 index 0000000000..a675e3c2d2 --- /dev/null +++ b/modules/getting-started/con-audit-log-config.adoc @@ -0,0 +1,17 @@ +// Module included in the following assemblies: +// assembly-audit-log.adoc + +:_mod-docs-content-type: CONCEPT +[id="con-audit-log-config_{context}"] += Configuring audit logs for {product-short} on {ocp-short} + +Use the {ocp-short} web console to configure the following {ocp-short} logging components to use audit logging for {product-short}: + +Logging deployment:: +Configure the logging environment, including both the CPU and memory limits for each logging component. 
For more information, see link:https://docs.openshift.com/container-platform/4.15/observability/logging/config/cluster-logging-memory.html[{ocp-brand-name} - Configuring your Logging deployment]. + +Logging collector:: +Configure the `spec.collection` stanza in the `ClusterLogging` custom resource (CR) to use a supported modification to the log collector and collect logs from `STDOUT`. For more information, see link:https://docs.openshift.com/container-platform/4.15/observability/logging/log_collection_forwarding/cluster-logging-collector.html[{ocp-brand-name} - Configuring the logging collector]. + +Log forwarding:: +Send logs to specific endpoints inside and outside your {ocp-short} cluster by specifying a combination of outputs and pipelines in a `ClusterLogForwarder` CR. For more information, see link:https://docs.openshift.com/container-platform/4.15/observability/logging/log_collection_forwarding/cluster-logging-enabling-json-logging.html[{ocp-brand-name} - Enabling JSON log forwarding] and link:https://docs.openshift.com/container-platform/4.15/observability/logging/log_collection_forwarding/configuring-log-forwarding.html[{ocp-brand-name} - Configuring log forwarding]. diff --git a/modules/getting-started/con-audit-log-track-changes-catalog.adoc b/modules/getting-started/con-audit-log-track-changes-catalog.adoc new file mode 100644 index 0000000000..28784117b1 --- /dev/null +++ b/modules/getting-started/con-audit-log-track-changes-catalog.adoc @@ -0,0 +1,7 @@ +//This content is currently part of assembly-audit-log.adoc but may need to be included as a separate module in the future + +:_mod-docs-content-type: CONCEPT +[id="con-audit-log-track-changes-catalog_{context}"] += {product-short} catalog database changes + +In {product} {ocp-version-min} and later, changes to the catalog database are forwarded to a central log management system, such as Elasticsearch or Splunk, by default.
Administrators can view changes that add, remove, or update data in the catalog database to help ensure accountability and transparency of user actions. diff --git a/modules/getting-started/con-servicenow-custom-actions.adoc b/modules/getting-started/con-servicenow-custom-actions.adoc new file mode 100644 index 0000000000..01a47b3693 --- /dev/null +++ b/modules/getting-started/con-servicenow-custom-actions.adoc @@ -0,0 +1,12 @@ +[id='con-servicenow-custom-actions_{context}'] += ServiceNow Custom actions in {product} + +include::{docdir}/artifacts/snip-technology-preview.adoc[] + +In {product}, you can access ServiceNow custom actions (custom actions) for fetching and registering resources in the catalog. + +The custom actions in {product-short} enable you to facilitate and automate the management of records. Using the custom actions, you can perform the following actions: + +* Create, update, or delete a record +* Retrieve information about a single record or multiple records + diff --git a/modules/getting-started/con-techdocs-config-cicd.adoc b/modules/getting-started/con-techdocs-config-cicd.adoc new file mode 100644 index 0000000000..604542179a --- /dev/null +++ b/modules/getting-started/con-techdocs-config-cicd.adoc @@ -0,0 +1,27 @@ +:_mod-docs-content-type: CONCEPT +[id="con-techdocs-config-cicd_{context}"] += Configuring CI/CD to generate and publish TechDocs sites + +TechDocs reads the static generated documentation files from a cloud storage bucket, such as {odf-name}. The documentation site is generated on the CI/CD workflow associated with the repository containing the documentation files. You can generate docs on CI and publish to a cloud storage using the `techdocs-cli` CLI tool. 
+ +You can use the following example to create a script for TechDocs publication: + +[source,shell] +---- +# Prepare +REPOSITORY_URL='https://github.com/org/repo' +git clone $REPOSITORY_URL +cd repo + +# Install @techdocs/cli, mkdocs and mkdocs plugins +npm install -g @techdocs/cli +pip install "mkdocs-techdocs-core==1.*" + +# Generate +techdocs-cli generate --no-docker + +# Publish +techdocs-cli publish --publisher-type awsS3 --storage-name --entity +---- + +The TechDocs workflow starts the CI when a user makes changes in the repository containing the documentation files. You can configure the workflow to start only when files inside the `docs/` directory or `mkdocs.yml` are changed. diff --git a/modules/getting-started/con-techdocs-configure-storage.adoc b/modules/getting-started/con-techdocs-configure-storage.adoc new file mode 100644 index 0000000000..f193aa8145 --- /dev/null +++ b/modules/getting-started/con-techdocs-configure-storage.adoc @@ -0,0 +1,5 @@ +:_mod-docs-content-type: CONCEPT +[id="con-techdocs-configure-storage_{context}"] += Configuring storage for TechDocs files + +The TechDocs publisher stores generated files in local storage or in cloud storage, such as {odf-name}, Google GCS, AWS S3, or Azure Blob Storage. diff --git a/modules/getting-started/proc-add-custom-app-file-openshift-helm.adoc b/modules/getting-started/proc-add-custom-app-file-openshift-helm.adoc new file mode 100644 index 0000000000..6fbed3a951 --- /dev/null +++ b/modules/getting-started/proc-add-custom-app-file-openshift-helm.adoc @@ -0,0 +1,46 @@ +[id='proc-add-custom-app-file-openshift-helm_{context}'] += Adding a custom application configuration file to {ocp-short} using the Helm chart + +You can use the {product} Helm chart to add a custom application configuration file to your {ocp-short} instance. + +.Prerequisites + +* You have created an {ocp-brand-name} account. + +.Procedure + +. From the {ocp-short} web console, select the *ConfigMaps* tab. +. Click *Create ConfigMap*. 
+. From *Create ConfigMap* page, select the *YAML view* option in *Configure via* and make changes to the file, if needed. +. Click *Create*. +. Go to the *Helm* tab to see the list of Helm releases. +. Click the overflow menu on the Helm release that you want to use and select *Upgrade*. +. Use either the *Form view* or *YAML view* to edit the Helm configuration. + +** Using *Form view* ++ +.. Expand *Root Schema → Backstage chart schema → Backstage parameters → Extra app configuration files to inline into command arguments*. +.. Click the *Add Extra app configuration files to inline into command arguments* link. +.. Enter the value in the following fields: ++ +-- +* *configMapRef*: `app-config-rhdh` +* *filename*: `app-config-rhdh.yaml` +-- +.. Click *Upgrade*. + +** Using *YAML view* + +.. Set the value of the `upstream.backstage.extraAppConfig.configMapRef` and `upstream.backstage.extraAppConfig.filename` parameters as follows: ++ +[source, yaml] +---- +# ... other Red Hat Developer Hub Helm Chart configurations +upstream: + backstage: + extraAppConfig: + - configMapRef: app-config-rhdh + filename: app-config-rhdh.yaml +# ... other Red Hat Developer Hub Helm Chart configurations +---- +.. Click *Upgrade*. diff --git a/modules/getting-started/proc-audit-log-view.adoc b/modules/getting-started/proc-audit-log-view.adoc new file mode 100644 index 0000000000..3a77b8249e --- /dev/null +++ b/modules/getting-started/proc-audit-log-view.adoc @@ -0,0 +1,19 @@ +// Module included in the following assemblies: +// assembly-audit-log.adoc + +:_mod-docs-content-type: PROCEDURE +[id="proc-audit-log-view_{context}"] += Viewing audit logs in {product-short} + +Administrators can view, search, filter, and manage the log data from the {ocp-brand-name} web console. You can filter audit logs from other log types by using the `isAuditLog` field. + +.Prerequisites +* You are logged in as an administrator in the {ocp-short} web console. + +.Procedure + +. 
From the *Developer* perspective of the {ocp-short} web console, click the *Topology* tab. +. From the *Topology* view, click the pod that you want to view audit log data for. +. From the pod panel, click the *Resources* tab. +. From the *Pods* section of the *Resources* tab, click *View logs*. +. From the *Logs* view, enter `isAuditLog` into the *Search* field to filter audit logs from other log types. You can use the arrows to browse the logs containing the `isAuditLog` field. diff --git a/modules/getting-started/proc-customize-rhdh-homepage.adoc b/modules/getting-started/proc-customize-rhdh-homepage.adoc new file mode 100644 index 0000000000..c864ebedbd --- /dev/null +++ b/modules/getting-started/proc-customize-rhdh-homepage.adoc @@ -0,0 +1,170 @@ +[id='proc-customize-rhdh-homepage_{context}'] += Customizing the Home page in {product} + +To access the Home page in {product}, the base URL must include the `/developer-hub` proxy. You can configure the Home page by passing the data into the `app-config.yaml` file as a proxy. You can provide data to the Home page from the following sources: + +* JSON files hosted on GitHub or GitLab. +* A dedicated service that provides the Home page data in JSON format using an API. + +== Using hosted JSON files to provide data to the Home page + +.Prerequisites + +You have installed {product} by using either the Operator or Helm chart. + +For more information, see xref:{installing-on-ocp-book-url}#assembly-install-rhdh-ocp[{installing-on-ocp-book-title}]. 
+ +.Procedure + +To access the data from the JSON files, complete the following step: + +* Add the following code to the `app-config.yaml` file: ++ +[source,yaml] +---- +proxy: + endpoints: + # Other Proxies + # customize developer hub instance + '/developer-hub': + target: # i.e https://raw.githubusercontent.com/ + pathRewrite: + '^/api/proxy/developer-hub': # i.e /janus-idp/backstage-showcase/main/packages/app/public/homepage/data.json + changeOrigin: true + secure: true + # Change to "false" in case of using self hosted cluster with a self-signed certificate + headers: + : # optional and can be passed as needed i.e Authorization can be passed for private GitHub repo and PRIVATE-TOKEN can be passed for private GitLab repo +---- + +== Using a dedicated service to provide data to the Home page + +When using a dedicated service, you can do the following: + +* Use the same service to provide the data to all configurable {product-short} pages or use a different service for each page. +* Use the https://github.com/redhat-developer/red-hat-developer-hub-customization-provider[`red-hat-developer-hub-customization-provider`] as an example service, which provides data for both the Home and Tech Radar pages. The `red-hat-developer-hub-customization-provider` service provides the same data as default {product-short} data. You can fork the `red-hat-developer-hub-customization-provider` service repository from GitHub and modify it with your own data, if required. +* Deploy the `red-hat-developer-hub-customization-provider` service and the {product-short} Helm chart on the same cluster. + +.Prerequisites + +* You have installed the {product} using Helm Chart. +For more information, see xref:{installing-on-ocp-book-url}#assembly-install-rhdh-ocp-helm[{installing-on-ocp-book-title} with the Helm chart]. + +.Procedure + +To use a separate service to provide the Home page data, complete the following steps: + +. 
From the *Developer* perspective in the {ocp-brand-name} web console, click *+Add* > *Import from Git*. +. Enter the URL of your Git repository into the *Git Repo URL* field. ++ +-- +To use the `red-hat-developer-hub-customization-provider` service, add the URL for the https://github.com/redhat-developer/red-hat-developer-hub-customization-provider[red-hat-developer-hub-customization-provider] repository or your fork of the repository containing your customizations. +-- + +. On the *General* tab, enter *red-hat-developer-hub-customization-provider* in the *Name* field and click *Create*. +. On the *Advanced Options* tab, copy the value from the *Target Port*. ++ +[NOTE] +==== +The *Target Port* automatically generates a Kubernetes or {ocp-short} service to communicate with. +==== ++ +. Add the following code to the `app-config-rhdh.yaml` file: ++ +[source,yaml] +---- +proxy: + endpoints: + # Other Proxies + # customize developer hub instance + '/developer-hub': + target: ${HOMEPAGE_DATA_URL} + changeOrigin: true + # Change to "false" in case of using self-hosted cluster with a self-signed certificate + secure: true +---- +where `HOMEPAGE_DATA_URL` is defined as `pass:c[http://:8080]`, for example, `pass:c[http://rhdh-customization-provider:8080]`. ++ +[NOTE] +==== +The `red-hat-developer-hub-customization-provider` service exposes port 8080 by default. If you are using a custom port, you can specify it with the `PORT` environment variable in the `app-config-rhdh.yaml` file. +==== ++ +. Replace the `HOMEPAGE_DATA_URL` by adding the URL to `rhdh-secrets` or by directly replacing it in your custom ConfigMap. ++ +. Delete the {product-short} pod to ensure that the new configurations are loaded correctly. + +.Verification +* To view the service, navigate to the *Administrator* perspective in the {ocp-short} web console and click *Networking* > *Service*. ++ +[NOTE] +==== +You can also view the *Service Resources* in the Topology view.
+==== + +* Ensure that the provided API URL for the Home page returns the data in JSON format as shown in the following example: ++ +[source,json] +---- +[ + { + "title": "Dropdown 1", + "isExpanded": false, + "links": [ + { + "iconUrl": "https://imagehost.com/image.png", + "label": "Dropdown 1 Item 1", + "url": "https://example.com/" + }, + { + "iconUrl": "https://imagehost2.org/icon.png", + "label": "Dropdown 1 Item 2", + "url": "" + } + ] + }, + { + "title": "Dropdown 2", + "isExpanded": true, + "links": [ + { + "iconUrl": "http://imagehost3.edu/img.jpg", + "label": "Dropdown 2 Item 1", + "url": "http://example.com" + } + ] + } +] +---- ++ +[NOTE] +==== +If the request call fails or is not configured, the {product-short} instance falls back to the default local data. +==== + +* If the images or icons do not load, then allowlist them by adding your image or icon host URLs to the content security policy’s (csp) `img-src` in your custom ConfigMap as follows: + +[source,yaml] +---- +kind: ConfigMap +apiVersion: v1 +metadata: + name: app-config-rhdh +data: + app-config-rhdh.yaml: | + app: + title: Red Hat Developer Hub + backend: + csp: + connect-src: + - "'self'" + - 'http:' + - 'https:' + img-src: + - "'self'" + - 'data:' + - + - + - + # Other Configurations +---- diff --git a/modules/getting-started/proc-customize-rhdh-learning-paths.adoc b/modules/getting-started/proc-customize-rhdh-learning-paths.adoc new file mode 100644 index 0000000000..2f99273054 --- /dev/null +++ b/modules/getting-started/proc-customize-rhdh-learning-paths.adoc @@ -0,0 +1,82 @@ +[id='proc-customize-rhdh-learning-paths_{context}'] += Customizing the Learning Paths in {product} + +In {product}, you can configure Learning Paths by passing the data into the `app-config.yaml` file as a proxy. The base URL must include the `/developer-hub/learning-paths` proxy. 
+ +[NOTE] +==== +Due to the use of overlapping `pathRewrites` for both the `learning-path` and `homepage` quick access proxies, you must create the `learning-paths` configuration (`^/api/proxy/developer-hub/learning-paths`) before you create the `homepage` configuration (`^/api/proxy/developer-hub`). + +For more information about customizing the Home page in {product}, see xref:proc-customize-rhdh-homepage_rhdh-getting-started[Customizing the Home page in {product}]. +==== + +You can provide data to the Learning Path from the following sources: + +* JSON files hosted on GitHub or GitLab. +* A dedicated service that provides the Learning Path data in JSON format using an API. + +== Using hosted JSON files to provide data to the Learning Paths + +.Prerequisites + +You have installed {product} by using either the Operator or Helm chart. +For more information, see xref:{installing-on-ocp-book-url}#assembly-install-rhdh-ocp[{installing-on-ocp-book-title}]. + +.Procedure + +To access the data from the JSON files, complete the following step: + +* Add the following code to the `app-config.yaml` file: ++ +[source,yaml] +---- +proxy: + endpoints: + '/developer-hub': + target: https://raw.githubusercontent.com/ + pathRewrite: + '^/api/proxy/developer-hub/learning-paths': '/janus-idp/backstage-showcase/main/packages/app/public/learning-paths/data.json' + '^/api/proxy/developer-hub/tech-radar': '/janus-idp/backstage-showcase/main/packages/app/public/tech-radar/data-default.json' + '^/api/proxy/developer-hub': '/janus-idp/backstage-showcase/main/packages/app/public/homepage/data.json' + changeOrigin: true + secure: true +---- + +== Using a dedicated service to provide data to the Learning Paths + +When using a dedicated service, you can do the following: + +* Use the same service to provide the data to all configurable {product-short} pages or use a different service for each page.
+* Use the https://github.com/redhat-developer/red-hat-developer-hub-customization-provider[`red-hat-developer-hub-customization-provider`] as an example service, which provides data for both the Home and Tech Radar pages. The `red-hat-developer-hub-customization-provider` service provides the same data as default {product-short} data. You can fork the `red-hat-developer-hub-customization-provider` service repository from GitHub and modify it with your own data, if required. +* Deploy the `red-hat-developer-hub-customization-provider` service and the {product-short} Helm chart on the same cluster. + +.Prerequisites + +* You have installed the {product} using Helm chart. +For more information, see xref:{installing-on-ocp-book-url}#assembly-install-rhdh-ocp[{installing-on-ocp-book-title}]. + +.Procedure + +To use a dedicated service to provide the Learning Path data, complete the following steps: + +. Add the following code to the `app-config-rhdh.yaml` file: ++ +[source,yaml] +---- + proxy: + endpoints: + # Other Proxies + '/developer-hub/learning-paths': + target: ${LEARNING_PATH_DATA_URL} + changeOrigin: true + # Change to "false" in case of using self hosted cluster with a self-signed certificate + secure: true +---- +where the `LEARNING_PATH_DATA_URL` is defined as `pass:c[http:///learning-paths]`, for example, `pass:c[http://rhdh-customization-provider/learning-paths]`. ++ +[NOTE] +==== +You can define the `LEARNING_PATH_DATA_URL` by adding it to `rhdh-secrets` or by directly replacing it with its value in your custom ConfigMap. +==== ++ +. Delete the {product-short} pod to ensure that the new configurations are loaded correctly. 
diff --git a/modules/getting-started/proc-customize-rhdh-tech-radar-page.adoc b/modules/getting-started/proc-customize-rhdh-tech-radar-page.adoc new file mode 100644 index 0000000000..20764cbe68 --- /dev/null +++ b/modules/getting-started/proc-customize-rhdh-tech-radar-page.adoc @@ -0,0 +1,89 @@ +[id='proc-customize-rhdh-tech-radar-page_{context}'] += Customizing the Tech Radar page in {product} + +In {product}, the Tech Radar page is provided by the `tech-radar` dynamic plugin, which is disabled by default. For information about enabling dynamic plugins in {product}, see link:{LinkPluginsGuide}[Configuring plugins in {product}]. + +In {product}, you can configure the Tech Radar page by passing the data into the `app-config.yaml` file as a proxy. The base Tech Radar URL must include the `/developer-hub/tech-radar` proxy. + +[NOTE] +==== +Due to the use of overlapping `pathRewrites` for both the `tech-radar` and `homepage` quick access proxies, you must create the `tech-radar` configuration (`^/api/proxy/developer-hub/tech-radar`) before you create the `homepage` configuration (`^/api/proxy/developer-hub`). + +For more information about customizing the Home page in {product}, see xref:proc-customize-rhdh-homepage_rhdh-getting-started[Customizing the Home page in {product}]. +==== + +You can provide data to the Tech Radar page from the following sources: + +* JSON files hosted on GitHub or GitLab. +* A dedicated service that provides the Tech Radar data in JSON format using an API. + +== Using hosted JSON files to provide data to the Tech Radar page + +.Prerequisites + +You have installed {product} by using either the Operator or Helm chart. +For more information, see xref:{installing-on-ocp-book-url}#assembly-install-rhdh-ocp[{installing-on-ocp-book-title}].
+ +.Procedure + +To access the data from the JSON files, complete the following step: + +* Add the following code to the `app-config.yaml` file: ++ +[source,yaml] +---- +proxy: + endpoints: + # Other Proxies + # customize developer hub instance + '/developer-hub': + target: # i.e https://raw.githubusercontent.com/ + pathRewrite: + '^/api/proxy/developer-hub/tech-radar': # i.e /janus-idp/backstage-showcase/main/packages/app/public/tech-radar/data-default.json + '^/api/proxy/developer-hub': # i.e /janus-idp/backstage-showcase/main/packages/app/public/homepage/data.json + changeOrigin: true + secure: true + + # Change to "false" in case of using self hosted cluster with a self-signed certificate + headers: + : # optional and can be passed as needed i.e Authorization can be passed for private GitHub repo and PRIVATE-TOKEN can be passed for private GitLab repo +---- + +== Using a dedicated service to provide data to the Tech Radar page + +When using a dedicated service, you can do the following: + +* Use the same service to provide the data to all configurable {product-short} pages or use a different service for each page. +* Use the https://github.com/redhat-developer/red-hat-developer-hub-customization-provider[`red-hat-developer-hub-customization-provider`] as an example service, which provides data for both the Home and Tech Radar pages. The `red-hat-developer-hub-customization-provider` service provides the same data as default {product-short} data. You can fork the `red-hat-developer-hub-customization-provider` service repository from GitHub and modify it with your own data, if required. +* Deploy the `red-hat-developer-hub-customization-provider` service and the {product-short} Helm chart on the same cluster. + +.Prerequisites + +* You have installed the {product} using Helm Chart. +For more information, see xref:{installing-on-ocp-book-url}#assembly-install-rhdh-ocp-helm[{installing-on-ocp-book-title} with the Helm chart]. 
+ +.Procedure + +To use a separate service to provide the Tech Radar data, complete the following steps: + +. Add the following code to the `app-config-rhdh.yaml` file: ++ +[source,yaml] +---- +proxy: + endpoints: + # Other Proxies + '/developer-hub/tech-radar': + target: ${TECHRADAR_DATA_URL} + changeOrigin: true + # Change to "false" in case of using self hosted cluster with a self-signed certificate + secure: true +---- +where the `TECHRADAR_DATA_URL` is defined as `pass:c[http:///tech-radar]`, for example, `pass:c[http://rhdh-customization-provider/tech-radar]`. ++ +[NOTE] +==== +You can define the `TECHRADAR_DATA_URL` by adding it to `rhdh-secrets` or by directly replacing it with its value in your custom ConfigMap. +==== ++ +. Delete the {product-short} pod to ensure that the new configurations are loaded correctly. diff --git a/modules/getting-started/proc-enable-servicenow-custom-actions-plugin.adoc b/modules/getting-started/proc-enable-servicenow-custom-actions-plugin.adoc new file mode 100644 index 0000000000..7ed0a5c055 --- /dev/null +++ b/modules/getting-started/proc-enable-servicenow-custom-actions-plugin.adoc @@ -0,0 +1,47 @@ +[id='proc-enable-servicenow-custom-actions-plugin_{context}'] += Enabling ServiceNow custom actions plugin in {product} + +In {product}, the ServiceNow custom actions are provided as a pre-loaded plugin, which is disabled by default. You can enable the custom actions plugin using the following procedure. + +.Prerequisites + +* {product} is installed and running. +For more information about installing the {product-short}, see xref:{installing-on-ocp-book-url}#assembly-install-rhdh-ocp-helm[{installing-on-ocp-book-title} with the Helm chart]. +* You have created a project in the {product-short}. + +.Procedure + +. 
To activate the custom actions plugin, add a `package` with plugin name and update the `disabled` field in your Helm Chart as follows: ++ +-- +[source,yaml] +---- +global: + dynamic: + includes: + - dynamic-plugins.default.yaml + plugins: + - package: ./dynamic-plugins/dist/janus-idp-backstage-scaffolder-backend-module-servicenow-dynamic + disabled: false +---- + +[NOTE] +==== +The default configuration for a plugin is extracted from the `dynamic-plugins.default.yaml` file, however, you can use a `pluginConfig` entry to override the default configuration. +==== +-- + +. Set the following variables in the Helm Chart to access the custom actions: ++ +-- +[source,yaml] +---- +servicenow: + # The base url of the ServiceNow instance. + baseUrl: ${SERVICENOW_BASE_URL} + # The username to use for authentication. + username: ${SERVICENOW_USERNAME} + # The password to use for authentication. + password: ${SERVICENOW_PASSWORD} +---- +-- diff --git a/modules/getting-started/proc-techdocs-config-cicd-prep-repo.adoc b/modules/getting-started/proc-techdocs-config-cicd-prep-repo.adoc new file mode 100644 index 0000000000..4a4bc7a951 --- /dev/null +++ b/modules/getting-started/proc-techdocs-config-cicd-prep-repo.adoc @@ -0,0 +1,14 @@ +:_mod-docs-content-type: PROCEDURE +[id="proc-techdocs-config-cicd-prep-repo_{context}"] += Preparing your repository for CI + +The first step on the CI is to clone your documentation source repository in a working directory. 
+ +.Procedure + +* To clone your documentation source repository in a working directory, enter the following command: ++ +[source,terminal] +---- +git clone +---- diff --git a/modules/getting-started/proc-techdocs-configure-odf-helm.adoc b/modules/getting-started/proc-techdocs-configure-odf-helm.adoc new file mode 100644 index 0000000000..631c2111ff --- /dev/null +++ b/modules/getting-started/proc-techdocs-configure-odf-helm.adoc @@ -0,0 +1,34 @@ +:_mod-docs-content-type: PROCEDURE +[id="proc-techdocs-configure-odf-helm_{context}"] += Making object storage accessible to containers by using the Helm chart + +Creating a `ObjectBucketClaim` custom resource (CR) automatically generates both the {product-short} `ObjectBucketClaim` config map and secret. The config map and secret contain `ObjectBucket` access information. Adding the access information to the Helm chart configuration makes it accessible to the {product-short} container by adding the following environment variables to the container: + +* `BUCKET_NAME` +* `BUCKET_HOST` +* `BUCKET_PORT` +* `BUCKET_REGION` +* `BUCKET_SUBREGION` +* `AWS_ACCESS_KEY_ID` +* `AWS_SECRET_ACCESS_KEY` + +These variables are then used in the TechDocs plugin configuration. + +.Prerequisites + +* You have installed {product} on {ocp-short} using the Helm chart. +* You have created an `ObjectBucketClaim` CR for storing files generated by TechDocs. For more information see xref:proc-techdocs-using-odf-storage_{context}[Using {odf-name} for file storage] + +.Procedure + +* In the `upstream.backstage` key in the Helm chart values, enter the name of the {product-short} `ObjectBucketClaim` secret as the value for the `extraEnvVarsSecrets` field and the `extraEnvVarsCM` field. 
For example: ++ +[source,yaml] +---- +upstream: + backstage: + extraEnvVarsSecrets: + - + extraEnvVarsCM: + - +---- diff --git a/modules/getting-started/proc-techdocs-configure-odf-operator.adoc b/modules/getting-started/proc-techdocs-configure-odf-operator.adoc new file mode 100644 index 0000000000..d903c5308d --- /dev/null +++ b/modules/getting-started/proc-techdocs-configure-odf-operator.adoc @@ -0,0 +1,39 @@ +:_mod-docs-content-type: PROCEDURE +[id="proc-techdocs-configure-odf-operator_{context}"] += Making object storage accessible to containers by using the Operator + +Creating a `ObjectBucketClaim` Custom Resource (CR) automatically generates both the {product-short} `ObjectBucketClaim` config map and secret. The config map and secret contain `ObjectBucket` access information. Adding the access information to the Operator configuration makes it accessible to the {product-short} container by adding the following environment variables to the container: + +* `BUCKET_NAME` +* `BUCKET_HOST` +* `BUCKET_PORT` +* `BUCKET_REGION` +* `BUCKET_SUBREGION` +* `AWS_ACCESS_KEY_ID` +* `AWS_SECRET_ACCESS_KEY` + +These variables are then used in the TechDocs plugin configuration. + +.Prerequisites + +* You have installed {product} on {ocp-short} using the Operator. +* You have created an `ObjectBucketClaim` CR for storing files generated by TechDocs. + +.Procedure + +* In the {product-short} `Backstage` CR, enter the name of the {product-short} `ObjectBucketClaim` config map as the value for the `spec.application.extraEnvs.configMaps` field and enter the {product-short} `ObjectBucketClaim` secret name as the value for the `spec.application.extraEnvs.secrets` field. 
For example:
++
+[source,yaml]
+----
+apiVersion: rhdh.redhat.com/v1alpha1
+kind: Backstage
+metadata:
+  name: <backstage_name>
+spec:
+  application:
+    extraEnvs:
+      configMaps:
+        - name: <objectbucketclaim_config_map_name>
+      secrets:
+        - name: <objectbucketclaim_secret_name>
+----
diff --git a/modules/getting-started/proc-techdocs-generate-site.adoc b/modules/getting-started/proc-techdocs-generate-site.adoc
new file mode 100644
index 0000000000..d70d0e61c7
--- /dev/null
+++ b/modules/getting-started/proc-techdocs-generate-site.adoc
@@ -0,0 +1,37 @@
+:_mod-docs-content-type: PROCEDURE
+[id="proc-techdocs-generate-site_{context}"]
+= Generating the TechDocs site
+
+.Procedure
+
+To configure CI/CD to generate your techdocs, complete the following steps:
+
+. Install the `npx` package to run `techdocs-cli` using the following command:
++
+[source]
+----
+npm install -g npx
+----
+
+. Install the `techdocs-cli` tool using the following command:
++
+[source]
+----
+npm install -g @techdocs/cli
+----
+
+. Install the `mkdocs` plugins using the following command:
++
+[source]
+----
+pip install "mkdocs-techdocs-core==1.*"
+----
+
+. Generate your techdocs site using the following command:
++
+[source,terminal]
+----
+npx @techdocs/cli generate --no-docker --source-dir <path_to_repository> --output-dir ./site
+----
++
+Where `<path_to_repository>` is the location in the file path that you used to clone your repository.
diff --git a/modules/getting-started/proc-techdocs-publish-site.adoc b/modules/getting-started/proc-techdocs-publish-site.adoc
new file mode 100644
index 0000000000..fc8e22f581
--- /dev/null
+++ b/modules/getting-started/proc-techdocs-publish-site.adoc
@@ -0,0 +1,69 @@
+:_mod-docs-content-type: PROCEDURE
+[id="proc-techdocs-publish-site_{context}"]
+= Publishing the TechDocs site
+
+.Procedure
+
+To publish your techdocs site, complete the following steps:
+
+. Set the necessary authentication environment variables for your cloud storage provider.
+. 
Publish your techdocs using the following command: ++ +[source,terminal] +---- +npx @techdocs/cli publish --publisher-type --storage-name --entity --directory ./site +---- + +. Add a `.github/workflows/techdocs.yml` file in your Software Template(s). For example: ++ +[source,yaml] +---- +name: Publish TechDocs Site + +on: + push: + branches: [main] + # You can even set it to run only when TechDocs related files are updated. + # paths: + # - "docs/**" + # - "mkdocs.yml" + +jobs: + publish-techdocs-site: + runs-on: ubuntu-latest + + # The following secrets are required in your CI environment for publishing files to AWS S3. + # e.g. You can use GitHub Organization secrets to set them for all existing and new repositories. + env: + TECHDOCS_S3_BUCKET_NAME: ${{ secrets.TECHDOCS_S3_BUCKET_NAME }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_REGION: ${{ secrets.AWS_REGION }} + ENTITY_NAMESPACE: 'default' + ENTITY_KIND: 'Component' + ENTITY_NAME: 'my-doc-entity' + # In a Software template, Scaffolder will replace {{cookiecutter.component_id | jsonify}} + # with the correct entity name. 
This is same as metadata.name in the entity's catalog-info.yaml + # ENTITY_NAME: '{{ cookiecutter.component_id | jsonify }}' + + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - uses: actions/setup-node@v3 + - uses: actions/setup-python@v4 + with: + python-version: '3.9' + + - name: Install techdocs-cli + run: sudo npm install -g @techdocs/cli + + - name: Install mkdocs and mkdocs plugins + run: python -m pip install mkdocs-techdocs-core==1.* + + - name: Generate docs site + run: techdocs-cli generate --no-docker --verbose + + - name: Publish docs site + run: techdocs-cli publish --publisher-type awsS3 --storage-name $TECHDOCS_S3_BUCKET_NAME --entity $ENTITY_NAMESPACE/$ENTITY_KIND/$ENTITY_NAME +---- diff --git a/modules/getting-started/proc-techdocs-using-odf-storage.adoc b/modules/getting-started/proc-techdocs-using-odf-storage.adoc new file mode 100644 index 0000000000..d949226a97 --- /dev/null +++ b/modules/getting-started/proc-techdocs-using-odf-storage.adoc @@ -0,0 +1,39 @@ +:_mod-docs-content-type: PROCEDURE +[id="proc-techdocs-using-odf-storage_{context}"] += Using {odf-name} for file storage + +You can configure {odf-name} to store the files that TechDocs generates instead of relying on other cloud storage solutions. + +{odf-name} provides an `ObjectBucketClaim` custom resource (CR) that you can use to request an S3 compatible bucket backend. You must install the {odf-name} Operator to use this feature. + +.Prerequisites + +* An {ocp-short} administrator has installed the {odf-name} Operator in {ocp-brand-name}. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.15/html/deploying_openshift_data_foundation_using_amazon_web_services/deploy-using-dynamic-storage-devices-aws#installing-openshift-data-foundation-operator-using-the-operator-hub_cloud-storage[{ocp-short} - Installing {company-name} {odf-name} Operator]. 
+* An {ocp-short} administrator has created an {odf-name} cluster and configured the `StorageSystem` schema. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/latest/html/deploying_openshift_data_foundation_using_amazon_web_services/deploy-using-dynamic-storage-devices-aws#creating-an-openshift-data-foundation-service_cloud-storage[{ocp-short} - Creating an {odf-name} cluster].
+
+.Procedure
+
+* Create an `ObjectBucketClaim` CR where the generated TechDocs files are stored. For example:
++
+[source,yaml]
+----
+apiVersion: objectbucket.io/v1alpha1
+kind: ObjectBucketClaim
+metadata:
+  name: <objectbucketclaim_name>
+spec:
+  generateBucketName: <bucket_name>
+  storageClassName: openshift-storage.noobaa.io
+----
++
+[NOTE]
+====
+Creating the {product-short} `ObjectBucketClaim` CR automatically creates both the {product-short} `ObjectBucketClaim` config map and secret. The config map and secret have the same name as the `ObjectBucketClaim` CR.
+====
+
+After you create the `ObjectBucketClaim` CR, you can use the information stored in the config map and secret to make the information accessible to the {product-short} container as environment variables. Depending on the method that you used to install {product-short}, you add the access information to either the {product} Helm chart or Operator configuration.
+
+[role="_additional-resources"]
+.Additional resources
+
+* For more information about the Object Bucket Claim, see link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/managing_hybrid_and_multicloud_resources/object-bucket-claim#doc-wrapper[{ocp-short} - Object Bucket Claim]. 
diff --git a/modules/getting-started/ref-audit-log-catalog-events.adoc b/modules/getting-started/ref-audit-log-catalog-events.adoc new file mode 100644 index 0000000000..b47a7847ce --- /dev/null +++ b/modules/getting-started/ref-audit-log-catalog-events.adoc @@ -0,0 +1,37 @@ +// Module included in the following assemblies: +// assembly-audit-log.adoc + +:_mod-docs-content-type: REFERENCE +[id="ref-audit-log-catalog-events.adoc_{context}"] += Catalog events + +{product-short} audit logs can include the following catalog events: + +`CatalogEntityAncestryFetch`:: Tracks `GET` requests to the `/entities/by-name/:kind/:namespace/:name/ancestry` endpoint, which returns the ancestry of an entity +`CatalogEntityBatchFetch`:: Tracks `POST` requests to the `/entities/by-refs` endpoint, which returns a batch of entities +`CatalogEntityDeletion`:: Tracks `DELETE` requests to the `/entities/by-uid/:uid` endpoint, which deletes an entity + +[NOTE] +==== +If the parent location of the deleted entity is still present in the catalog, then the entity is restored in the catalog during the next processing cycle. 
+==== + +`CatalogEntityFacetFetch`:: Tracks `GET` requests to the `/entity-facets` endpoint, which returns the facets of an entity +`CatalogEntityFetch`:: Tracks `GET` requests to the `/entities` endpoint, which returns a list of entities +`CatalogEntityFetchByName`:: Tracks `GET` requests to the `/entities/by-name/:kind/:namespace/:name` endpoint, which returns an entity matching the specified entity reference, for example, `:/` +`CatalogEntityFetchByUid`:: Tracks `GET` requests to the `/entities/by-uid/:uid` endpoint, which returns an entity matching the unique ID of the specified entity +`CatalogEntityRefresh`:: Tracks `POST` requests to the `/entities/refresh` endpoint, which schedules the specified entity to be refreshed +`CatalogEntityValidate`:: Tracks `POST` requests to the `/entities/validate` endpoint, which validates the specified entity +`CatalogLocationCreation`:: Tracks `POST` requests to the `/locations` endpoint, which creates a location + +[NOTE] +==== +A location is a marker that references other places to look for catalog data. 
+==== + +`CatalogLocationAnalyze`:: Tracks `POST` requests to the `/locations/analyze` endpoint, which analyzes the specified location +`CatalogLocationDeletion`:: Tracks `DELETE` requests to the `/locations/:id` endpoint, which deletes a location and all child entities associated with it +`CatalogLocationFetch`:: Tracks `GET` requests to the `/locations` endpoint, which returns a list of locations +`CatalogLocationFetchByEntityRef`:: Tracks `GET` requests to the `/locations/by-entity` endpoint, which returns a list of locations associated with the specified entity reference +`CatalogLocationFetchById`:: Tracks `GET` requests to the `/locations/:id` endpoint, which returns a location matching the specified location ID +`QueriedCatalogEntityFetch`:: Tracks `GET` requests to the `/entities/by-query` endpoint, which returns a list of entities matching the specified query diff --git a/modules/getting-started/ref-audit-log-fields.adoc b/modules/getting-started/ref-audit-log-fields.adoc new file mode 100644 index 0000000000..0e6de692e1 --- /dev/null +++ b/modules/getting-started/ref-audit-log-fields.adoc @@ -0,0 +1,29 @@ +// Module included in the following assemblies: +// assembly-audit-log.adoc + +:_mod-docs-content-type: REFERENCE +[id="ref-audit-log-fields.adoc_{context}"] += Audit log fields + +{product-short} audit logs can include the following fields: + +`eventName`:: The name of the audited event. +`actor`:: An object containing information about the actor that triggered the audited event. Contains the following fields: +`actorId`::: The name/id/`entityRef` of the associated user or service. Can be `null` if an unauthenticated user accesses the endpoints and the default authentication policy is disabled. +`ip`::: The IP address of the actor (optional). +`hostname`::: The hostname of the actor (optional). +`client`::: The user agent of the actor (optional). 
+`stage`:: The stage of the event at the time that the audit log was generated, for example, `initiation` or `completion`. +`status`:: The status of the event, for example, `succeeded` or `failed`. +`meta`:: An optional object containing event specific data, for example, `taskId`. +`request`:: An optional field that contains information about the HTTP request sent to an endpoint. Contains the following fields: +`method`::: The HTTP method of the request. +`query`::: The `query` fields of the request. +`params`::: The `params` fields of the request. +`body`::: The request `body`. The `secrets` provided when creating a task are redacted and appear as `***`. +`url`::: The endpoint URL of the request. +`response`:: An optional field that contains information about the HTTP response sent from an endpoint. Contains the following fields: +`status`::: The status code of the HTTP response. +`body`::: The contents of the request body. +`isAuditLog`:: A flag set to `true` to differentiate audit logs from other log types. +`errors`:: A list of errors containing the `name`, `message` and potentially the `stack` field of the error. Only appears when `status` is `failed`. diff --git a/modules/getting-started/ref-audit-log-file-rotation-overview.adoc b/modules/getting-started/ref-audit-log-file-rotation-overview.adoc new file mode 100644 index 0000000000..ef6f7a0e80 --- /dev/null +++ b/modules/getting-started/ref-audit-log-file-rotation-overview.adoc @@ -0,0 +1,97 @@ +// Module included in the following assembly: +// assembly-audit-log.adoc + +[id="ref-audit-log-file-rotation-overview_{context}"] += Audit log file rotation in {product} + +Logging to a rotating file in {product} is helpful for persistent storage of audit logs. + +Persistent storage ensures that the file remains intact even after a pod is restarted. Audit log file rotation creates a new file at regular intervals, with only new data being written to the latest file. 
+
+Default settings::
+
+Audit logging to a rotating file is disabled by default. When it is enabled, the default behavior changes to:
+
+* Rotate logs at midnight (local system timezone).
+* Log file format: `redhat-developer-hub-audit-%DATE%.log`.
+* Log files are stored in `/var/log/redhat-developer-hub/audit`.
+* No automatic log file deletion.
+* No gzip compression of archived logs.
+* No file size limit.
+
+Audit logs are written in the `/var/log/redhat-developer-hub/audit` directory.
+
+Log file names::
+
+Audit log file names are in the following format:
+
+`redhat-developer-hub-audit-%DATE%.log`
+
+where `%DATE%` is the format specified in `auditLog.rotateFile.dateFormat`. You can customize file names when you configure file rotation.
+
+File rotation date and frequency::
+
+Supported `auditLog.rotateFile.frequency` options include:
+
+* `daily`: Rotate daily at 00:00 local time
+* `Xm`: Rotate every `X` minutes (where X is a number between 0 and 59)
+* `Xh`: Rotate every `X` hours (where X is a number between 0 and 23)
+* `test`: Rotate every 1 minute
+* `custom`: Use `dateFormat` to set the rotation frequency (default if frequency is not specified)
+
+If `frequency` is set to `Xh`, `Xm`, or `test`, the `dateFormat` setting must be configured in a format that includes the specified time component. Otherwise, the rotation might not work as expected.
+
+For example, use `dateFormat: 'YYYY-MM-DD-HH'` for hourly rotation, and `dateFormat: 'YYYY-MM-DD-HH-mm'` for minute rotation.
+
+.Example minute rotation
+[source,yaml]
+----
+auditLog:
+  rotateFile:
+    # If you want to rotate the file every 17 minutes
+    dateFormat: 'YYYY-MM-DD-HH-mm'
+    frequency: '17m'
+----
+
+The `dateFormat` setting configures both the `%DATE%` in `logFileName` and the file rotation frequency if `frequency` is set to `custom`. The default format is `YYYY-MM-DD`, meaning daily rotation. Supported values are based on link:https://momentjs.com/docs/#/displaying/format/[Moment.js formats]. 
+ +If the `frequency` is set to `custom`, then rotations take place when the date string, which is represented in the specified `dateFormat`, changes. + +Archive and delete:: + +By default, log files are not archived or deleted. + +Enable and configure audit file rotation:: + +If you are an administrator of {product-short}, you can enable file rotation for audit logs, and configure the file log location, name format, frequency, log file size, retention policy, and archiving. + +.Example audit log file rotation configuration +[source,yaml] +---- +auditLog: + rotateFile: + enabled: true <1> + logFileDirPath: /custom-path <2> + logFileName: custom-audit-log-%DATE%.log <3> + frequency: '12h' <4> + dateFormat: 'YYYY-MM-DD' <5> + utc: false <6> + maxSize: 100m <7> + maxFilesOrDays: 14 <8> + zippedArchive: true <9> +---- +<1> Set `enabled` to `true` to use audit log file rotation. By default, it is set to `false`. +<2> Absolute path to the log file. The specified directory is created automatically if it does not exist. +<3> Default log file name format. +<4> If no frequency is specified, then the default file rotation occurs daily at 00:00 local time. +<5> Default date format. +<6> Set `utc` to `true` to use UTC time for `dateFormat` instead of local time. +<7> Sets a maximum file size limit for the audit log. In this example, the maximum size is 100m. +<8> If set to number of files, for example `14`, then it deletes the oldest log when there are more than 14 log files. If set to number of days, for example `5d`, then it deletes logs older than 5 days. +<9> Archive and compress rotated logs using `gzip`. The default value is `false`. + +[NOTE] +==== +* By default, log files are not archived or deleted. If log deletion is enabled, then a `.-audit.json` is generated in the directory where the logs are to track generated logs. Any log file not contained in the directory is not subject to automatic deletion. 
+* A new `.-audit.json` file is generated each time the backend starts, which causes previous audit logs to stop being tracked or deleted, except for those still in use by the current backend. +==== \ No newline at end of file diff --git a/modules/getting-started/ref-audit-log-scaffolder-events.adoc b/modules/getting-started/ref-audit-log-scaffolder-events.adoc new file mode 100644 index 0000000000..19422794b9 --- /dev/null +++ b/modules/getting-started/ref-audit-log-scaffolder-events.adoc @@ -0,0 +1,23 @@ +// Module included in the following assemblies: +// assembly-audit-log.adoc + +:_mod-docs-content-type: REFERENCE +[id="ref-audit-log-scaffolder-events.adoc_{context}"] += Scaffolder events + +{product-short} audit logs can include the following scaffolder events: + +`ScaffolderParameterSchemaFetch`:: Tracks `GET` requests to the `/v2/templates/:namespace/:kind/:name/parameter-schema` endpoint which return template parameter schemas +`ScaffolderInstalledActionsFetch`:: Tracks `GET` requests to the `/v2/actions` endpoint which grabs the list of installed actions +`ScaffolderTaskCreation`:: Tracks `POST` requests to the `/v2/tasks` endpoint which creates tasks that the scaffolder executes +`ScaffolderTaskListFetch`:: Tracks `GET` requests to the `/v2/tasks` endpoint which fetches details of all tasks in the scaffolder. 
+`ScaffolderTaskFetch`:: Tracks `GET` requests to the `/v2/tasks/:taskId` endpoint which fetches details of a specified task `:taskId` +`ScaffolderTaskCancellation`:: Tracks `POST` requests to the `/v2/tasks/:taskId/cancel` endpoint which cancels a running task +`ScaffolderTaskStream`:: Tracks `GET` requests to the `/v2/tasks/:taskId/eventstream` endpoint which returns an event stream of the task logs of task `:taskId` +`ScaffolderTaskEventFetch`:: Tracks `GET` requests to the `/v2/tasks/:taskId/events` endpoint which returns a snapshot of the task logs of task `:taskId` +`ScaffolderTaskDryRun`:: Tracks `POST` requests to the `/v2/dry-run` endpoint which creates a dry-run task. All audit logs for events associated with dry runs have the `meta.isDryLog` flag set to `true`. +`ScaffolderStaleTaskCancellation`:: Tracks automated cancellation of stale tasks +`ScaffolderTaskExecution`:: Tracks the `initiation` and `completion` of a real scaffolder task execution (will not occur during dry runs) +`ScaffolderTaskStepExecution`:: Tracks `initiation` and `completion` of a scaffolder task step execution +`ScaffolderTaskStepSkip`:: Tracks steps skipped due to `if` conditionals not being met +`ScaffolderTaskStepIteration`:: Tracks the step execution of each iteration of a task step that contains the `each` field. diff --git a/modules/getting-started/ref-rhdh-sizing.adoc b/modules/getting-started/ref-rhdh-sizing.adoc new file mode 100644 index 0000000000..d236bc76a1 --- /dev/null +++ b/modules/getting-started/ref-rhdh-sizing.adoc @@ -0,0 +1,33 @@ +[id='ref-rhdh-sizing_{context}'] += Sizing requirements for {product} + +Scalability of {product} requires significant resource allocation. The following table lists the sizing requirements for installing and running {product}, including {product-short} application, database components, and Operator. 
+ +.Recommended sizing for running {product} +[cols="25%,25%,25%,25%", frame="all", options="header"] +|=== +|Components +|{product} application +|{product} database +|{product} Operator + +|Central Processing Unit (CPU) +|4 vCPU +|2 vCPU +|1 vCPU + +|Memory +|16 GB +|8 GB +|1500 Mi + +|Storage size +|2 GB +|20 GB +|50 Mi + +|Replicas +|2 or more +|3 or more +|1 or more +|=== \ No newline at end of file diff --git a/modules/getting-started/ref-rhdh-supported-configs.adoc b/modules/getting-started/ref-rhdh-supported-configs.adoc new file mode 100644 index 0000000000..4b15c0bc16 --- /dev/null +++ b/modules/getting-started/ref-rhdh-supported-configs.adoc @@ -0,0 +1,7 @@ +[id='ref-rhdh-supported-configs_{context}'] += Supported configurations for {product} + +This section describes the configurations that are required to access the {product}, including: + +* Custom applications configuration +* Source control configuration for {product-short} Catalog \ No newline at end of file diff --git a/modules/getting-started/ref-supported-servicenow-custom-actions.adoc b/modules/getting-started/ref-supported-servicenow-custom-actions.adoc new file mode 100644 index 0000000000..9bb34ac420 --- /dev/null +++ b/modules/getting-started/ref-supported-servicenow-custom-actions.adoc @@ -0,0 +1,417 @@ +[id='ref-supported-servicenow-custom-actions_{context}'] += Supported ServiceNow custom actions in {product} + +The ServiceNow custom actions enable you to manage records in the {product}. The custom actions support the following HTTP methods for API requests: + +* `GET`: Retrieves specified information from a specified resource endpoint +* `POST`: Creates or updates a resource +* `PUT`: Modify a resource +* `PATCH`: Updates a resource +* `DELETE`: Deletes a resource + +== ServiceNow custom actions + +[GET] servicenow:now:table:retrieveRecord:: ++ +-- +Retrieves information of a specified record from a table in the {product-short}. 
+ +.Input parameters +[cols="15%,25%,15%,45%", frame="all", options="header"] +|=== +|Name +|Type +|Requirement +|Description + +|`tableName` +|`string` +|Required +|Name of the table to retrieve the record from + +|`sysId` +|`string` +|Required +|Unique identifier of the record to retrieve + +|`sysparmDisplayValue` +|`enum("true", "false", "all")` +|Optional +|Returns field display values such as `true`, actual values as `false`, or both. The default value is `false`. + +|`sysparmExcludeReferenceLink` +|`boolean` +|Optional +|Set as `true` to exclude Table API links for reference fields. The default value is `false`. + +|`sysparmFields` +|`string[]` +|Optional +|Array of fields to return in the response + +|`sysparmView` +|`string` +|Optional +|Renders the response according to the specified UI view. You can override this parameter using `sysparm_fields`. + +|`sysparmQueryNoDomain` +|`boolean` +|Optional +|Set as `true` to access data across domains if authorized. The default value is `false`. +|=== + + +.Output parameters +[cols="15%,35%,50%", frame="all", options="header"] +|=== +|Name +|Type +|Description + +|`result` +|`Record` +|The response body of the request +|=== +-- + + +[GET] servicenow:now:table:retrieveRecords:: ++ +-- +Retrieves information about multiple records from a table in the {product-short}. + +.Input parameters +[cols="15%,25%,15%,45%", frame="all", options="header"] +|=== +|Name +|Type +|Requirement +|Description + +|`tableName` +|`string` +|Required +|Name of the table to retrieve the records from + +|`sysparamQuery` +|`string` +|Optional +|Encoded query string used to filter the results + +|`sysparmDisplayValue` +|`enum("true", "false", "all")` +|Optional +|Returns field display values such as `true`, actual values as `false`, or both. The default value is `false`. + +|`sysparmExcludeReferenceLink` +|`boolean` +|Optional +|Set as `true` to exclude Table API links for reference fields. The default value is `false`. 
+ +|`sysparmSuppressPaginationHeader` +|`boolean` +|Optional +|Set as `true` to suppress pagination header. The default value is `false`. + +|`sysparmFields` +|`string[]` +|Optional +|Array of fields to return in the response + +|`sysparmLimit` +|`int` +|Optional +|Maximum number of results returned per page. The default value is `10,000`. + +|`sysparmView` +|`string` +|Optional +|Renders the response according to the specified UI view. You can override this parameter using `sysparm_fields`. + +|`sysparmQueryCategory` +|`string` +|Optional +|Name of the query category to use for queries + +|`sysparmQueryNoDomain` +|`boolean` +|Optional +|Set as `true` to access data across domains if authorized. The default value is `false`. + +|`sysparmNoCount` +|`boolean` +|Optional +|Does not execute a select count(*) on the table. The default value is `false`. +|=== + + +.Output parameters +[cols="15%,35%,50%", frame="all", options="header"] +|=== +|Name +|Type +|Description + +|`result` +|`Record` +|The response body of the request +|=== +-- + + +[POST] servicenow:now:table:createRecord:: ++ +-- +Creates a record in a table in the {product-short}. + +.Input parameters +[cols="15%,25%,15%,45%", frame="all", options="header"] +|=== +|Name +|Type +|Requirement +|Description + +|`tableName` +|`string` +|Required +|Name of the table to save the record in + +|`requestBody` +|`Record` +|Optional +|Field name and associated value for each parameter to define in the specified record + +|`sysparmDisplayValue` +|`enum("true", "false", "all")` +|Optional +|Returns field display values such as `true`, actual values as `false`, or both. The default value is `false`. + +|`sysparmExcludeReferenceLink` +|`boolean` +|Optional +|Set as `true` to exclude Table API links for reference fields. The default value is `false`. 
+ +|`sysparmFields` +|`string[]` +|Optional +|Array of fields to return in the response + +|`sysparmInputDisplayValue` +|`boolean` +|Optional +|Set field values using their display value such as `true` or actual value as `false`. The default value is `false`. + +|`sysparmSuppressAutoSysField` +|`boolean` +|Optional +|Set as `true` to suppress auto-generation of system fields. The default value is `false`. + +|`sysparmView` +|`string` +|Optional +|Renders the response according to the specified UI view. You can override this parameter using `sysparm_fields`. +|=== + + +.Output parameters +[cols="15%,35%,50%", frame="all", options="header"] +|=== +|Name +|Type +|Description + +|`result` +|`Record` +|The response body of the request +|=== +-- + + +[PUT] servicenow:now:table:modifyRecord:: ++ +-- +Modifies a record in a table in the {product-short}. + +.Input parameters +[cols="15%,25%,15%,45%", frame="all", options="header"] +|=== +|Name +|Type +|Requirement +|Description + +|`tableName` +|`string` +|Required +|Name of the table to modify the record from + +|`sysId` +|`string` +|Required +|Unique identifier of the record to modify + +|`requestBody` +|`Record` +|Optional +|Field name and associated value for each parameter to define in the specified record + +|`sysparmDisplayValue` +|`enum("true", "false", "all")` +|Optional +|Returns field display values such as `true`, actual values as `false`, or both. The default value is `false`. + +|`sysparmExcludeReferenceLink` +|`boolean` +|Optional +|Set as `true` to exclude Table API links for reference fields. The default value is `false`. + +|`sysparmFields` +|`string[]` +|Optional +|Array of fields to return in the response + +|`sysparmInputDisplayValue` +|`boolean` +|Optional +|Set field values using their display value such as `true` or actual value as `false`. The default value is `false`. + +|`sysparmSuppressAutoSysField` +|`boolean` +|Optional +|Set as `true` to suppress auto-generation of system fields. 
The default value is `false`. + +|`sysparmView` +|`string` +|Optional +|Renders the response according to the specified UI view. You can override this parameter using `sysparm_fields`. + +|`sysparmQueryNoDomain` +|`boolean` +|Optional +|Set as `true` to access data across domains if authorized. The default value is `false`. +|=== + + +.Output parameters +[cols="15%,35%,50%", frame="all", options="header"] +|=== +|Name +|Type +|Description + +|`result` +|`Record` +|The response body of the request +|=== +-- + + +[PATCH] servicenow:now:table:updateRecord:: ++ +-- +Updates a record in a table in the {product-short}. + +.Input parameters +[cols="15%,25%,15%,45%", frame="all", options="header"] +|=== +|Name +|Type +|Requirement +|Description + +|`tableName` +|`string` +|Required +|Name of the table to update the record in + +|`sysId` +|`string` +|Required +|Unique identifier of the record to update + +|`requestBody` +|`Record` +|Optional +|Field name and associated value for each parameter to define in the specified record + +|`sysparmDisplayValue` +|`enum("true", "false", "all")` +|Optional +|Returns field display values such as `true`, actual values as `false`, or both. The default value is `false`. + +|`sysparmExcludeReferenceLink` +|`boolean` +|Optional +|Set as `true` to exclude Table API links for reference fields. The default value is `false`. + +|`sysparmFields` +|`string[]` +|Optional +|Array of fields to return in the response + +|`sysparmInputDisplayValue` +|`boolean` +|Optional +|Set field values using their display value such as `true` or actual value as `false`. The default value is `false`. + +|`sysparmSuppressAutoSysField` +|`boolean` +|Optional +|Set as `true` to suppress auto-generation of system fields. The default value is `false`. + +|`sysparmView` +|`string` +|Optional +|Renders the response according to the specified UI view. You can override this parameter using `sysparm_fields`. 
+ +|`sysparmQueryNoDomain` +|`boolean` +|Optional +|Set as `true` to access data across domains if authorized. The default value is `false`. +|=== + + +.Output parameters +[cols="15%,35%,50%", frame="all", options="header"] +|=== +|Name +|Type +|Description + +|`result` +|`Record` +|The response body of the request +|=== +-- + + +[DELETE] servicenow:now:table:deleteRecord:: ++ +-- +Deletes a record from a table in the {product-short}. + +.Input parameters +[cols="15%,25%,15%,45%", frame="all", options="header"] +|=== +|Name +|Type +|Requirement +|Description + +|`tableName` +|`string` +|Required +|Name of the table to delete the record from + +|`sysId` +|`string` +|Required +|Unique identifier of the record to delete + +|`sysparmQueryNoDomain` +|`boolean` +|Optional +|Set as `true` to access data across domains if authorized. The default value is `false`. +|=== +-- \ No newline at end of file diff --git a/modules/getting-started/ref-techdocs-example-config-plugin-helm.adoc b/modules/getting-started/ref-techdocs-example-config-plugin-helm.adoc new file mode 100644 index 0000000000..53e87aefd3 --- /dev/null +++ b/modules/getting-started/ref-techdocs-example-config-plugin-helm.adoc @@ -0,0 +1,31 @@ +:_mod-docs-content-type: REFERENCE +[id="ref-techdocs-example-config-plugin-helm_{context}"] += Example TechDocs Plugin configuration for the Helm chart + +The following example shows a {product-short} Helm chart configuration for the TechDocs plugin: + +[source,yaml] +---- +global: + dynamic: + includes: + - 'dynamic-plugins.default.yaml' + plugins: + - disabled: false + package: ./dynamic-plugins/dist/backstage-plugin-techdocs-backend-dynamic + pluginConfig: + techdocs: + builder: external + generator: + runIn: local + publisher: + awsS3: + bucketName: '${BUCKET_NAME}' + credentials: + accessKeyId: '${AWS_ACCESS_KEY_ID}' + secretAccessKey: '${AWS_SECRET_ACCESS_KEY}' + endpoint: 'https://${BUCKET_HOST}' + region: '${BUCKET_REGION}' + s3ForcePathStyle: true + type: awsS3 
+---- diff --git a/modules/getting-started/ref-techdocs-example-config-plugin-operator.adoc b/modules/getting-started/ref-techdocs-example-config-plugin-operator.adoc new file mode 100644 index 0000000000..0e84f23bda --- /dev/null +++ b/modules/getting-started/ref-techdocs-example-config-plugin-operator.adoc @@ -0,0 +1,35 @@ +:_mod-docs-content-type: REFERENCE +[id="ref-techdocs-example-config-plugin-operator_{context}"] += Example TechDocs Plugin configuration for the Operator + +The following example shows a {product} Operator config map configuration for the TechDocs plugin: + +[source,yaml] +---- +kind: ConfigMap +apiVersion: v1 +metadata: + name: dynamic-plugins-rhdh +data: + dynamic-plugins.yaml: | + includes: + - dynamic-plugins.default.yaml + plugins: + - disabled: false + package: ./dynamic-plugins/dist/backstage-plugin-techdocs-backend-dynamic + pluginConfig: + techdocs: + builder: external + generator: + runIn: local + publisher: + awsS3: + bucketName: '${BUCKET_NAME}' + credentials: + accessKeyId: '${AWS_ACCESS_KEY_ID}' + secretAccessKey: '${AWS_SECRET_ACCESS_KEY}' + endpoint: 'https://${BUCKET_HOST}' + region: '${BUCKET_REGION}' + s3ForcePathStyle: true + type: awsS3 +---- diff --git a/modules/importing-repositories/procedure-enabling-the-bulk-import-from-github-feature.adoc b/modules/importing-repositories/procedure-enabling-the-bulk-import-from-github-feature.adoc new file mode 100644 index 0000000000..b2f05fd5bd --- /dev/null +++ b/modules/importing-repositories/procedure-enabling-the-bulk-import-from-github-feature.adoc @@ -0,0 +1,40 @@ +[id="enabling-and-giving-access-to-the-bulk-import-feature"] += Enabling and giving access to the Bulk Import feature +You can enable the Bulk Import feature for users and give them the necessary permissions to access it. + +.Prerequisites +* You have link:{authentication-book-url}#enabling-authentication-with-github[configured GitHub authentication and integration]. + +.Procedure + +. 
The Bulk Import plugins are installed but disabled by default. +To enable the `./dynamic-plugins/dist/janus-idp-backstage-plugin-bulk-import-backend-dynamic` and `./dynamic-plugins/dist/janus-idp-backstage-plugin-bulk-import` plugins, +edit your `dynamic-plugins.yaml` with the following content: ++ +.`dynamic-plugins.yaml` fragment +[source,yaml] +---- +plugins: + - package: ./dynamic-plugins/dist/janus-idp-backstage-plugin-bulk-import-backend-dynamic + disabled: false + - package: ./dynamic-plugins/dist/janus-idp-backstage-plugin-bulk-import + disabled: false +---- ++ +See link:{installing-and-viewing-dynamic-plugins-url}[{installing-and-viewing-dynamic-plugins-title}]. + +. Configure the required `bulk.import` RBAC permission for the users who are not administrators as follows: ++ +.`rbac-policy.csv` fragment +[source,csv,subs="+quotes"] +---- +p, role:default/bulk-import, bulk.import, use, allow +g, user:default/____, role:default/bulk-import +---- ++ +Note that only {product-short} administrators or users with the `bulk.import` permission can use the Bulk Import feature. See link:{authorization-book-url}#ref-rbac-permission-policies_title-authorization[Permission policies in Red Hat Developer Hub]. + +.Verification +* The sidebar displays a *Bulk Import* option. +* The *Bulk Import* page shows a list of *Added Repositories*. + diff --git a/modules/importing-repositories/procedure-importing-multiple-repositories-from-github.adoc b/modules/importing-repositories/procedure-importing-multiple-repositories-from-github.adoc new file mode 100644 index 0000000000..b0c38724ff --- /dev/null +++ b/modules/importing-repositories/procedure-importing-multiple-repositories-from-github.adoc @@ -0,0 +1,37 @@ +[id="importing-multiple-github-repositories"] += Importing multiple GitHub repositories + +In {product}, you can select your GitHub repositories and automate their onboarding to the {product-short} catalog. 
+ +.Prerequisites +* You have link:{authentication-book-url}#enabling-authentication-with-github[configured GitHub authentication and integration]. +* You have xref:enabling-and-giving-access-to-the-bulk-import-feature[enabled the Bulk Import feature and given access to it]. + +.Procedure +. Click *Bulk Import* in the left sidebar. +. Click the *Add* button in the top-right corner to see the list of all repositories accessible from the configured GitHub integrations. +.. From the *Repositories* view, you can select any repository, or search for any accessible repositories. +For each repository selected, a `catalog-info.yaml` is generated. +.. From the *Organizations* view, you can select any organization by clicking *Select* in the third column. +This option allows you to select one or more repositories from the selected organization. +. Click *Preview file* to view or edit the details of the pull request for each repository. +.. Review the pull request description and the `catalog-info.yaml` file content. +.. Optional: When the repository has a `.github/CODEOWNERS` file, you can select the *Use CODEOWNERS file as Entity Owner* checkbox to use it, rather than having the `catalog-info.yaml` contain a specific entity owner. +.. Click *Save*. +. Click *Create pull requests*. +At this point, a set of dry-run checks runs against the selected repositories to ensure they meet the requirements for import, such as: +.. Verifying that there is no entity in the {product-short} catalog with the name specified in the repository `catalog-info.yaml` +.. Verifying that the repository is not empty +.. Verifying that the repository contains a `.github/CODEOWNERS` file if the *Use CODEOWNERS file as Entity Owner* checkbox is selected for that repository + +** If any errors occur, the pull requests are not created, and you see a _Failed to create PR_ error message detailing the issues. +To view more details about the reasons, click *Edit*. 
+ +** If there are no errors, the pull requests are created, and you are redirected to the list of added repositories. + +. Review and merge each pull request that creates a `catalog-info.yaml` file. + +.Verification +* The *Added repositories* list displays the repositories you imported, each with an appropriate status: either _Waiting for approval_ or _Added_. +* For each _Waiting for approval_ import job listed, there is a corresponding pull request adding the `catalog-info.yaml` file in the corresponding repository. + diff --git a/modules/importing-repositories/procedure-managing-the-imported-repository-list.adoc b/modules/importing-repositories/procedure-managing-the-imported-repository-list.adoc new file mode 100644 index 0000000000..5eb2d5e4b4 --- /dev/null +++ b/modules/importing-repositories/procedure-managing-the-imported-repository-list.adoc @@ -0,0 +1,29 @@ +[id="managing-the-added-repositories"] += Managing the added repositories +You can oversee and manage the repositories that are imported to the {product-short}. + +.Prerequisites +* You have xref:importing-multiple-github-repositories[imported GitHub repositories]. + + +.Procedure +. Click *Bulk Import* in the left sidebar to display all the current repositories that are being tracked as Import jobs, along with their status. + +Added:: The repository is added to the {product-short} catalog after the import pull request is merged or if the repository already contained a `catalog-info.yaml` file during the bulk import. +Note that it may take a few minutes for the entities to be available in the catalog. + +Waiting for approval:: There is an open pull request adding a `catalog-info.yaml` file to the repository. +You can: +* Click the *pencil icon* on the right to see details about the pull request or edit the pull request content right from {product-short}. +* Delete the Import job. This action also closes the import pull request. 
+* To transition the Import job to the _Added_ state, merge the import pull request from the Git repository. + +Empty:: {product-short} is unable to determine the import job status because the repository is imported from other sources but does not have a `catalog-info.yaml` file and lacks any import pull request adding it. + +[NOTE] +==== +* After an import pull request is merged, the import status is marked as _Added_ in the list of Added Repositories, but it might take a few seconds for the corresponding entities to appear in the {product-short} Catalog. +* A location added through other sources (like statically in an `app-config.yaml` file, dynamically when link:{linkgettingstartedguide}#enabling-github-discovery-in-red-hat-developer-hub[enabling GitHub discovery], or registered manually using the "Register an existing component" page) might show up in the Bulk Import list of Added Repositories if the following conditions are met: +** The target repository is accessible from the configured GitHub integrations. +** The location URL points to a `catalog-info.yaml` file at the root of the repository default branch. +==== diff --git a/modules/importing-repositories/procedure-understanding-bulk-import-audit-logs.adoc b/modules/importing-repositories/procedure-understanding-bulk-import-audit-logs.adoc new file mode 100644 index 0000000000..b88ff09f4d --- /dev/null +++ b/modules/importing-repositories/procedure-understanding-bulk-import-audit-logs.adoc @@ -0,0 +1,70 @@ +[id="understanding-bulk-import-audit-logs"] += Understanding the Bulk Import audit Logs + +The Bulk Import backend plugin adds the following events to the {product-short} audit logs. +See link:{linkgettingstartedguide}#assembly-audit-log[Audit Logs in {product}] for more information on how to configure and view audit logs. + +*Bulk Import Events*: + +`BulkImportUnknownEndpoint`:: +Tracks requests to unknown endpoints. 
+ +`BulkImportPing`:: +Tracks `GET` requests to the `/ping` endpoint, which verifies that the bulk import backend is up and running. + +`BulkImportFindAllOrganizations`:: +Tracks `GET` requests to the `/organizations` endpoint, which returns the list of organizations accessible from all configured GitHub Integrations. + +`BulkImportFindRepositoriesByOrganization`:: +Tracks `GET` requests to the `/organizations/:orgName/repositories` endpoint, which returns the list of repositories for the specified organization (accessible from any of the configured GitHub Integrations). + +`BulkImportFindAllRepositories`:: +Tracks `GET` requests to the `/repositories` endpoint, which returns the list of repositories accessible from all configured GitHub Integrations. + +`BulkImportFindAllImports`:: +Tracks `GET` requests to the `/imports` endpoint, which returns the list of existing import jobs along with their statuses. + +`BulkImportCreateImportJobs`:: +Tracks `POST` requests to the `/imports` endpoint, which allows you to submit requests to bulk-import one or many repositories into the {product-short} catalog, by eventually creating import pull requests in the target repositories. + +`BulkImportFindImportStatusByRepo`:: +Tracks `GET` requests to the `/import/by-repo` endpoint, which fetches details about the import job for the specified repository. + +`BulkImportDeleteImportByRepo`:: +Tracks `DELETE` requests to the `/import/by-repo` endpoint, which deletes any existing import job for the specified repository, by closing any open import pull request that could have been created. 
+ +.Example bulk import audit logs +[source,json] +---- +{ + "actor": { + "actorId": "user:default/myuser", + "hostname": "localhost", + "ip": "::1", + "userAgent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36" + }, + "eventName": "BulkImportFindAllOrganizations", + "isAuditLog": true, + "level": "info", + "message": "'get /organizations' endpoint hit by user:default/myuser", + "meta": {}, + "plugin": "bulk-import", + "request": { + "body": {}, + "method": "GET", + "params": {}, + "query": { + "pagePerIntegration": "1", + "sizePerIntegration": "5" + }, + "url": "/api/bulk-import/organizations?pagePerIntegration=1&sizePerIntegration=5" + }, + "response": { + "status": 200 + }, + "service": "backstage", + "stage": "completion", + "status": "succeeded", + "timestamp": "2024-08-26 16:41:02" +} +---- diff --git a/modules/installation/con-airgapped-environment.adoc b/modules/installation/con-airgapped-environment.adoc new file mode 100644 index 0000000000..3d91758448 --- /dev/null +++ b/modules/installation/con-airgapped-environment.adoc @@ -0,0 +1,6 @@ +[id='con-airgapped-environment_{context}'] += Air-gapped environment + +An air-gapped environment, also known as an air-gapped network or isolated network, ensures security by physically segregating the system or network. This isolation is established to prevent unauthorized access, data transfer, or communication between the air-gapped system and external sources. + +You can install the {product} in an air-gapped environment to ensure security and meet specific regulatory requirements. 
diff --git a/modules/installation/con-rhdh-overview.adoc b/modules/installation/con-rhdh-overview.adoc new file mode 100644 index 0000000000..8145955513 --- /dev/null +++ b/modules/installation/con-rhdh-overview.adoc @@ -0,0 +1,12 @@ +[id='con-rhdh-overview_{context}'] += Overview of {product} + +{product} ({product-short}) serves as an open developer platform designed for building developer portals and is based on the link:https://backstage.io/[Backstage] project. Using {product-short}, engineering teams can access a unified platform that streamlines the development process and provides a variety of tools and resources to build high-quality software efficiently. + +The goal of {product-short} is to address the difficulties associated with creating and sustaining developer portals using: + +* A centralized dashboard to view all available developer tools and resources to increase productivity +* Self-service capabilities, along with guardrails, for cloud-native application development that complies with enterprise-class best practices +* Proper security and governance for all developers across the enterprise + +The {product} simplifies decision-making by providing a developer experience that presents a selection of internally approved tools, programming languages, and various developer resources within a self-managed portal. This approach contributes to the acceleration of application development and the maintenance of code quality, all while fostering innovation. 
diff --git a/modules/installation/proc-add-custom-app-config-file-ocp-operator.adoc b/modules/installation/proc-add-custom-app-config-file-ocp-operator.adoc new file mode 100644 index 0000000000..b027b9fd6c --- /dev/null +++ b/modules/installation/proc-add-custom-app-config-file-ocp-operator.adoc @@ -0,0 +1,99 @@ +[id="proc-add-custom-app-config-file-ocp-operator_{context}"] += Adding a custom application configuration file to {ocp-short} using the Operator + +A custom application configuration file is a `ConfigMap` object that you can use to change the configuration of your {product} instance. If you are deploying your {product-short} instance on {ocp-brand-name}, you can use the {product} Operator to add a custom application configuration file to your {ocp-short} instance by creating the `ConfigMap` object and referencing it in the {product-short} custom resource (CR). + +The custom application configuration file contains a sensitive environment variable, named `BACKEND_SECRET`. This variable contains a mandatory backend authentication key that {product-short} uses to reference an environment variable defined in an {ocp-short} secret. You must create a secret, named 'secrets-rhdh', and reference it in the {product-short} CR. + +[NOTE] +==== +You are responsible for protecting your {product} installation from external and unauthorized access. Manage the backend authentication key like any other secret. Meet strong password requirements, do not expose it in any configuration files, and only inject it into configuration files as an environment variable. +==== + +.Prerequisites +* You have an active {ocp-brand-name} account. +* Your administrator has installed the {product} Operator in {ocp-short}. For more information, see link:{LinkAdminGuide}[Installing the {product} Operator]. +* You have created the {product} CR in {ocp-short}. + +.Procedure +. 
From the *Developer* perspective in the {ocp-short} web console, select the *Topology* view, and click the *Open URL* icon on the {product-short} pod to identify your {product-short} external URL: __<{product-very-short}_URL>__. +. From the *Developer* perspective in the {ocp-short} web console, select the *ConfigMaps* view. +. Click *Create ConfigMap*. +. Select the *YAML view* option in *Configure via* and use the following example as a base template to create a `ConfigMap` object, such as `app-config-rhdh.yaml`: ++ +[source,yaml,subs="attributes+"] +---- +kind: ConfigMap +apiVersion: v1 +metadata: + name: app-config-rhdh +data: + "app-config-rhdh.yaml": | + app: + title: {product} + baseUrl: <{product-very-short}_URL> # <1> + backend: + auth: + externalAccess: + - type: legacy + options: + subject: legacy-default-config + secret: "${BACKEND_SECRET}" # <2> + baseUrl: <{product-very-short}_URL> # <3> + cors: + origin: <{product-very-short}_URL> # <4> +---- ++ +<1> Set the external URL of your {product} instance. +<2> Use an environment variable exposing an {ocp-short} secret to define the mandatory {product-short} backend authentication key. +<3> Set the external URL of your {product} instance. +<4> Set the external URL of your {product} instance. + +. Click *Create*. +. Select the *Secrets* view. +. Click *Create Key/value Secret*. +. Create a secret named `secrets-rhdh`. +. Add a key named `BACKEND_SECRET` and a base64 encoded string as a value. Use a unique value for each {product} instance. For example, you can use the following command to generate a key from your terminal: ++ +[source,yaml] +---- +node -p 'require("crypto").randomBytes(24).toString("base64")' +---- + +. Click *Create*. +. Select the *Topology* view. +. Click the overflow menu for the {product} instance that you want to use and select *Edit Backstage* to load the YAML view of the {product} instance. ++ +image::rhdh/operator-install-2.png[] + +. 
In the CR, enter the name of the custom application configuration config map as the value for the `spec.application.appConfig.configMaps` field, and enter the name of your secret as the value for the `spec.application.extraEnvs.secrets` field. For example: ++ +[source, yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: example +spec: + application: + appConfig: + mountPath: /opt/app-root/src + configMaps: + - name: app-config-rhdh + extraEnvs: + secrets: + - name: secrets-rhdh + extraFiles: + mountPath: /opt/app-root/src + replicas: 1 + route: + enabled: true + database: + enableLocalDb: true +---- +. Click *Save*. +. Navigate back to the *Topology* view and wait for the {product} pod to start. +. Click the *Open URL* icon to use the {product} platform with the configuration changes. + +.Additional resources +* For more information about roles and responsibilities in {product-short}, see link:{LinkAdminGuide}[Role-Based Access Control (RBAC) in {product}]. diff --git a/modules/installation/proc-configuring-an-rhdh-instance-with-tls-in-kubernetes.adoc b/modules/installation/proc-configuring-an-rhdh-instance-with-tls-in-kubernetes.adoc new file mode 100644 index 0000000000..39e432151f --- /dev/null +++ b/modules/installation/proc-configuring-an-rhdh-instance-with-tls-in-kubernetes.adoc @@ -0,0 +1,143 @@ +[id="proc-configuring-an-rhdh-instance-with-tls-in-kubernetes_{context}"] += Configuring an {product-very-short} instance with a TLS connection in Kubernetes + +You can configure an {product-very-short} instance with a Transport Layer Security (TLS) connection in a Kubernetes cluster, such as an Azure Red Hat OpenShift (ARO) cluster, any cluster from a supported cloud provider, or your own cluster with proper configuration. However, You must use a public Certificate Authority (CA)-signed certificate to configure your Kubernetes cluster. + +.Prerequisites + +* You have set up an Azure Red Hat OpenShift (ARO) cluster with a public CA-signed certificate. 
For more information about obtaining CA certificates, refer to your vendor documentation. +* You have created a namespace and setup a service account with proper read permissions on resources. ++ +.Example: Kubernetes manifest for role-based access control +[source,yaml] +---- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: backstage-read-only +rules: + - apiGroups: + - '*' + resources: + - pods + - configmaps + - services + - deployments + - replicasets + - horizontalpodautoscalers + - ingresses + - statefulsets + - limitranges + - resourcequotas + - daemonsets + verbs: + - get + - list + - watch +#... +---- + +* You have obtained the secret and the service CA certificate associated with your service account. +* You have created some resources and added annotations to them so they can be discovered by the Kubernetes plugin. You can apply these Kubernetes annotations: + +** `backstage.io/kubernetes-id` to label components +** `backstage.io/kubernetes-namespace` to label namespaces + + +.Procedure + +. Enable the Kubernetes plugins in the `dynamic-plugins-rhdh.yaml` file: ++ +[source,yaml] +---- +kind: ConfigMap +apiVersion: v1 +metadata: + name: dynamic-plugins-rhdh +data: + dynamic-plugins.yaml: | + includes: + - dynamic-plugins.default.yaml + plugins: +      - package: ./dynamic-plugins/dist/backstage-plugin-kubernetes-backend-dynamic +        disabled: false <1> +      - package: ./dynamic-plugins/dist/backstage-plugin-kubernetes +        disabled: false <2> + # ... +---- +<1> Set the value to `false` to enable the `backstage-plugin-kubernetes-backend-dynamic` plugin. +<2> Set the value to `false` to enable the `backstage-plugin-kubernetes` plugin. ++ +[NOTE] +==== +The `backstage-plugin-kubernetes` plugin is currently in link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]. 
As an alternative, you can use the `./dynamic-plugins/dist/backstage-plugin-topology-dynamic` plugin, which is Generally Available (GA). +==== + +. Set the kubernetes cluster details and configure the catalog sync options in the `app-config-rhdh.yaml` file: ++ +[source,yaml] +---- +kind: ConfigMap +apiVersion: v1 +metadata: + name: app-config-rhdh +data: + "app-config-rhdh.yaml": | + # ... +  catalog: +    rules: +      - allow: [Component, System, API, Resource, Location] +    providers: +      kubernetes: +        openshift: +          cluster: openshift +          processor: +            namespaceOverride: default +            defaultOwner: guests +          schedule: +            frequency: +              seconds: 30 +            timeout: +              seconds: 5 +  kubernetes: +    serviceLocatorMethod: +      type: 'multiTenant' +    clusterLocatorMethods: +      - type: 'config' +        clusters: +          - url: <1> +            name: openshift +            authProvider: 'serviceAccount' +            skipTLSVerify: false <2> +            skipMetricsLookup: true +            dashboardUrl: <3> +            dashboardApp: openshift +            serviceAccountToken: ${K8S_SERVICE_ACCOUNT_TOKEN} <4> +            caData: ${K8S_CONFIG_CA_DATA} <5> +            # ... +---- +<1> The base URL to the Kubernetes control plane. You can run the `kubectl cluster-info` command to get the base URL. +<2> Set the value of this parameter to `false` to enable the verification of the TLS certificate. +<3> Optional: The link to the Kubernetes dashboard managing the ARO cluster. +<4> Optional: Pass the service account token using a `K8S_SERVICE_ACCOUNT_TOKEN` environment variable that you can define in your `secrets-rhdh` secret. +<5> Pass the CA data using a `K8S_CONFIG_CA_DATA` environment variable that you can define in your `secrets-rhdh` secret. + +. Save the configuration changes. + +.Verification + +. 
Run the {product-very-short} application to import your catalog: ++ +[source,terminal] +---- +kubectl -n rhdh-operator get pods -w +---- + +. Verify that the pod log shows no errors for your configuration. +. Go to *Catalog* and check the component page in the {product-short} instance to verify the cluster connection and the presence of your created resources. + +[NOTE] +==== +If you encounter connection errors, such as certificate issues or permissions, check the message box in the component page or view the logs of the pod. +==== + diff --git a/modules/installation/proc-deploy-rhdh-instance-eks.adoc b/modules/installation/proc-deploy-rhdh-instance-eks.adoc new file mode 100644 index 0000000000..14deafb8aa --- /dev/null +++ b/modules/installation/proc-deploy-rhdh-instance-eks.adoc @@ -0,0 +1,158 @@ +// Module included in the following assemblies: +// +// * assemblies/assembly-install-rhdh-eks.adoc + +[id="proc-deploy-rhdh-instance-eks.adoc_{context}"] += Deploying the {product-short} instance on {eks-short} with the Operator + +.Prerequisites + +* A cluster administrator has installed the {product} Operator. +* You have an {eks-short} cluster with {aws-short} Application Load Balancer (ALB) add-on installed. For more information, see https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html[Application load balancing on {eks-brand-name}] and https://docs.aws.amazon.com/eks/latest/userguide/aws-load-balancer-controller.html[Installing the AWS Load Balancer Controller add-on]. +* You have configured a domain name for your {product-short} instance. The domain name can be a hosted zone entry on Route 53 or managed outside of AWS. For more information, see https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-configuring.html[Configuring Amazon Route 53 as your DNS service] documentation. +* You have an entry in the {aws-short} Certificate Manager (ACM) for your preferred domain name. Make sure to keep a record of your Certificate ARN. 
+* You have subscribed to `registry.redhat.io`. For more information, see https://access.redhat.com/RegistryAuthentication[{company-name} Container Registry Authentication]. +* You have set the context to the {eks-short} cluster in your current `kubeconfig`. For more information, see https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html[Creating or updating a kubeconfig file for an Amazon {eks-short} cluster]. +* You have installed `kubectl`. For more information, see https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html[Installing or updating kubectl]. + +.Procedure + +. Create a ConfigMap named `app-config-rhdh` containing the {product-short} configuration using the following template: ++ +-- +[source,yaml,subs="attributes+"] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: app-config-rhdh +data: + "app-config-rhdh.yaml": | + app: + title: {product} + baseUrl: https:// + backend: + auth: + externalAccess: + - type: legacy + options: + subject: legacy-default-config + secret: "${BACKEND_SECRET}" + baseUrl: https:// + cors: + origin: https:// +---- +-- + +. Create a Secret named `secrets-rhdh` and add a key named `BACKEND_SECRET` with a `Base64-encoded` string as value: ++ +-- +[source,yaml] +---- +apiVersion: v1 +kind: Secret +metadata: + name: secrets-rhdh +stringData: + # TODO: See https://backstage.io/docs/auth/service-to-service-auth/#setup + BACKEND_SECRET: "xxx" +---- + +[IMPORTANT] +==== +Ensure that you use a unique value of `BACKEND_SECRET` for each {product-short} instance. +==== + +You can use the following command to generate a key: + +[source,terminal] +---- +node -p 'require("crypto").randomBytes(24).toString("base64")' +---- +-- + +. 
To enable pulling the PostgreSQL image from the {company-name} Ecosystem Catalog, add the image pull secret in the default service account within the namespace where the {product-short} instance is being deployed: ++ +-- +[source,terminal] +---- +kubectl patch serviceaccount default \ + -p '{"imagePullSecrets": [{"name": "rhdh-pull-secret"}]}' \ + -n +---- +-- + +. Create a Custom Resource file using the following template: ++ +-- +[source,yaml,subs="attributes+"] +---- +apiVersion: rhdh.redhat.com/v1alpha1 +kind: Backstage +metadata: + # TODO: this the name of your {product-short} instance + name: my-rhdh +spec: + application: + imagePullSecrets: + - "rhdh-pull-secret" + route: + enabled: false + appConfig: + configMaps: + - name: "app-config-rhdh" + extraEnvs: + secrets: + - name: "secrets-rhdh" +---- +-- + +. Create an Ingress resource using the following template, ensuring to customize the names as needed: ++ +-- +[source,yaml,subs="attributes+"] +---- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + # TODO: this the name of your {product-short} Ingress + name: my-rhdh + annotations: + alb.ingress.kubernetes.io/scheme: internet-facing + + alb.ingress.kubernetes.io/target-type: ip + + # TODO: Using an ALB HTTPS Listener requires a certificate for your own domain. Fill in the ARN of your certificate, e.g.: + alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-xxx:xxxx:certificate/xxxxxx + + alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' + + alb.ingress.kubernetes.io/ssl-redirect: '443' + + # TODO: Set your application domain name. + external-dns.alpha.kubernetes.io/hostname: + +spec: + ingressClassName: alb + rules: + # TODO: Set your application domain name. + - host: + http: + paths: + - path: / + pathType: Prefix + backend: + service: + # TODO: my-rhdh is the name of your Backstage Custom Resource. + # Adjust if you changed it! 
+ name: backstage-my-rhdh + port: + name: http-backend +---- + +In the previous template, replace ` ` with your {product-short} domain name and update the value of `alb.ingress.kubernetes.io/certificate-arn` with your certificate ARN. +-- + +.Verification + +Wait until the DNS name is responsive, indicating that your {product-short} instance is ready for use. diff --git a/modules/installation/proc-enable-signin.adoc b/modules/installation/proc-enable-signin.adoc new file mode 100644 index 0000000000..706fa9e393 --- /dev/null +++ b/modules/installation/proc-enable-signin.adoc @@ -0,0 +1,89 @@ +[id='proc-enable-signin_{context}'] += Enabling authentication in {product} + +Authentication within the {product} facilitates user sign-in, identification, and access to external resources. It supports multiple authentication providers, though one is typically configured for sign-in while the others are used for external resources. + +The {product} supports the following authentication providers: + +.Supported authentication providers +[cols="50%,50%", frame="all", options="header"] +|=== +|Authentication provider +|Value + +|Auth0 +|`auth0` + +|Atlassian +|`atlassian` + +|Azure +|`microsoft` + +|Azure Easy Auth +|`azure-easyauth` + +|Bitbucket +|`bitbucket` + +|Bitbucket Server +|`bitbucketServer` + +|Cloudflare Access +|`cfaccess` + +|GitHub +|`github` + +|GitLab +|`gitlab` + +|Google +|`google` + +|Google IAP +|`gcp-iap` + +|OIDC +|`oidc` + +|Okta +|`okta` + +|OAuth2 Custom Proxy +|`oauth2Proxy` + +|OneLogin +|`onelogin` + +|SAML +|`saml` +|=== + +.Prerequisites + +* You have installed the {product-short}. +For more information about installation, see xref:{installing-on-ocp-book-url}#assembly-install-rhdh-ocp-helm[{installing-on-ocp-book-title} with the Helm chart]. + +.Procedure + +. 
Add the configuration for the desired authentication provider under the `auth` section of the `app-config.yaml` file, such as: ++ +-- +[source,yaml] +---- +auth: + environment: development + providers: + github: + development: + clientId: ${AUTH_GITHUB_CLIENT_ID} + clientSecret: ${AUTH_GITHUB_CLIENT_SECRET} +---- +-- + +. Set the authentication provider key as the value to `signInPage` parameter in your `app-config.yaml` file, such as: ++ +`signInPage: oidc` + +. Start your {product-short} application and sign in. \ No newline at end of file diff --git a/modules/installation/proc-install-operator.adoc b/modules/installation/proc-install-operator.adoc new file mode 100644 index 0000000000..6879a8689f --- /dev/null +++ b/modules/installation/proc-install-operator.adoc @@ -0,0 +1,65 @@ +// Module included in the following assemblies +// assembly-install-rhdh-ocp-operator.adoc + +:_mod-docs-content-type: PROCEDURE +[id="proc-install-operator_{context}"] += Installing the {product} Operator + +As an administrator, you can install the {product} Operator. Authorized users can use the Operator to install {product} on the following platforms: + +* {ocp-brand-name} ({ocp-short}) +* {eks-brand-name} ({eks-short}) +* {aks-brand-name} ({aks-short}) + +{ocp-short} is currently supported from version {ocp-version-min} to {ocp-version}. See also the link:https://access.redhat.com/support/policy/updates/developerhub[{product} Life Cycle]. + +Containers are available for the following CPU architectures: + +* AMD64 and Intel 64 (`x86_64`) + +.Prerequisites + +* You are logged in as an administrator on the {ocp-short} web console. +* You have configured the appropriate roles and permissions within your project to create or access an application. For more information, see the link:https://docs.openshift.com/container-platform/{ocp-version}/applications/index.html[{ocp-brand-name} documentation on Building applications]. 
+ +[IMPORTANT] +==== +For enhanced security, better control over the Operator lifecycle, and preventing potential privilege escalation, install the {product} Operator in a dedicated default `rhdh-operator` namespace. You can restrict other users' access to the Operator resources through role bindings or cluster role bindings. + +You can also install the Operator in another namespace by creating the necessary resources, such as an Operator group. For more information, see link:https://docs.openshift.com/container-platform/latest/operators/admin/olm-adding-operators-to-cluster.html#olm-installing-global-namespaces_olm-adding-operators-to-a-cluster[Installing global Operators in custom namespaces]. + +However, if the {product} Operator shares a namespace with other Operators, then it shares the same update policy as well, preventing the customization of the update policy. For example, if one Operator is set to manual updates, the {product} Operator update policy is also set to manual. For more information, see link:https://docs.openshift.com/container-platform/latest/operators/understanding/olm/olm-colocation.html#olm-colocation-namespaces_olm-colocation[Colocation of Operators in a namespace]. + +==== + +.Procedure + +. In the *Administrator* perspective of the {ocp-short} web console, click *Operators > OperatorHub*. +. In the *Filter by keyword* box, enter {product-short} and click the *{product}* Operator card. +. On the *{product} Operator* page, click *Install*. + +. On the *Install Operator* page, use the *Update channel* drop-down menu to select the update channel that you want to use: +** The *fast* channel provides y-stream (x.y) and z-stream (x.y.z) updates, for example, updating from version 1.1 to 1.2, or from 1.1.0 to 1.1.1. ++ +[IMPORTANT] +==== +The `fast` channel includes all of the updates available for a particular version. Any update might introduce unexpected changes in your {product} deployment. 
Check the release notes for details about any potentially breaking changes. +==== + +** The *fast-1.1* channel only provides z-stream updates, for example, updating from version 1.1.1 to 1.1.2. If you want to update the {product} y-version in the future, for example, updating from 1.1 to 1.2, you must switch to the *fast* channel manually. + +. On the *Install Operator* page, choose the *Update approval* strategy for the Operator: +** If you choose the *Automatic* option, the Operator is updated without requiring manual confirmation. +** If you choose the *Manual* option, a notification opens when a new update is released in the update channel. The update must be manually approved by an administrator before installation can begin. + +. Click *Install*. + +.Verification + +* To view the installed {product} Operator, click *View Operator*. + +[role="_additional-resources"] +.Additional resources + +* xref:proc-install-rhdh-ocp-operator_{context}[Deploying {product} on {ocp-short} with the Operator] +* link:https://docs.openshift.com/container-platform/{ocp-version}/operators/admin/olm-adding-operators-to-cluster.html#olm-installing-from-operatorhub-using-web-console_olm-adding-operators-to-a-cluster[Installing from OperatorHub using the web console] diff --git a/modules/installation/proc-install-rhdh-airgapped-environment-ocp-helm.adoc b/modules/installation/proc-install-rhdh-airgapped-environment-ocp-helm.adoc new file mode 100644 index 0000000000..96f51a197b --- /dev/null +++ b/modules/installation/proc-install-rhdh-airgapped-environment-ocp-helm.adoc @@ -0,0 +1,138 @@ +// Module included in the following assemblies: +// no assembly + +[id="proc-install-rhdh-airgapped-environment-ocp-helm_{context}"] += Installing {product} in an air-gapped environment with the Helm Chart + +An air-gapped environment, also known as an air-gapped network or isolated network, ensures security by physically segregating the system or network. 
This isolation is established to prevent unauthorized access, data transfer, or communication between the air-gapped system and external sources. + +You can install {product} in an air-gapped environment to ensure security and meet specific regulatory requirements. + +To install {product-short} in an air-gapped environment, you must have access to the `registry.redhat.io` and the registry for the air-gapped environment. + +.Prerequisites + +* You have installed an {ocp-brand-name} {ocp-version-min} or later. +* You have access to the `registry.redhat.io`. +* You have access to the {ocp-brand-name} image registry of your cluster. For more information about exposing the image registry, see the {ocp-brand-name} documentation about https://docs.openshift.com/container-platform/{ocp-version}/registry/securing-exposing-registry.html[Exposing the registry]. +* You have installed the {openshift-cli} on your workstation. +* You have installed the `podman` command line tools on your workstation. +* You have an account in https://developers.redhat.com/[{rhdeveloper-name}] portal. + +.Procedure + +. Log in to your {ocp-short} account using the {openshift-cli}, by running the following command: ++ +[source,terminal] +---- +oc login -u -p https://api.:6443 +---- + +. Log in to the {ocp-short} image registry using the `podman` command line tool, by running the following command: ++ +[source,terminal] +---- +podman login -u kubeadmin -p $(oc whoami -t) default-route-openshift-image-registry. +---- ++ +[NOTE] +==== +You can run the following commands to get the full host name of the {ocp-short} image registry, and then use the host name in a command to log in: + +[source,terminal] +---- +REGISTRY_HOST=$(oc get route default-route -n openshift-image-registry --template='{{ .spec.host }}') +---- + +[source,terminal] +---- +podman login -u kubeadmin -p $(oc whoami -t) $REGISTRY_HOST +---- +==== + +. 
Log in to the `registry.redhat.io` in `podman` by running the following command: ++ +[source,terminal] +---- +podman login registry.redhat.io +---- ++ +For more information about registry authentication, see https://access.redhat.com/RegistryAuthentication[{company-name} Container Registry Authentication]. + +. Pull {product-short} and PostgreSQL images from https://catalog.redhat.com/software/containers/search[{company-name} Image registry] to your workstation, by running the following commands: ++ +[source,terminal,source,subs="attributes+"] +---- +podman pull registry.redhat.io/rhdh/rhdh-hub-rhel9:{product-chart-version} +---- ++ +[source,terminal,source,subs="attributes+"] +---- +podman pull registry.redhat.io/rhel9/postgresql-15:latest +---- + +. Push both images to the internal {ocp-short} image registry by running the following commands: ++ +[source,terminal,source,subs="attributes+"] +---- +podman push --remove-signatures registry.redhat.io/rhdh/rhdh-hub-rhel9:{product-chart-version} default-route-openshift-image-registry.//rhdh-hub-rhel9:{product-chart-version} +---- ++ +[source,terminal] +---- +podman push --remove-signatures registry.redhat.io/rhel9/postgresql-15:latest default-route-openshift-image-registry.//postgresql-15:latest +---- ++ +For more information about pushing images directly to the {ocp-short} image registry, see https://access.redhat.com/solutions/6959306[How do I push an Image directly into the OpenShift 4 registry]. ++ +[IMPORTANT] +==== +If an x509 error occurs, verify that you have link:https://access.redhat.com/solutions/6088891[installed the CA certificate used for {ocp-short} routes on your system]. +==== + +. Use the following command to verify that both images are present in the internal {ocp-short} registry: ++ +[source,terminal] +---- +oc get imagestream -n +---- + +. 
Enable local image lookup for both images by running the following commands: ++ +[source,terminal] +---- +oc set image-lookup postgresql-15 +---- ++ +[source,terminal] +---- +oc set image-lookup rhdh-hub-rhel9 +---- + +. Go to *YAML view* and update the `image` section for `backstage` and `postgresql` using the following values: ++ +-- +.Example values for Developer Hub image +[source,yaml] +---- +upstream: + backstage: + image: + registry: "" + repository: rhdh-hub-rhel9 + tag: latest +---- + +.Example values for PostgreSQL image +[source,yaml] +---- +upstream: + postgresql: + image: + registry: "" + repository: postgresql-15 + tag: latest +---- +-- + +. Install the {product} using Helm chart. diff --git a/modules/installation/proc-install-rhdh-airgapped-environment-ocp-operator.adoc b/modules/installation/proc-install-rhdh-airgapped-environment-ocp-operator.adoc new file mode 100644 index 0000000000..13a7f5e39a --- /dev/null +++ b/modules/installation/proc-install-rhdh-airgapped-environment-ocp-operator.adoc @@ -0,0 +1,72 @@ +// Module included in the following assemblies: +// no assembly + +:_mod-docs-content-type: PROCEDURE +[id="proc-install-rhdh-airgapped-environment-ocp-operator_{context}"] += Installing {product} in an air-gapped environment with the Operator + +On an {ocp-short} cluster operating on a restricted network, public resources are not available. However, deploying the {product} Operator and running {product-short} requires the following public resources: + +* Operator images (bundle, operator, catalog) +* Operands images ({product-very-short}, PostgreSQL) + +To make these resources available, replace them with their equivalent resources in a mirror registry accessible to the {ocp-short} cluster. + +You can use a helper script that mirrors the necessary images and provides the necessary configuration to ensure those images will be used when installing the {product} Operator and creating {product-short} instances. 
+ +[NOTE] +==== +This script requires a target mirror registry which you should already have installed if your {ocp-short} cluster is ready to operate on a restricted network. However, if you are preparing your cluster for disconnected usage, you can use the script to deploy a mirror registry in the cluster and use it for the mirroring process. +==== + +.Prerequisites +* You have an active {openshift-cli} session with administrative permissions to the {ocp-short} cluster. See link:https://docs.openshift.com/container-platform/{ocp-version}/cli_reference/openshift_cli/getting-started-cli.html[Getting started with the OpenShift CLI]. +* You have an active `oc registry` session to the `registry.redhat.io` {company-name} Ecosystem Catalog. See link:https://access.redhat.com/RegistryAuthentication[{company-name} Container Registry Authentication]. +* The `opm` CLI tool is installed. See link:https://docs.openshift.com/container-platform/{ocp-version}/cli_reference/opm/cli-opm-install.html[Installing the opm CLI]. +* The jq package is installed. See link:https://jqlang.github.io/jq/download/[Download jq]. +* Podman is installed. See link:https://podman.io/docs/installation[Podman Installation Instructions]. +* Skopeo version 1.14 or higher is installed. link:https://github.com/containers/skopeo/blob/main/install.md[See Installing Skopeo]. +* If you already have a mirror registry for your cluster, an active Skopeo session with administrative access to this registry is required. See link:https://github.com/containers/skopeo#authenticating-to-a-registry[Authenticating to a registry] and link:https://docs.openshift.com/container-platform/{ocp-version}/installing/disconnected_install/installing-mirroring-installation-images.html[Mirroring images for a disconnected installation]. + +[NOTE] +==== +The internal {ocp-short} cluster image registry cannot be used as a target mirror registry. 
See link:https://docs.openshift.com/container-platform/{ocp-version}/installing/disconnected_install/installing-mirroring-installation-images.html#installation-about-mirror-registry_installing-mirroring-installation-images[About the mirror registry]. +==== + +* If you prefer to create your own mirror registry, see link:https://docs.openshift.com/container-platform/{ocp-version}/installing/disconnected_install/installing-mirroring-creating-registry.html[Creating a mirror registry with mirror registry for Red Hat OpenShift]. + +* If you do not already have a mirror registry, you can use the helper script to create one for you and you need the following additional prerequisites: ++ +** The cURL package is installed. For {rhel}, the curl command is available by installing the curl package. To use curl for other platforms, see the link:https://curl.se/[cURL website]. +** The `htpasswd` command is available. For {rhel}, the `htpasswd` command is available by installing the `httpd-tools` package. + +.Procedure +. Download and run the mirroring script to install a custom Operator catalog and mirror the related images: `prepare-restricted-environment.sh` (link:https://github.com/redhat-developer/rhdh-operator/blob/{product-version}.x/.rhdh/scripts/prepare-restricted-environment.sh[source]). 
++ +[source,yaml,subs="attributes+"] +---- +curl -sSLO https://raw.githubusercontent.com/redhat-developer/rhdh-operator/{product-version}.x/.rhdh/scripts/prepare-restricted-environment.sh + +# if you do not already have a target mirror registry +# and want the script to create one for you +# use the following example: +bash prepare-restricted-environment.sh \ + --prod_operator_index "registry.redhat.io/redhat/redhat-operator-index:v{ocp-version}" \ + --prod_operator_package_name "rhdh" \ + --prod_operator_bundle_name "rhdh-operator" \ + --prod_operator_version "v{product-bundle-version}" + +# if you already have a target mirror registry +# use the following example: +bash prepare-restricted-environment.sh \ + --prod_operator_index "registry.redhat.io/redhat/redhat-operator-index:v{ocp-version}" \ + --prod_operator_package_name "rhdh" \ + --prod_operator_bundle_name "rhdh-operator" \ + --prod_operator_version "v{product-bundle-version}" \ + --use_existing_mirror_registry "my_registry" +---- ++ +[NOTE] +==== +The script can take several minutes to complete as it copies multiple images to the mirror registry. +==== diff --git a/modules/installation/proc-install-rhdh-ocp-helm-cli.adoc b/modules/installation/proc-install-rhdh-ocp-helm-cli.adoc new file mode 100644 index 0000000000..20b4a6f5bb --- /dev/null +++ b/modules/installation/proc-install-rhdh-ocp-helm-cli.adoc @@ -0,0 +1,53 @@ +// Module included in the following assemblies: +// assembly-install-rhdh-ocp-helm.adoc + +[id="proc-install-rhdh-ocp-helm-cli_{context}"] += Deploying {product-short} on {ocp-short} with the Helm CLI + +You can use the Helm CLI to install {product} on {ocp-brand-name}. + +.Prerequisites +* You have installed the {openshift-cli} on your workstation. +* You are logged in to your {ocp-short} account. +* A user with the {ocp-short} admin role has configured the appropriate roles and permissions within your project to create an application. 
For more information about {ocp-short} roles, see link:https://docs.openshift.com/container-platform/4.15/authentication/using-rbac.html[Using RBAC to define and apply permissions]. +* You have created a project in {ocp-short}. For more information about creating a project in {ocp-short}, see link:https://docs.openshift.com/container-platform/4.15/applications/projects/working-with-projects.html#odc-creating-projects-using-developer-perspective_projects[{ocp-brand-name} documentation]. +* You have installed the Helm CLI tool. + +.Procedure +. Create and activate the __ {ocp-short} project: ++ +[subs="quotes+"] +---- +NAMESPACE=__ +oc new-project ${NAMESPACE} || oc project ${NAMESPACE} +---- + +. Install the {product} Helm chart: ++ +[subs="attributes+"] +---- +helm upgrade redhat-developer-hub -i https://github.com/openshift-helm-charts/charts/releases/download/redhat-redhat-developer-hub-{product-chart-version}/redhat-developer-hub-{product-chart-version}.tgz +---- + +. Configure your {product-short} Helm chart instance with the {product-short} database password and router base URL values from your {ocp-short} cluster: ++ +[subs="attributes+"] +---- +PASSWORD=$(oc get secret redhat-developer-hub-postgresql -o jsonpath="{.data.password}" | base64 -d) +CLUSTER_ROUTER_BASE=$(oc get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//') +helm upgrade redhat-developer-hub -i "https://github.com/openshift-helm-charts/charts/releases/download/redhat-redhat-developer-hub-{product-chart-version}/redhat-developer-hub-{product-chart-version}.tgz" \ + --set global.clusterRouterBase="$CLUSTER_ROUTER_BASE" \ + --set global.postgresql.auth.password="$PASSWORD" +---- + +. Display the running {product-short} instance URL: ++ +---- +echo "https://redhat-developer-hub-$NAMESPACE.$CLUSTER_ROUTER_BASE" +---- + +.Verification +* Open the running {product-short} instance URL in your browser to use {product-short}. 
+ +.Additional resources +* link:https://docs.openshift.com/container-platform/4.16/applications/working_with_helm_charts/installing-helm.html[Installing Helm] \ No newline at end of file diff --git a/modules/installation/proc-install-rhdh-ocp-helm-gui.adoc b/modules/installation/proc-install-rhdh-ocp-helm-gui.adoc new file mode 100644 index 0000000000..ea28ba1377 --- /dev/null +++ b/modules/installation/proc-install-rhdh-ocp-helm-gui.adoc @@ -0,0 +1,78 @@ +// Module included in the following assemblies: +// assembly-install-rhdh-ocp-helm.adoc + +[id='proc-install-rhdh-ocp-helm-gui_{context}'] += Deploying {product-short} from the {ocp-short} web console with the Helm Chart + +You can use a Helm chart to install {product-short} on the {ocp-brand-name} web console. + +Helm is a package manager on {ocp-short} that provides the following features: + +* Applies regular application updates using custom hooks +* Manages the installation of complex applications +* Provides charts that you can host on public and private servers +* Supports rolling back to previous application versions + +The {product} Helm chart is available in the Helm catalog on {osd-short} and {ocp-short}. + +.Prerequisites + +* You are logged in to your {ocp-short} account. +* A user with the {ocp-short} `admin` role has configured the appropriate roles and permissions within your project to create an application. For more information about {ocp-short} roles, see link:https://docs.openshift.com/container-platform/{ocp-version}/authentication/using-rbac.html[Using RBAC to define and apply permissions]. +* You have created a project in {ocp-short}. For more information about creating a project in {ocp-short}, see link:https://docs.openshift.com/container-platform/{ocp-version}/applications/projects/working-with-projects.html#odc-creating-projects-using-developer-perspective_projects[{ocp-brand-name} documentation]. + +.Procedure + +. 
From the *Developer* perspective on the {product-short} web console, click *+Add*. +. From the *Developer Catalog* panel, click *Helm Chart*. +. In the *Filter by keyword* box, enter _Developer Hub_ and click the *{product}* card. +. From the {product} page, click *Create*. +. From your cluster, copy the {ocp-short} router host (for example: `apps..com`). +. Select the radio button to configure the {product-short} instance with either the form view or YAML view. The Form view is selected by default. + +** Using *Form view* ++ +.. To configure the instance with the Form view, go to *Root Schema* -> *global* -> *Enable service authentication within Backstage instance* and paste your {ocp-short} router host into the field on the form. + +** Using *YAML view* ++ +.. To configure the instance with the YAML view, paste your {ocp-short} router hostname in the `global.clusterRouterBase` parameter value as shown in the following example: ++ +[source,yaml] +---- +global: + auth: + backend: + enabled: true + clusterRouterBase: apps..com + # other Red Hat Developer Hub Helm Chart configurations +---- ++ +. Edit the other values if needed. ++ +[NOTE] +==== +The information about the host is copied and can be accessed by the {product-short} backend. + +When an {ocp-short} route is generated automatically, the host value for the route is inferred and the same host information is sent to the {product-short}. Also, if the {product-short} is present on a custom domain by setting the host manually using values, the custom host takes precedence. +==== ++ +. Click *Create* and wait for the database and {product-short} to start. +. Click the *Open URL* icon to start using the {product-short} platform. ++ +image::rhdh/rhdh-helm-install.png[] + +[NOTE] +==== +Your `developer-hub` pod might be in a `CrashLoopBackOff` state if the {product-short} container cannot access the configuration files. 
This error is indicated by the following log: + +[source,log] +---- +Loaded config from app-config-from-configmap.yaml, env +... +2023-07-24T19:44:46.223Z auth info Configuring "database" as KeyStore provider type=plugin +Backend failed to start up Error: Missing required config value at 'backend.database.client' +---- + +To resolve the error, verify the configuration files. +==== diff --git a/modules/installation/proc-install-rhdh-ocp-operator.adoc b/modules/installation/proc-install-rhdh-ocp-operator.adoc new file mode 100644 index 0000000000..585521c076 --- /dev/null +++ b/modules/installation/proc-install-rhdh-ocp-operator.adoc @@ -0,0 +1,42 @@ +// Module included in the following assemblies: +// assembly-install-rhdh-ocp-operator.adoc + +[id="proc-install-rhdh-ocp-operator_{context}"] += Deploying {product} on {ocp-short} with the Operator + +As a developer, you can deploy a {product} instance on {ocp-short} by using the *Developer Catalog* in the {ocp-brand-name} web console. This deployment method uses the {product} Operator. + +.Prerequisites + +* A cluster administrator has installed the {product} Operator. For more information, see xref:proc-install-operator_{context}[]. +* You have added a custom configuration file to {ocp-short}. For more information, see link:{LinkAdminGuide}[Adding a custom configuration file to {ocp-short}]. + +.Procedure + +. Create a project in {ocp-short} for your {product} instance, or select an existing project. ++ +[TIP] +==== +For more information about creating a project in {ocp-short}, see link:https://docs.openshift.com/container-platform/4.15/applications/projects/working-with-projects.html#creating-a-project-using-the-web-console_projects[Creating a project by using the web console] in the {ocp-brand-name} documentation. +==== +. From the *Developer* perspective on the {ocp-short} web console, click *+Add*. +. From the *Developer Catalog* panel, click *Operator Backed*. +. 
In the *Filter by keyword* box, enter _{product-short}_ and click the *{product}* card. +. Click *Create*. +. Add custom configurations for the {product} instance. +. On the *Create Backstage* page, click *Create* + +.Verification + +After the pods are ready, you can access the {product} platform by opening the URL. + +. Confirm that the pods are ready by clicking the pod in the *Topology* view and confirming the *Status* in the *Details* panel. The pod status is *Active* when the pod is ready. + +. From the *Topology* view, click the *Open URL* icon on the {product-short} pod. ++ +image::rhdh/operator-install-1.png[] + +[role="_additional-resources"] +[id="additional-resources_proc-install-rhdh-ocp-operator"] +.Additional resources +* link:https://docs.openshift.com/container-platform/{ocp-version}/applications/index.html[{ocp-short} - Building applications overview] diff --git a/modules/installation/proc-install-rhdh-osd-gcp-helm.adoc b/modules/installation/proc-install-rhdh-osd-gcp-helm.adoc new file mode 100644 index 0000000000..b53318b52f --- /dev/null +++ b/modules/installation/proc-install-rhdh-osd-gcp-helm.adoc @@ -0,0 +1,47 @@ +// Module included in the following assemblies: +// assembly-install-rhdh-osd-gcp.adoc + +[id="proc-install-rhdh-osd-gcp-helm_{context}"] += Installing {product} on {osd-short} on {gcp-short} using the Helm Chart + +You can install {product-short} on {osd-short} on {gcp-short} using the {product} Helm Chart. + +.Prerequisites +* You have a valid {gcp-short} account. +* Your {osd-short} cluster is running on {gcp-short}. For more information, see link:https://docs.redhat.com/en/documentation/openshift_dedicated/4/html/installing_accessing_and_deleting_openshift_dedicated_clusters/osd-creating-a-cluster-on-gcp[Creating a cluster on GCP] in {osd-brand-name} documentation. +* You have installed Helm 3 or the latest. + +.Procedure +. From the *Developer* perspective on the {product-short} web console, click *+Add*. +. 
From the *Developer Catalog* panel, click *Helm Chart*. +. In the *Filter by keyword* box, enter {product-short} and click the *{product}* card. +. From the {product} page, click *Create*. +. From your cluster, copy the {ocp-short} router host (for example: `apps..com`). +. Select the radio button to configure the {product-short} instance with either the form view or YAML view. The *Form view* is selected by default. ++ +-- +.. Using *Form view* +... To configure the instance with the Form view, go to *Root Schema → global → Enable service authentication within Backstage instance* and paste your {ocp-short} router host into the field on the form. + +.. Using *YAML view* +... To configure the instance with the YAML view, paste your {ocp-short} router hostname in the `global.clusterRouterBase` parameter value as shown in the following example: ++ +[source,yaml,subs="attributes+"] +---- +global: + auth: + backend: + enabled: true + clusterRouterBase: apps..com + # other {product} Helm Chart configurations +---- +-- + +. Edit the other values if needed, then click *Create* and wait for the database and {product-short} to start. + +.Verification + +* To access the {product-short}, click the *Open URL* icon. + +.Additional resources +* link:{LinkAdminGuide}[{NameOfAdminGuide}] diff --git a/modules/installation/proc-install-rhdh-osd-gcp-operator.adoc b/modules/installation/proc-install-rhdh-osd-gcp-operator.adoc new file mode 100644 index 0000000000..14062de60c --- /dev/null +++ b/modules/installation/proc-install-rhdh-osd-gcp-operator.adoc @@ -0,0 +1,29 @@ +// Module included in the following assemblies: +// assembly-install-rhdh-osd-gcp.adoc + +[id="proc-install-rhdh-osd-gcp-operator_{context}"] += Installing {product} on {osd-short} on {gcp-short} using the Operator + +You can install {product-short} on {osd-short} on {gcp-short} using the {product} Operator. + +.Prerequisites +* You have a valid {gcp-short} account. 
+* Your {osd-short} cluster is running on {gcp-short}. For more information, see link:https://docs.redhat.com/en/documentation/openshift_dedicated/4/html/installing_accessing_and_deleting_openshift_dedicated_clusters/osd-creating-a-cluster-on-gcp[Creating a cluster on GCP] in {osd-brand-name} documentation. +* You have administrator access to {osd-short} cluster and {gcp-short} project. + +.Procedure + +. In the *Administrator* perspective of the {ocp-short} web console, click *Operators > OperatorHub*. +. In the *Filter by keyword* box, enter {product-short} and click the *{product} Operator* card. +. On the *{product} Operator* page, click *Install*. +. In the {ocp-short} console, navigate to *Installed Operators* and select *{product} Operator*. +. From the {product-short} Operator page, click *Create New Instance* and specify the name and namespace where you want to deploy {product-short}. +. Configure the required settings such as Git integration, secret management, and user permissions. +. Review the configuration, select deployment options, and click *Create*. + +.Verification + +* To access the {product-short}, navigate to the {product-short} URL provided in the {ocp-short} web console. + +.Additional resources +* link:{LinkAdminGuide}[{NameOfAdminGuide}] \ No newline at end of file diff --git a/modules/installation/proc-rhdh-deploy-aks-helm.adoc b/modules/installation/proc-rhdh-deploy-aks-helm.adoc new file mode 100644 index 0000000000..6a0163b107 --- /dev/null +++ b/modules/installation/proc-rhdh-deploy-aks-helm.adoc @@ -0,0 +1,215 @@ +// Module included in the following assemblies +// assembly-install-rhdh-aks.adoc + +[id="proc-rhdh-deploy-aks-helm_{context}"] += Deploying {product-short} on {aks-short} with the Helm chart + +You can deploy your {product-short} application on {aks-name} ({aks-short}) to access a comprehensive solution for building, testing, and deploying applications. 
 + +.Prerequisites + +* You have a {azure-brand-name} account with an active subscription. +* You have installed the https://learn.microsoft.com/en-us/cli/azure/install-azure-cli[Azure CLI]. +* You have installed the link:https://kubernetes.io/docs/reference/kubectl/[`kubectl` CLI]. +* You are logged into your cluster using `kubectl`, and have `developer` or `admin` permissions. +* You have installed Helm 3 or the latest. + +.Comparison of {aks-short} specifics with the base {product-short} deployment + +* *Permissions issue*: {product-short} containers might encounter permission-related errors, such as `Permission denied` when attempting certain operations. This error can be addressed by adjusting the `fsGroup` in the `PodSpec.securityContext`. + +* *Ingress configuration*: In AKS, configuring ingress is essential for accessing the installed {product-short} instance. Accessing the {product-short} instance requires enabling the Routing add-on, an NGINX-based Ingress Controller, using the following command: ++ +[source,terminal] +---- +az aks approuting enable --resource-group --name +---- ++ +[TIP] +==== +You might need to install the {azure-short} CLI extension `aks-preview`. If the extension is not installed automatically, you might need to install it manually using the following command: + +[source,terminal] +---- +az extension add --upgrade -n aks-preview --allow-preview true +---- +==== ++ +[NOTE] +==== +After you install the Ingress Controller, the `app-routing-system` namespace with the Ingress Controller will be deployed in your cluster. Note the address of your {product-short} application from the installed Ingress Controller (for example, 108.141.70.228) for later access to the {product-short} application, later referenced as ``. 
+ +[source,terminal] +---- +kubectl get svc nginx --namespace app-routing-system -o jsonpath='{.status.loadBalancer.ingress[0].ip}' +---- +==== + +* *Namespace management*: You can create a dedicated namespace for {product-short} deployment in {aks-short} using the following command: ++ +[source,terminal] +---- +kubectl create namespace +---- + +.Procedure + +. Log in to {aks-short} by running the following command: ++ +[source,terminal] +---- +az login [--tenant=] +---- + +. Create a resource group by running the following command: ++ +[source,terminal] +---- +az group create --name --location +---- ++ +[TIP] +==== +You can list available regions by running the following command: + +[source,terminal] +---- +az account list-locations -o table +---- + +==== + +. Create an {aks-short} cluster by running the following command: ++ +[source,terminal] +---- +az aks create \ +--resource-group \ +--name \ +--enable-managed-identity \ +--generate-ssh-keys +---- ++ +You can refer to `--help` for additional options. + +. Connect to your cluster by running the following command: ++ +[source,terminal] +---- +az aks get-credentials --resource-group --name +---- ++ +The previous command configures the Kubernetes client and sets the current context in the `kubeconfig` to point to your {aks-short} cluster. + +. Open terminal and run the following command to add the Helm chart repository: ++ +[source,terminal] +---- +helm repo add openshift-helm-charts https://charts.openshift.io/ +---- + +. Create and activate the __ namespace: ++ +[source,terminal,subs="quotes+"] +---- +DEPLOYMENT_NAME= +NAMESPACE= +kubectl create namespace ${NAMESPACE} +kubectl config set-context --current --namespace=${NAMESPACE} +---- + +. 
Create a pull secret, which is used to pull the {product-short} images from the {company-name} Ecosystem, by running the following command: ++ +[source,terminal] +---- +kubectl -n $NAMESPACE create secret docker-registry rhdh-pull-secret \ + --docker-server=registry.redhat.io \ + --docker-username= \ + --docker-password= \ + --docker-email= +---- + +. Create a file named `values.yaml` using the following template: ++ +[source,yaml] +---- +global: + host: +route: + enabled: false +upstream: + ingress: + enabled: true + className: webapprouting.kubernetes.azure.com + host: + backstage: + image: + pullSecrets: + - rhdh-pull-secret + podSecurityContext: + fsGroup: 3000 + postgresql: + image: + pullSecrets: + - rhdh-pull-secret + primary: + podSecurityContext: + enabled: true + fsGroup: 3000 + volumePermissions: + enabled: true +---- + +. To install {product-short} by using the Helm chart, run the following command: ++ +[source,terminal,subs="attributes+"] +---- +helm -n $NAMESPACE install -f values.yaml $DEPLOYMENT_NAME openshift-helm-charts/redhat-developer-hub --version {product-chart-version} +---- + +. Verify the deployment status: ++ +[source,terminal] +---- +kubectl get deploy $DEPLOYMENT_NAME -n $NAMESPACE +---- + +. Configure your {product-short} Helm chart instance with the {product-short} database password and router base URL values from your cluster: ++ +[source,terminal,subs="attributes+"] +---- +PASSWORD=$(kubectl get secret redhat-developer-hub-postgresql -o jsonpath="{.data.password}" | base64 -d) +CLUSTER_ROUTER_BASE=$(kubectl get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//') +helm upgrade $DEPLOYMENT_NAME -i "https://github.com/openshift-helm-charts/charts/releases/download/redhat-redhat-developer-hub-{product-chart-version}/redhat-developer-hub-{product-chart-version}.tgz" \ + --set global.clusterRouterBase="$CLUSTER_ROUTER_BASE" \ + --set global.postgresql.auth.password="$PASSWORD" +---- + +. 
Display the running {product-short} instance URL, by running the following command: ++ +[source,terminal] +---- +echo "https://$DEPLOYMENT_NAME-$NAMESPACE.$CLUSTER_ROUTER_BASE" +---- + +.Verification + +* Open the running {product-short} instance URL in your browser to use {product-short}. + +.Upgrade + +* To upgrade the deployment, run the following command: ++ +[source,terminal,subs="attributes+"] +---- +helm upgrade $DEPLOYMENT_NAME -i https://github.com/openshift-helm-charts/charts/releases/download/redhat-redhat-developer-hub-{product-chart-version}/redhat-developer-hub-{product-chart-version}.tgz +---- + +.Delete + +* To delete the deployment, run the following command: ++ +[source,terminal] +---- +helm -n $NAMESPACE delete $DEPLOYMENT_NAME +---- diff --git a/modules/installation/proc-rhdh-deploy-aks-operator.adoc b/modules/installation/proc-rhdh-deploy-aks-operator.adoc new file mode 100644 index 0000000000..b5e1636bbd --- /dev/null +++ b/modules/installation/proc-rhdh-deploy-aks-operator.adoc @@ -0,0 +1,170 @@ +// Module included in the following assemblies +// assembly-install-rhdh-aks.adoc + +[id='proc-rhdh-deploy-aks-operator_{context}'] += Deploying {product-short} on {aks-short} with the Operator + +You can deploy your {product-short} on {aks-short} using the {product} Operator. + +.Procedure + +. Obtain the {product} Operator manifest file, named `rhdh-operator-.yaml`, and modify the default configuration of `db-statefulset.yaml` and `deployment.yaml` by adding the following fragment: ++ +-- +[source,yaml] +---- +securityContext: + fsGroup: 300 +---- + +Following is the specified locations in the manifests: + +[source,yaml] +---- +db-statefulset.yaml: | spec.template.spec +deployment.yaml: | spec.template.spec +---- +-- + +. 
Apply the modified Operator manifest to your Kubernetes cluster: ++ +-- +[source,bash] +---- +kubectl apply -f rhdh-operator-.yaml +---- + +[NOTE] +==== +Execution of the previous command is cluster-scoped and requires appropriate cluster privileges. +==== +-- + +. Create an `ImagePull Secret` named `rhdh-pull-secret` using your Red Hat credentials to access images from the protected `registry.redhat.io` as shown in the following example: ++ +-- +[source,bash] +---- +kubectl -n create secret docker-registry rhdh-pull-secret \ + --docker-server=registry.redhat.io \ + --docker-username= \ + --docker-password= \ + --docker-email= +---- +-- + +. Create an Ingress manifest file, named `rhdh-ingress.yaml`, specifying your {product-short} service name as follows: ++ +-- +[source,yaml] +---- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: rhdh-ingress + namespace: +spec: + ingressClassName: webapprouting.kubernetes.azure.com + rules: + - http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: backstage- + port: + name: http-backend +---- +-- + +. To deploy the created Ingress, run the following command: ++ +-- +[source,terminal] +---- +kubectl -n apply -f rhdh-ingress.yaml +---- +-- + +. Create a ConfigMap named `app-config-rhdh` containing the {product-short} configuration using the following example: ++ +-- +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: app-config-rhdh +data: + "app-config-rhdh.yaml": | + app: + title: Red Hat Developer Hub + baseUrl: https:// + backend: + auth: + externalAccess: + - type: legacy + options: + subject: legacy-default-config + secret: "${BACKEND_SECRET}" + baseUrl: https:// + cors: + origin: https:// +---- +-- + +. 
Create a Secret named `secrets-rhdh` and add a key named `BACKEND_SECRET` with a `Base64-encoded` string value as shown in the following example: ++ +-- +[source,yaml] +---- +apiVersion: v1 +kind: Secret +metadata: + name: secrets-rhdh +stringData: + BACKEND_SECRET: "xxx" +---- +-- + +. Create a Custom Resource (CR) manifest file named `rhdh.yaml` and include the previously created `rhdh-pull-secret` as follows: ++ +-- +[source,yaml] +---- +apiVersion: rhdh.redhat.com/v1alpha1 +kind: Backstage +metadata: + name: +spec: + application: + imagePullSecrets: + - rhdh-pull-secret + appConfig: + configMaps: + - name: "app-config-rhdh" + extraEnvs: + secrets: + - name: "secrets-rhdh" +---- +-- + +. Apply the CR manifest to your namespace: ++ +-- +[source,terminal] +---- +kubectl -n apply -f rhdh.yaml +---- +-- + +. Access the deployed {product-short} using the URL: `https://`, where is the Ingress address obtained earlier (for example, `https://108.141.70.228`). +. Optional: To delete the CR, run the following command: ++ +-- +[source,terminal] +---- +kubectl -n delete -f rhdh.yaml +---- +-- diff --git a/modules/installation/proc-rhdh-deploy-eks-helm.adoc b/modules/installation/proc-rhdh-deploy-eks-helm.adoc new file mode 100644 index 0000000000..c6aff3342e --- /dev/null +++ b/modules/installation/proc-rhdh-deploy-eks-helm.adoc @@ -0,0 +1,127 @@ +// Module included in the following assemblies +// assembly-install-rhdh-eks.adoc + +[id='proc-rhdh-deploy-eks-helm_{context}'] += Installing {product-short} on {eks-short} with the Helm chart + +When you install the {product-short} Helm chart in {eks-name} ({eks-short}), it orchestrates the deployment of a {product-short} instance, which provides a robust developer platform within the {aws-short} ecosystem. + +.Prerequisites + +* You have an {eks-short} cluster with AWS Application Load Balancer (ALB) add-on installed. 
For more information, see https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html[Application load balancing on Amazon {eks-short}] and https://docs.aws.amazon.com/eks/latest/userguide/aws-load-balancer-controller.html[Installing the AWS Load Balancer Controller add-on]. +* You have configured a domain name for your {product-short} instance. The domain name can be a hosted zone entry on Route 53 or managed outside of AWS. For more information, see https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-configuring.html[Configuring Amazon Route 53 as your DNS service] documentation. +* You have an entry in the AWS Certificate Manager (ACM) for your preferred domain name. Make sure to keep a record of your Certificate ARN. +* You have subscribed to `registry.redhat.io`. For more information, see https://access.redhat.com/RegistryAuthentication[{company-name} Container Registry Authentication]. +* You have set the context to the {eks-short} cluster in your current `kubeconfig`. For more information, see https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html[Creating or updating a kubeconfig file for an Amazon {eks-short} cluster]. +* You have installed `kubectl`. For more information, see https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html[Installing or updating kubectl]. +* You have installed Helm 3 or later. For more information, see https://docs.aws.amazon.com/eks/latest/userguide/helm.html[Using Helm with Amazon {eks-short}].
Create a pull secret using the following command: ++ +-- +[source,terminal] +---- +kubectl create secret docker-registry rhdh-pull-secret \ + --docker-server=registry.redhat.io \ + --docker-username= \ <1> + --docker-password= \ <2> + --docker-email= <3> +---- +<1> Enter your username in the command. +<2> Enter your password in the command. +<3> Enter your email address in the command. + +The created pull secret is used to pull the {product-short} images from the {company-name} Ecosystem. +-- + +. Create a file named `values.yaml` using the following template: ++ +[source,yaml,subs="attributes+"] +---- +global: + # TODO: Set your application domain name. + host: + + +route: + enabled: false + + +upstream: + service: + # NodePort is required for the ALB to route to the Service + type: NodePort + + + ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: alb + + + alb.ingress.kubernetes.io/scheme: internet-facing + + + # TODO: Using an ALB HTTPS Listener requires a certificate for your own domain. Fill in the ARN of your certificate, e.g.: + alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:xxx:xxxx:certificate/xxxxxx + + + alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' + + + alb.ingress.kubernetes.io/ssl-redirect: '443' + + + # TODO: Set your application domain name. + external-dns.alpha.kubernetes.io/hostname: + + + backstage: + image: + pullSecrets: + - rhdh-pull-secret + podSecurityContext: + # you can assign any random value as fsGroup + fsGroup: 2000 + postgresql: + image: + pullSecrets: + - rhdh-pull-secret + primary: + podSecurityContext: + enabled: true + # you can assign any random value as fsGroup + fsGroup: 3000 + volumePermissions: + enabled: true +---- +. 
Run the following command in your terminal to deploy {product-short} using the latest version of the Helm chart and the `values.yaml` file created in the previous step: ++ +[source,terminal,subs="attributes+"] +---- +helm install rhdh \ + openshift-helm-charts/redhat-developer-hub \ + [--version {product-chart-version}] \ + --values /path/to/values.yaml +---- + +[NOTE] +==== +For the latest chart version, see https://github.com/openshift-helm-charts/charts/tree/main/charts/redhat/redhat/redhat-developer-hub +==== + +.Verification + +Wait until the DNS name is responsive, indicating that your {product-short} instance is ready for use. + diff --git a/modules/installation/proc-rhdh-deploy-eks-operator.adoc b/modules/installation/proc-rhdh-deploy-eks-operator.adoc new file mode 100644 index 0000000000..70e8ffc313 --- /dev/null +++ b/modules/installation/proc-rhdh-deploy-eks-operator.adoc @@ -0,0 +1,365 @@ +// Module included in the following assemblies +// assembly-install-rhdh-eks.adoc + +[id='proc-rhdh-deploy-eks-operator_{context}'] += Installing {product-short} on {eks-short} with the Operator + +You can install the {product} Operator with or without the Operator Lifecycle Manager (OLM) framework. + +.Additional resources
* For information about the OLM, see link:https://olm.operatorframework.io/docs/[Operator Lifecycle Manager (OLM)] documentation. + +== Installing the {product-short} Operator with the OLM framework + +You can install the {product-short} Operator on {eks-short} using the https://olm.operatorframework.io[Operator Lifecycle Manager (OLM) framework]. Following that, you can proceed to deploy your {product-short} instance in {eks-short}. + +.Prerequisites + +* You have set the context to the {eks-short} cluster in your current `kubeconfig`. For more information, see https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html[Creating or updating a kubeconfig file for an Amazon {eks-short} cluster]. +* You have installed `kubectl`. 
For more information, see https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html[Installing or updating kubectl]. +* You have subscribed to `registry.redhat.io`. For more information, see https://access.redhat.com/RegistryAuthentication[{company-name} Container Registry Authentication]. +* You have installed the Operator Lifecycle Manager (OLM). For more information about installation and troubleshooting, see https://operatorhub.io/how-to-install-an-operator#How-do-I-get-Operator-Lifecycle-Manager?[How do I get Operator Lifecycle Manager?] + +.Procedure + +. Run the following command in your terminal to create the `rhdh-operator` namespace where the Operator is installed: ++ +-- +[source,terminal] +---- +kubectl create namespace rhdh-operator +---- +-- + +. Create a pull secret using the following command: ++ +-- +[source,terminal] +---- +kubectl -n rhdh-operator create secret docker-registry rhdh-pull-secret \ + --docker-server=registry.redhat.io \ + --docker-username= \ <1> + --docker-password= \ <2> + --docker-email= <3> +---- + +<1> Enter your username in the command. +<2> Enter your password in the command. +<3> Enter your email address in the command. + +The created pull secret is used to pull the {product-short} images from the {company-name} Ecosystem. +-- + +. Create a `CatalogSource` resource that contains the Operators from the {company-name} Ecosystem: ++ +-- +[source,terminal,subs="attributes+"] +---- +cat <.yaml`, which is updated manually. +-- + +. Run the following command to apply replacements in the generated deployment manifest: ++ +-- +[source,terminal] +---- +sed -i "s/backstage-operator/rhdh-operator/g" rhdh-operator-*.yaml +sed -i "s/backstage-system/rhdh-operator/g" rhdh-operator-*.yaml +sed -i "s/backstage-controller-manager/rhdh-controller-manager/g" rhdh-operator-*.yaml +---- +-- + +. Open the generated deployment manifest file in an editor and perform the following steps: +.. 
Locate the `db-statefulset.yaml` string and add the `fsGroup` to its `spec.template.spec.securityContext`, as shown in the following example: ++ +-- +[source,yaml] +---- + db-statefulset.yaml: | + apiVersion: apps/v1 + kind: StatefulSet +--- TRUNCATED --- + spec: + --- TRUNCATED --- + restartPolicy: Always + securityContext: + # You can assign any random value as fsGroup + fsGroup: 2000 + serviceAccount: default + serviceAccountName: default +--- TRUNCATED --- +---- +-- + +.. Locate the `deployment.yaml` string and add the `fsGroup` to its specification, as shown in the following example: ++ +-- +[source,yaml] +---- + deployment.yaml: | + apiVersion: apps/v1 + kind: Deployment +--- TRUNCATED --- + spec: + securityContext: + # You can assign any random value as fsGroup + fsGroup: 3000 + automountServiceAccountToken: false +--- TRUNCATED --- +---- +-- + +.. Locate the `service.yaml` string and change the `type` to `NodePort` as follows: ++ +-- +[source,yaml] +---- + service.yaml: | + apiVersion: v1 + kind: Service + spec: + # NodePort is required for the ALB to route to the Service + type: NodePort +--- TRUNCATED --- +---- +-- + +.. Replace the default images with the images that are pulled from the {company-name} Ecosystem: ++ +-- +[source,terminal,subs="attributes+"] +---- +sed -i "s#gcr.io/kubebuilder/kube-rbac-proxy:.*#registry.redhat.io/openshift4/ose-kube-rbac-proxy:v{ocp-version}#g" rhdh-operator-*.yaml + +sed -i "s#(quay.io/janus-idp/operator:.*|quay.io/rhdh-community/operator:.*)#registry.redhat.io/rhdh/rhdh-rhel9-operator:{product-version}#g" rhdh-operator-*.yaml + +sed -i "s#quay.io/janus-idp/backstage-showcase:.*#registry.redhat.io/rhdh/rhdh-hub-rhel9:{product-version}#g" rhdh-operator-*.yaml + +sed -i "s#quay.io/fedora/postgresql-15:.*#registry.redhat.io/rhel9/postgresql-15:latest#g" rhdh-operator-*.yaml +---- +-- + +. 
Add the image pull secret to the manifest in the Deployment resource as follows: ++ +-- +[source,yaml] +---- +--- TRUNCATED --- + +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: rhdh-operator + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: deployment + app.kubernetes.io/part-of: rhdh-operator + control-plane: controller-manager + name: rhdh-controller-manager + namespace: rhdh-operator +spec: + replicas: 1 + selector: + matchLabels: + control-plane: controller-manager + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + spec: + imagePullSecrets: + - name: rhdh-pull-secret +--- TRUNCATED --- +---- +-- + +. Apply the manifest to deploy the operator using the following command: ++ +-- +[source,terminal] +---- +kubectl apply -f rhdh-operator-VERSION.yaml +---- +-- + +. Run the following command to verify that the Operator is running: ++ +-- +[source,terminal] +---- +kubectl -n rhdh-operator get pods -w +---- +-- diff --git a/modules/observability/proc-admin-enabling-metrics-ocp-helm.adoc b/modules/observability/proc-admin-enabling-metrics-ocp-helm.adoc new file mode 100644 index 0000000000..91d572b74c --- /dev/null +++ b/modules/observability/proc-admin-enabling-metrics-ocp-helm.adoc @@ -0,0 +1,43 @@ +// Module included in the following assemblies: +// +// * assemblies/assembly-rhdh-observability.adoc + +:_mod-docs-content-type: PROCEDURE +[id="proc-admin-enabling-metrics-ocp-helm_{context}"] += Enabling metrics monitoring in a Helm chart installation on an {ocp-short} cluster + +You can enable and view metrics for a {product} Helm deployment from the *Developer* perspective of the {ocp-short} web console. 
+ +.Prerequisites + +* Your {ocp-short} cluster has link:https://docs.openshift.com/container-platform/latest/observability/monitoring/enabling-monitoring-for-user-defined-projects.html[monitoring for user-defined projects] enabled. +* You have installed {product} on {ocp-short} using the Helm chart. + +.Procedure + +. From the *Developer* perspective in the {ocp-short} web console, select the *Topology* view. +. Click the overflow menu of the {product} Helm chart, and select *Upgrade*. ++ +image::rhdh/helm-upgrade.png[] + +. On the *Upgrade Helm Release* page, select the *YAML view* option in *Configure via*, then configure the `metrics` section in the YAML, as shown in the following example: ++ +[source,yaml] +---- +upstream: +# ... + metrics: + serviceMonitor: + enabled: true + path: /metrics +# ... +---- ++ +image::rhdh/upgrade-helm-metrics.png[] + +. Click *Upgrade*. + +.Verification + +. From the *Developer* perspective in the {ocp-short} web console, select the *Observe* view. +. Click the *Metrics* tab to view metrics for {product} pods. diff --git a/modules/observability/proc-admin-enabling-metrics-ocp-operator.adoc b/modules/observability/proc-admin-enabling-metrics-ocp-operator.adoc new file mode 100644 index 0000000000..a4283f2036 --- /dev/null +++ b/modules/observability/proc-admin-enabling-metrics-ocp-operator.adoc @@ -0,0 +1,57 @@ +// Module included in the following assemblies: +// +// * assemblies/assembly-rhdh-observability.adoc + +:_mod-docs-content-type: PROCEDURE +[id="proc-admin-enabling-metrics-ocp-operator_{context}"] += Enabling metrics monitoring in a {product} Operator installation on an {ocp-short} cluster + +You can enable and view metrics for an Operator-installed {product} instance from the *Developer* perspective of the {ocp-short} web console. 
+ +.Prerequisites + +* Your {ocp-short} cluster has link:https://docs.openshift.com/container-platform/latest/observability/monitoring/enabling-monitoring-for-user-defined-projects.html[monitoring for user-defined projects] enabled. +* You have installed {product} on {ocp-short} using the {product} Operator. +* You have installed the {openshift-cli}. + +.Procedure + +Currently, the {product} Operator does not support creating a `ServiceMonitor` custom resource (CR) by default. You must complete the following steps to create a `ServiceMonitor` CR to scrape metrics from the endpoint. + +. Create the `ServiceMonitor` CR as a YAML file: ++ +[source,yaml] +---- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: # <1> + namespace: # <2> + labels: + app.kubernetes.io/instance: + app.kubernetes.io/name: backstage +spec: + namespaceSelector: + matchNames: + - + selector: + matchLabels: + rhdh.redhat.com/app: backstage- + endpoints: + - port: backend + path: '/metrics' +---- +<1> Replace `` with the name of your {product} CR. +<2> Replace `` with the name of the {ocp-short} project where your {product} instance is running. + +. Apply the `ServiceMonitor` CR by running the following command: ++ +[source,terminal] +---- +oc apply -f +---- + +.Verification + +. From the *Developer* perspective in the {ocp-short} web console, select the *Observe* view. +. Click the *Metrics* tab to view metrics for {product} pods. diff --git a/modules/proc-create-plugin.adoc b/modules/proc-create-plugin.adoc new file mode 100644 index 0000000000..b234338130 --- /dev/null +++ b/modules/proc-create-plugin.adoc @@ -0,0 +1,38 @@ +[id="proc-create-plugin"] + += Creating a plugin for the {product} + +You can create a plugin for the {product-short} application. + +.Prerequisites + +* {product} application is installed and running. For more information, see link:{LinkAdminGuide}({NameOfAdminGuide}). +* `yarn` command and associated dependencies are installed. 
+ +.Procedure + +. In the terminal application, run the following command from the root of your {product-short} project: ++ +-- +[source,terminal] +---- +yarn new --select plugin +---- + +The previous command creates a new plugin based on the provided ID. The plugin is added to the {product-short} application automatically. +-- + +. If your {product-short} application is running, navigate to `http://localhost:3000/my-new-plugin` to access the default page of your new plugin. + +. Optional: To access the plugin in isolation, run the `yarn start` command in the plugin directory or use the `yarn workspace` command as follows: ++ +-- +[source,bash] +---- +yarn workspace {pluginID} start # Also supports --check <1> +---- + +<1> ID of the new plugin such as `@janus-idp/my-new-plugin` + +Accessing the plugin in isolation results in quicker iteration and startup times. This approach is specifically for local development, and you can find the necessary configuration in the plugin's `dev/` directory. +-- diff --git a/modules/release-notes/con-release-notes-overview.adoc b/modules/release-notes/con-release-notes-overview.adoc new file mode 100644 index 0000000000..772a366cdd --- /dev/null +++ b/modules/release-notes/con-release-notes-overview.adoc @@ -0,0 +1,40 @@ +[id='con-release-notes-overview_{context}'] += About this release + +The release notes provide high-level coverage of the features that have been implemented in {product} {product-version} and document known issues in this release. + +Some features within this release may be available as a Technology Preview, providing access to upcoming product features, enabling customers to test functionality and to provide feedback during the development process. + +For more information about the support scope of Red Hat Technology Preview features, read https://access.redhat.com/support/offerings/techpreview[Technology Preview Support Scope]. 
+ +Benefits of {product} include: + +* *Increased developer productivity*: Increases productivity by eliminating common organizational challenges, enabling seamless collaboration, and providing clear guidelines for creating, developing, and deploying applications. +* *Unified self-service dashboard*: Provides development teams with a unified dashboard covering various aspects such as Git, CI/CD, SAST/DAST, Supply Chain, OpenShift/Kubernetes cluster, JIRA, monitoring, API, documentation, and more, facilitated by over 150 plugins. All curated by a platform engineering team, aligning with the company's best practices. +* *Best practices through software templates*: Automates organizational best practices by encoding common tasks such as creating new applications, running Ansible jobs, and establishing CI/CD pipelines for production deployment in Git. +* *Scalable technical documentation*: Code and documentation resides in the same repository, eliminating dependencies on proprietary document systems. +* *Efficient onboarding for new developers*: New developers quickly adapt and become productive within a short timeframe. +* *Robust enterprise Role-Based Access Control (RBAC)*: Empowers administrators to create roles, assign users or groups to roles, and implement robust security policies for enhanced access control. + +.Additional resources + +* link:{upgrading-book-url}[{upgrading-book-title}]. 
+ +== Supported Platforms + +{product} runs on {ocp-short} {ocp-version-min}-{ocp-version} on the following CPU architecture: + +* AMD64 and Intel 64 (`x86_64`) + +In addition to {ocp-short}, the following hosted environments are also supported: + +* {eks-brand-name} +* {aks-brand-name} +* {osd-short} on {gcp-brand-name} + +.Additional resources + +* link:https://docs.redhat.com/en/documentation/openshift_container_platform[OpenShift documentation] +* link:https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}#Install%20and%20Upgrade[{product} Installation guides] + + diff --git a/modules/release-notes/list-fixed-security-issues-in-product-1.3.0.txt b/modules/release-notes/list-fixed-security-issues-in-product-1.3.0.txt new file mode 100644 index 0000000000..36e91d9a09 --- /dev/null +++ b/modules/release-notes/list-fixed-security-issues-in-product-1.3.0.txt @@ -0,0 +1,6 @@ +CVE-2024-24790 +CVE-2024-24791 +CVE-2024-35255 +CVE-2024-37891 +CVE-2024-39008 +CVE-2024-39249 diff --git a/modules/release-notes/list-fixed-security-issues-in-rpm-1.3.0.txt b/modules/release-notes/list-fixed-security-issues-in-rpm-1.3.0.txt new file mode 100644 index 0000000000..bbd5596eeb --- /dev/null +++ b/modules/release-notes/list-fixed-security-issues-in-rpm-1.3.0.txt @@ -0,0 +1,26 @@ +CVE-2023-52439 +CVE-2023-52884 +CVE-2024-6119 +CVE-2024-26739 +CVE-2024-26929 +CVE-2024-26930 +CVE-2024-26931 +CVE-2024-26947 +CVE-2024-26991 +CVE-2024-27022 +CVE-2024-35895 +CVE-2024-36016 +CVE-2024-36899 +CVE-2024-38562 +CVE-2024-38570 +CVE-2024-38573 +CVE-2024-38601 +CVE-2024-38615 +CVE-2024-39331 +CVE-2024-40984 +CVE-2024-41071 +CVE-2024-42225 +CVE-2024-42246 +CVE-2024-45490 +CVE-2024-45491 +CVE-2024-45492 diff --git a/modules/release-notes/single-source-fixed-security-issues.sh b/modules/release-notes/single-source-fixed-security-issues.sh new file mode 100755 index 0000000000..4fa0864f95 --- /dev/null +++ b/modules/release-notes/single-source-fixed-security-issues.sh @@ -0,0 
+1,62 @@ +#!/bin/bash +# +# Copyright (c) 2024 Red Hat, Inc. +# This program, and the accompanying materials are made +# available under the terms of the Apache Public License 2.0, +# available at http://www.apache.org/licenses/ +# +# SPDX-License-Identifier: Apache-2.0 + +# Single-source the release notes Fixed security issues section from Red Hat Security Data API. +# See: https://docs.redhat.com/en/documentation/red_hat_security_data_api/1.0/html/red_hat_security_data_api/cve + +# Fail and stop on first error +set -e + +# get the z-stream version from the bundle-version attribute. Note that while chart-version could be larger, this is the correct value for CVE tracking +product_version="$(grep ':product-bundle-version:' artifacts/attributes.adoc | cut -d' ' -f2 )" + +single_source_from_security_data () { + sectionname="fixed-security-issues-in-${section}-${product_version}" + dirname=$(dirname ${BASH_SOURCE}) + destination="${dirname}/snip-${sectionname}.adoc" + list="${dirname}/list-${sectionname}.txt" + # Assert that the list file exists. + if [ ! -f ${list} ] + then + echo "ERROR: The ${list} file is missing. You must create it to proceed. For a given version, can collect the list of CVEs from a JIRA query like https://issues.redhat.com/issues/?jql=labels%3DSecurityTracking+and+project%3DRHIDP+and+fixversion%3D1.3.1 or list of Erratas from https://errata.devel.redhat.com/advisory/filters/4213" + exit 1 + fi + # Cleanup the destination files. + rm -f "$destination" + # Send output to the destination file. + exec 3>&1 1>> "$destination" + echo "= ${title}" + for cve in $(cat ${list} | sort | uniq) + do + # Start the list. + echo "link:https://access.redhat.com/security/cve/$cve[$cve]::" + # Call the API to return a list of details. + # Red Hat is last if there is one. + # Red Hat details is single line. + # MITRE details are multiline. + # We keep Red Hat details if present. + # We keep only the first two lines on MITRE details. 
+ curl -s "https://access.redhat.com/hydra/rest/securitydata/cve/$cve.json" | jq -r '.details[-1]' | head -n 2 + # Add a separation + echo "" + done + # Stop sending output to the destination file + exec 1>&3 3>&- + echo "include::${destination}[leveloffset=+2]" +} + +title="{product} dependency updates" +section="product" +single_source_from_security_data + +title="RHEL 9 platform RPM updates" +section="rpm" +single_source_from_security_data + +echo "INFO: Verify that the assemblies/assembly-release-notes-fixed-security-issues.adoc file contains aforementioned required include statements." diff --git a/modules/release-notes/single-source-release-notes.py b/modules/release-notes/single-source-release-notes.py new file mode 100755 index 0000000000..cd9dc4afb6 --- /dev/null +++ b/modules/release-notes/single-source-release-notes.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 +""" +Copyright (c) 2024 Red Hat, Inc. +This program, and the accompanying materials are made +available under the terms of the Apache Public License 2.0, +available at http://www.apache.org/licenses/ + +SPDX-License-Identifier: Apache-2.0 + +Prerequisites: +$ pip3 install --requirement requirements.txt + +Generate the AsciiDoc files for the release notes and known issues from JIRA content. +""" +import glob +import jinja2 +import os +import yaml +from jira import JIRA + +# Define location for product attributes, templates, and generated files. +root_dir = os.path.normpath( + os.path.normpath( + os.path.dirname( + __file__ + ) + ) + '/../..' +) +product_attributes = root_dir + '/artifacts/product-attributes.adoc' +templates_dir = root_dir + '/build/templates/' +assemblies_dir = root_dir + '/assemblies/' +modules_dir = root_dir + '/modules/release-notes/' +# Load Jinja2 templates. 
+env = jinja2.Environment( + loader=jinja2.FileSystemLoader( + templates_dir + ) +) +# Load configuration file +with open( + root_dir + '/jira2asciidoc.yml', + 'r' +) as file: + config = yaml.safe_load(file) +# Load AsciiDoc attributes. +product_version_minor_glob = config['product']['version']['minor_glob'] +product_version_patch = config['product']['version']['patch'] +# Configure access to Jira using kerberos +jira = JIRA( + server=config['jira']['server'], + token_auth=os.environ.get( + 'JIRA_TOKEN' + ) +) +# Delete old file files. +fileList = glob.glob( + modules_dir + 'snip-*-rhidp-*.adoc' +) +for filePath in fileList: + os.remove(filePath) +# Generate the release notes and known issues assemblies and files +for section in config['sections']: + # Search in Jira for issues to publish defined in jira_query + query = section["query"].format( + version_minor_glob=product_version_minor_glob, + version_patch=product_version_patch + ) + print(query) + issues = jira.search_issues(query) + # Create the assembly file + assembly_file = open( + assemblies_dir + 'assembly-release-notes-' + section["id"] + '.adoc', + 'w' + ) + assembly_template = env.get_template( + 'assembly.adoc.jinja' + ) + print( + assembly_template.render( + assembly_id=section["id"], + assembly_title=section["title"], + assembly_introduction=section["description"], + vars=issues, + ), + file=assembly_file + ) + # Create the file files + for issue in issues: + # Collect values from these fields: + issue_key = format(issue.key) # Issue id + issue_rn_status = format(issue.fields.customfield_12310213) # Release Note Status + issue_rn_text = format(issue.fields.customfield_12317313) # Release Note Text + issue_rn_type = format(issue.fields.customfield_12320850) # Release Note Type + issue_template = section["template"] + issue_title = format(issue.fields.summary) # Issue title + # Define AsciiDoc file id, file, and content + file_id = format(issue_rn_type + "-" + issue_key).lower().replace(" ", "-") + 
snippet_file = open( + modules_dir + 'snip-' + file_id + '.adoc', + 'w' + ) + snippet_template = env.get_template( + 'snippet-' + issue_template + '.adoc.jinja2' + ) + print( + snippet_template.render( + id=file_id, + key=issue_key, + text=issue_rn_text, + title=issue_title, + ), + file=snippet_file + ) +# Report final status +print( + 'INFO: Single-sourced release notes from Jira for version {version} in {dir}' + .format( + version=product_version_patch, + dir=modules_dir + ) +) diff --git a/modules/release-notes/snip-bug-fix-rhidp-1334.adoc b/modules/release-notes/snip-bug-fix-rhidp-1334.adoc new file mode 100644 index 0000000000..ba605ec6e9 --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-1334.adoc @@ -0,0 +1,7 @@ +[id="bug-fix-rhidp-1334"] += Removed unnecessary pull secret from the {product} Helm Chart + +With this update, the {product} Helm Chart does not contain a pull secret that is no longer needed. + +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-1334[RHIDP-1334] diff --git a/modules/release-notes/snip-bug-fix-rhidp-2139.adoc b/modules/release-notes/snip-bug-fix-rhidp-2139.adoc new file mode 100644 index 0000000000..f92cec7c22 --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-2139.adoc @@ -0,0 +1,10 @@ +[id="bug-fix-rhidp-2139"] += Filtering for permissions policies that do not exist leads to an error being thrown + +Before this update, permission checks by the permission framework would throw an error if a matching permission policy was not previously defined. +Therefore, {product-short} denied the request with an error. + +With this update, {product-short} denies the request without throwing an error. 
+ +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-2139[RHIDP-2139] diff --git a/modules/release-notes/snip-bug-fix-rhidp-2374.adoc b/modules/release-notes/snip-bug-fix-rhidp-2374.adoc new file mode 100644 index 0000000000..5595898431 --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-2374.adoc @@ -0,0 +1,9 @@ +[id="bug-fix-rhidp-2374"] += Added missing plugin name in the RBAC administration interface + +Before this update, the RBAC administration user interface *Permission Policies* table did not display the plugin name. + +With this update, the RBAC administration user interface *Permission Policies* table displays the plugin name. + +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-2374[RHIDP-2374] diff --git a/modules/release-notes/snip-bug-fix-rhidp-2412.adoc b/modules/release-notes/snip-bug-fix-rhidp-2412.adoc new file mode 100644 index 0000000000..5547c5d1c3 --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-2412.adoc @@ -0,0 +1,11 @@ +[id="bug-fix-rhidp-2412"] += HTTP error code 431 when a user is a member of many groups + +Before this update, {product-short} API became unresponsive when a user was a member of a high number of groups (more than 150) with aggregated relations. +Therefore, the user might have failed to authenticate. +Also, {product-short} might have shown an error when opening the user entity in the UI. + +With this update, {product-short} can handle a user who is a member of a high number of groups (more than 150) with aggregated relations. 
+ +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-2412[RHIDP-2412] diff --git a/modules/release-notes/snip-bug-fix-rhidp-2438.adoc b/modules/release-notes/snip-bug-fix-rhidp-2438.adoc new file mode 100644 index 0000000000..e3c17d1a58 --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-2438.adoc @@ -0,0 +1,9 @@ +[id="bug-fix-rhidp-2438"] += OCM calls are not scoped to the OCM page + +Before this update, when the OCM plugin is installed, navigating to non-OCM pages triggered unnecessary failed OCM API calls. + +With this update, {product-short} restricts OCM API calls to OCM-related pages. + +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-2438[RHIDP-2438] diff --git a/modules/release-notes/snip-bug-fix-rhidp-2529.adoc b/modules/release-notes/snip-bug-fix-rhidp-2529.adoc new file mode 100644 index 0000000000..f88996b81d --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-2529.adoc @@ -0,0 +1,11 @@ +[id="bug-fix-rhidp-2529"] += When login using azure entra sso, it will use id to match the user entity, which will fail to match with user entity imported by msgraph. + +Before this update, {product-short} failed to resolve user entities with Azure authentication provider to entities ingested by the MsGraph catalog provider. +Therefore, a user authentication with Microsoft Azure could not open a session in {product-short}. + +With this update, {product-short} resolves user entities with Azure authentication provider to entities ingested by the MsGraph catalog provider. +Therefore, a user authentication with Microsoft Azure can open a session in {product-short}. 
+ +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-2529[RHIDP-2529] diff --git a/modules/release-notes/snip-bug-fix-rhidp-2716.adoc b/modules/release-notes/snip-bug-fix-rhidp-2716.adoc new file mode 100644 index 0000000000..83216cb304 --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-2716.adoc @@ -0,0 +1,10 @@ +[id="bug-fix-rhidp-2716"] += Replaced the deprecated `backend.auth.keys` field in the default configuration + +Before this update, in a {product-short} deployment with the default configuration, the application logs displayed the deprecation warning. + +With this update, the default `upstream.backstage.appConfig` configuration uses the `backend.auth.externalAccess` field rather than the deprecated `backend.auth.keys` field. + + +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-2716[RHIDP-2716] diff --git a/modules/release-notes/snip-bug-fix-rhidp-2728.adoc b/modules/release-notes/snip-bug-fix-rhidp-2728.adoc new file mode 100644 index 0000000000..7eb0c9204d --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-2728.adoc @@ -0,0 +1,7 @@ +[id="bug-fix-rhidp-2728"] += Handle PII logging in Application Logs + +With this update, {product-short} does not include user IP addresses in the application logs. + +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-2728[RHIDP-2728] diff --git a/modules/release-notes/snip-bug-fix-rhidp-3159.adoc b/modules/release-notes/snip-bug-fix-rhidp-3159.adoc new file mode 100644 index 0000000000..f01dac4338 --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-3159.adoc @@ -0,0 +1,11 @@ +[id="bug-fix-rhidp-3159"] += The last ~10 GitHub Pull Requests are missing from the list + +Before this update, {product-short} ignored GitHub search API restrictions to list pull requests. +Therefore, {product-short} might have not displayed all pull requests. 
+ +With this update, {product-short} limits paging to max 1000 results to respect GitHub search API restrictions. +{product-short} shows users when additional results are available, suggesting in a tooltip that they can refine their query to retrieve more specific results. + +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-3159[RHIDP-3159] diff --git a/modules/release-notes/snip-bug-fix-rhidp-3217.adoc b/modules/release-notes/snip-bug-fix-rhidp-3217.adoc new file mode 100644 index 0000000000..54fe4fc007 --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-3217.adoc @@ -0,0 +1,10 @@ +[id="bug-fix-rhidp-3217"] += rhtap installation always failed at RHDH due to Migration table is already locked + +Before this update, after updating a config map or a secret, when pods were restarting to apply the changes, they might have tried to simultaneously lock the database. +The situation ended with a deadlock. + +With this update, {product-short} handles simultaneous pod refreshing without a deadlock. + +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-3217[RHIDP-3217] diff --git a/modules/release-notes/snip-bug-fix-rhidp-3260.adoc b/modules/release-notes/snip-bug-fix-rhidp-3260.adoc new file mode 100644 index 0000000000..5e5a985e36 --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-3260.adoc @@ -0,0 +1,11 @@ +[id="bug-fix-rhidp-3260"] += Renamed optional secret dynamic-plugins-npmrc in helm chart + +Before this update, the Helm Chart was using an unversioned name for the dynamic-plugins-npmrc secret. +Therefore, subsequent Helm deployments of the RHDH Helm Chart version 1.2.1 failed after the first deployment with an error that a secret named dynamic-plugins-npmrc exists and is not owned by the current release. + +With this update, the Helm Chart creates and uses a dynamic-plugins-npmrc secret that is named in line with the other resources managed by the Helm Chart: `__-dynamic-plugins-npmrc`. 
+As a result, the Helm Chart does not fail on the previous error. + +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-3260[RHIDP-3260] diff --git a/modules/release-notes/snip-bug-fix-rhidp-3458.adoc b/modules/release-notes/snip-bug-fix-rhidp-3458.adoc new file mode 100644 index 0000000000..3a372c7b95 --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-3458.adoc @@ -0,0 +1,9 @@ +[id="bug-fix-rhidp-3458"] += Backstage Specific Metrics no longer appear in /metrics endpoint + +Before this update, {product-short} stopped displaying some metrics such as catalog metrics in the `____/metrics` endpoint. + +With this update, {product-short} displays expected metrics in the /metrics endpoint. + +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-3458[RHIDP-3458] diff --git a/modules/release-notes/snip-bug-fix-rhidp-3471.adoc b/modules/release-notes/snip-bug-fix-rhidp-3471.adoc new file mode 100644 index 0000000000..48272b9c93 --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-3471.adoc @@ -0,0 +1,25 @@ +[id="bug-fix-rhidp-3471"] += Theme issues with plugins using material 5 + +Before this update, {product-short} had theme issues with plugins using Material UI (MUI) 5. + +With this update, {product-short} includes additional MUI 5 related packages, added to the application shell as shared modules. +Therefore, dynamic plugins that use MUI 5 components and tss-react can properly load the currently selected theme. +This ensures that MUI 5 components have the correct colors and styling applied to them. + +While not strictly a requirement, if a dynamic plugin relies on MUI 5 components with a class name prefix, that behavior can be added to a frontend dynamic plugin by adding the following code to the plugin's index.ts: + +---- +import { unstable_ClassNameGenerator as ClassNameGenerator } from '@mui/material/className'; + +ClassNameGenerator.configure(componentName => { + return componentName.startsWith('v5-') + ? 
componentName + : `v5-${componentName}`; +}) +---- + +This update requires using a version of the @janus-idp/cli package > 1.13.1. + +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-3471[RHIDP-3471] diff --git a/modules/release-notes/snip-bug-fix-rhidp-3580.adoc b/modules/release-notes/snip-bug-fix-rhidp-3580.adoc new file mode 100644 index 0000000000..a5a96e46af --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-3580.adoc @@ -0,0 +1,22 @@ +[id="bug-fix-rhidp-3580"] += Creating RBAC role with a name that contains ':' or '/' creates a role that does nothing and cannot be deleted + +Before this update, creating an RBAC role with a name that contains ':' or '/' through the REST API (or RBAC admin panel in the UI) created a role that did nothing and could not be deleted. +Although the name of the role showed up in full as written in the POST request, when clicked on for more information about the role it showed only the part of the name written before the first ':' or '/'. +Also, while the list of RBAC roles did list how many policies were added to the role, when clicking on the role for more information it displayed no users or policies. + + +With this update, {product-short} validates role and namespace names more strictly, in accordance with backstage validation: + +{product-short} invalidates role names that do not conform with the format: + +- Strings of length at least 1, and at most 63. +- Must consist of sequences of `[a-z0-9A-Z]` possibly separated by one of `[-_.]`. + +{product-short} invalidates namespaces that do not conform with the format: + +- Strings of length at least 1, and at most 63. +- Must be sequences of `[a-zA-Z0-9]`, possibly separated by `-`. 
+ +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-3580[RHIDP-3580] diff --git a/modules/release-notes/snip-bug-fix-rhidp-3601.adoc b/modules/release-notes/snip-bug-fix-rhidp-3601.adoc new file mode 100644 index 0000000000..04526c6c8c --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-3601.adoc @@ -0,0 +1,9 @@ +[id="bug-fix-rhidp-3601"] += Update conditional policies and policies loaded from files when these files are deleted + +Before this update, conditional policies and policies loaded from files remained active after the corresponding policy files were removed from the configuration. + +With this update, conditional policies and policies loaded from files are removed after the corresponding policy files are removed from the configuration. + +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-3601[RHIDP-3601] diff --git a/modules/release-notes/snip-bug-fix-rhidp-3612.adoc b/modules/release-notes/snip-bug-fix-rhidp-3612.adoc new file mode 100644 index 0000000000..71dd61eec1 --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-3612.adoc @@ -0,0 +1,9 @@ +[id="bug-fix-rhidp-3612"] += Fixed the timestamp inserted by `catalog:timestamping` + +Before this update, the timestamp in the `catalog-info.yaml` created by the `catalog:timestamping` action by the `backstage-scaffolder-backend-module-annotator` plugin was different from the execution time of the template. + +With this update, a unique timestamp is generated on each execution of the template. 
+ +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-3612[RHIDP-3612] diff --git a/modules/release-notes/snip-bug-fix-rhidp-3735.adoc b/modules/release-notes/snip-bug-fix-rhidp-3735.adoc new file mode 100644 index 0000000000..35d41fdc80 --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-3735.adoc @@ -0,0 +1,10 @@ +[id="bug-fix-rhidp-3735"] += Added missing virtual machine details to the sidebar + +Before this update, when a user displayed the virtual machine details in the sidebar, the icon corresponding to virtual machine was not shown. + +With this update, the missing icons have been added. +Therefore, when a user displays the virtual machine details in the sidebar, an icon shows the virtual machine status. + +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-3735[RHIDP-3735] diff --git a/modules/release-notes/snip-bug-fix-rhidp-3896.adoc b/modules/release-notes/snip-bug-fix-rhidp-3896.adoc new file mode 100644 index 0000000000..8535cc95b7 --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-3896.adoc @@ -0,0 +1,9 @@ +[id="bug-fix-rhidp-3896"] += Authenticate with GitHub a user absent in the software catalog when `dangerouslyAllowSignInWithoutUserInCatalog` is set to true + +Before this update, authentication with Github failed when the `dangerouslyAllowSignInWithoutUserInCatalog` field was set to true and the user was absent from the software catalog. + +With this update, when the `dangerouslyAllowSignInWithoutUserInCatalog` field is set to true, you can authenticate to {product-short} with a user absent from the software catalog. 
+ +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-3896[RHIDP-3896] diff --git a/modules/release-notes/snip-bug-fix-rhidp-4013.adoc b/modules/release-notes/snip-bug-fix-rhidp-4013.adoc new file mode 100644 index 0000000000..3f135999b3 --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-4013.adoc @@ -0,0 +1,10 @@ +[id="bug-fix-rhidp-4013"] += The {product-short} image defined in the custom resource takes precedence over the image defined in the environment variable + +Before this update, when the {product-short} image was configured in both the custom resource and in the 'RELATED_IMAGE_backstage' environment variable, the image defined in the custom resource was not used. + +With this update, the custom resource configuration takes precedence and is applied. + + +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-4013[RHIDP-4013] diff --git a/modules/release-notes/snip-bug-fix-rhidp-4046.adoc b/modules/release-notes/snip-bug-fix-rhidp-4046.adoc new file mode 100644 index 0000000000..bbecc2fca7 --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-4046.adoc @@ -0,0 +1,11 @@ +[id="bug-fix-rhidp-4046"] += Updated the search dropdown to display results for a large number of users or groups + +Before this update, in the RBAC administration page, the members dropdown was not able to load a large number of users or groups. +Therefore, the {product-short} administrator was not able to select required users or groups to add to the role. + +With this update, the dropdown initially displays up to 100 users or groups and updates the display once the user starts to search. The search happens across the whole data-set and displays the first 100 results. The user must refine their search to narrow the results to a list containing the desired user or group. +Therefore, even with larger numbers of users or groups, the {product-short} administrator can add required users or groups to the role. 
+ +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-4046[RHIDP-4046] diff --git a/modules/release-notes/snip-bug-fix-rhidp-4200.adoc b/modules/release-notes/snip-bug-fix-rhidp-4200.adoc new file mode 100644 index 0000000000..a423e75529 --- /dev/null +++ b/modules/release-notes/snip-bug-fix-rhidp-4200.adoc @@ -0,0 +1,11 @@ +[id="bug-fix-rhidp-4200"] += Bundled ArgoCD plugin with dynamic frontend assets + +Before this update, the ArgoCD plugin was bundled with dynamic backend plugin assets rather than dynamic frontend plugin assets. +Therefore, the ArgoCD plugin failed to load. + +With this update, the ArgoCD plugin is bundled with dynamic frontend plugin assets. +Therefore, the ArgoCD plugin can load properly. + +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-4200[RHIDP-4200] diff --git a/modules/release-notes/snip-deprecated-functionality-rhidp-1138.adoc b/modules/release-notes/snip-deprecated-functionality-rhidp-1138.adoc new file mode 100644 index 0000000000..2843ec3636 --- /dev/null +++ b/modules/release-notes/snip-deprecated-functionality-rhidp-1138.adoc @@ -0,0 +1,53 @@ +[id="deprecated-functionality-rhidp-1138"] += `spec.application.image`, `spec.application.replicas` and `spec.application.imagePullSecrets` fields are deprecated + +`spec.application.image`, `spec.application.replicas` and `spec.application.imagePullSecrets` fields are deprecated in `v1alpha2` in favour of `spec.deployment`. + +Procedure: + +To update your {product-short} Operator configuration: + +. Remove the `spec.application.image`, `spec.application.replicas` and `spec.application.imagePullSecrets` fields from the Operator configuration: ++ +[source,yaml] +---- +spec: + application: + replicas: 2 # <1> + imagePullSecrets: # <2> + - my-secret-name + image: quay.io/my/my-rhdh:latest # <3> +---- +<1> Replica count. +<2> Array of image pull secrets names. +<3> Image name. + + +. 
Replace the removed fields by new `spec.deployment` fields, such as: ++ +[source,yaml] +---- +spec: + deployment: + patch: + spec: + replicas: 2 # <1> + imagePullSecrets: # <2> + - name: my-secret-name + template: + metadata: + labels: + my: true + spec: + containers: + - name: backstage-backend + image: quay.io/my/my-rhdh:latest # <3> +---- +<1> Replica count. +<2> Array of image pull secrets names. +<3> Image name. + +// https://github.com/redhat-developer/rhdh-operator/blob/main/docs/configuration.md#deployment-parameters + +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-1138[RHIDP-1138] diff --git a/modules/release-notes/snip-enhancement-rhidp-2615.adoc b/modules/release-notes/snip-enhancement-rhidp-2615.adoc new file mode 100644 index 0000000000..5aa7381dd4 --- /dev/null +++ b/modules/release-notes/snip-enhancement-rhidp-2615.adoc @@ -0,0 +1,14 @@ +[id="enhancement-rhidp-2615"] += Persisting the audit log + +With this update, you can persist the audit log: + +* You can send {product} audit logs to a rotating file. + +* You can send logs to a locked down file with append only rights. + +* When using the Helm chart, {product-short} writes logs to persistent volumes. + + +// .Additional resources +// * link:https://issues.redhat.com/browse/RHIDP-2615[RHIDP-2615] diff --git a/modules/release-notes/snip-enhancement-rhidp-2695.adoc b/modules/release-notes/snip-enhancement-rhidp-2695.adoc new file mode 100644 index 0000000000..cca4606cd7 --- /dev/null +++ b/modules/release-notes/snip-enhancement-rhidp-2695.adoc @@ -0,0 +1,7 @@ +[id="enhancement-rhidp-2695"] += All public endpoints in core and plugins have OpenAPI specs + +With this update, OpenAPI Specs are available for all components, including the rbac-backend plugin. 
+ +// .Additional resources +// * link:https://issues.redhat.com/browse/RHIDP-2695[RHIDP-2695] diff --git a/modules/release-notes/snip-enhancement-rhidp-2723.adoc b/modules/release-notes/snip-enhancement-rhidp-2723.adoc new file mode 100644 index 0000000000..50aca24207 --- /dev/null +++ b/modules/release-notes/snip-enhancement-rhidp-2723.adoc @@ -0,0 +1,7 @@ +[id="enhancement-rhidp-2723"] += RBAC Backend plugin module support + +With this update, {product-short} can load roles and permissions into the RBAC Backend plugin through the use of extension points with the help of a plugin module. + +// .Additional resources +// * link:https://issues.redhat.com/browse/RHIDP-2723[RHIDP-2723] diff --git a/modules/release-notes/snip-enhancement-rhidp-2736.adoc b/modules/release-notes/snip-enhancement-rhidp-2736.adoc new file mode 100644 index 0000000000..35a454c87b --- /dev/null +++ b/modules/release-notes/snip-enhancement-rhidp-2736.adoc @@ -0,0 +1,10 @@ +[id="enhancement-rhidp-2736"] += Force catalog ingestion for production users + +By default, it is now required for the user entity to exist in the software catalog to allow sign in. +This is required for production ready deployments since identities need to exist and originate from a trusted source (i.e. the Identity Provider) in order for security controls such as RBAC and Audit logging to be effective. +To bypass this, enable the `dangerouslySignInWithoutUserInCatalog` configuration that allows sign in without the user being in the catalog. +Enabling this option is dangerous as it might allow unauthorized users to gain access. 
+ +// .Additional resources +// * link:https://issues.redhat.com/browse/RHIDP-2736[RHIDP-2736] diff --git a/modules/release-notes/snip-enhancement-rhidp-2768.adoc b/modules/release-notes/snip-enhancement-rhidp-2768.adoc new file mode 100644 index 0000000000..66277a454f --- /dev/null +++ b/modules/release-notes/snip-enhancement-rhidp-2768.adoc @@ -0,0 +1,11 @@ +[id="enhancement-rhidp-2768"] += RBAC UI enhancements + +With this update, the RBAC UI has been improved: + +* The **Create role** form and the **Role** overview page display the total number of conditional rules configured. +* The **Role** list page displays accessible plugins. + + +// .Additional resources +// * link:https://issues.redhat.com/browse/RHIDP-2768[RHIDP-2768] diff --git a/modules/release-notes/snip-enhancement-rhidp-2790.adoc b/modules/release-notes/snip-enhancement-rhidp-2790.adoc new file mode 100644 index 0000000000..357f1c4ffc --- /dev/null +++ b/modules/release-notes/snip-enhancement-rhidp-2790.adoc @@ -0,0 +1,18 @@ +[id="enhancement-rhidp-2790"] += Updated Backstage version + +With this update, Backstage was updated to version {product-backstage-version}. 
+ +.Additional resources: +* link:https://github.com/backstage/backstage/releases/tag/v1.27.0[Backstage 1.27 release notes] +* link:https://github.com/backstage/backstage/blob/v1.27.0/docs/releases/v1.27.0-changelog.md[Backstage 1.27 changelog] +* link:https://github.com/backstage/backstage/releases/tag/v1.28.0[Backstage 1.28 release notes] +* link:https://github.com/backstage/backstage/blob/v1.28.0/docs/releases/v1.28.0-changelog.md[Backstage 1.28 changelog] +* link:https://github.com/backstage/backstage/releases/tag/v1.29.0[Backstage 1.29 release notes] +* link:https://github.com/backstage/backstage/blob/v1.29.2/docs/releases/v1.29.0-changelog.md[Backstage 1.29 changelog] +* link:https://issues.redhat.com/browse/RHIDP-2794[RHIDP-2794] +* link:https://issues.redhat.com/browse/RHIDP-2847[RHIDP-2847] +* link:https://issues.redhat.com/browse/RHIDP-2796[RHIDP-2796] + +// .Additional resources +// * link:https://issues.redhat.com/browse/RHIDP-2790[RHIDP-2790] diff --git a/modules/release-notes/snip-enhancement-rhidp-2818.adoc b/modules/release-notes/snip-enhancement-rhidp-2818.adoc new file mode 100644 index 0000000000..bede5d7647 --- /dev/null +++ b/modules/release-notes/snip-enhancement-rhidp-2818.adoc @@ -0,0 +1,9 @@ +[id="enhancement-rhidp-2818"] += Authenticating with Microsoft Azure + +The Microsoft Azure Authentication provider is now enterprise ready. +To enable this, enhancements and bug fixes were made to improve the authentication and entity ingestion process. +Note, the existence of user entity in the catalog is now enforced. 
+ +// .Additional resources +// * link:https://issues.redhat.com/browse/RHIDP-2818[RHIDP-2818] diff --git a/modules/release-notes/snip-enhancement-rhidp-3064.adoc b/modules/release-notes/snip-enhancement-rhidp-3064.adoc new file mode 100644 index 0000000000..0972034f2c --- /dev/null +++ b/modules/release-notes/snip-enhancement-rhidp-3064.adoc @@ -0,0 +1,11 @@ +[id="enhancement-rhidp-3064"] += Customizing the main navigation sidebar + +This update introduces a configurable and customizable main navigation sidebar in {product-short}, offering administrators greater control over the navigation structure. Previously, the sidebar was hard-coded with limited flexibility, and dynamic plugins could only contribute menu items without control over their order or structure. + +With this feature, administrators can now configure the order of navigation items, create nested sub-navigation, and provide users with a more organized and intuitive interface. This enhancement improves user experience and efficiency by allowing a more tailored navigation setup. + +Backward compatibility is maintained, ensuring existing dynamic plugin menu item contributions remain functional. A default configuration is provided, along with example configurations, including one with an external dynamic plugin. Documentation has been updated to guide developers on customizing the navigation. + +// .Additional resources +// * link:https://issues.redhat.com/browse/RHIDP-3064[RHIDP-3064] diff --git a/modules/release-notes/snip-enhancement-rhidp-3125.adoc b/modules/release-notes/snip-enhancement-rhidp-3125.adoc new file mode 100644 index 0000000000..ec9ef3cad5 --- /dev/null +++ b/modules/release-notes/snip-enhancement-rhidp-3125.adoc @@ -0,0 +1,7 @@ +[id="enhancement-rhidp-3125"] += Surfacing Catalog Processing Errors to Users + +With this update, the `@backstage/plugin-catalog-backend-module-logs` plugin has been made available as a dynamic plugin to help surface catalog errors into the logs. 
This dynamic plugin is disabled by default. + +// .Additional resources +// * link:https://issues.redhat.com/browse/RHIDP-3125[RHIDP-3125] diff --git a/modules/release-notes/snip-enhancement-rhidp-3826.adoc b/modules/release-notes/snip-enhancement-rhidp-3826.adoc new file mode 100644 index 0000000000..9fd0940917 --- /dev/null +++ b/modules/release-notes/snip-enhancement-rhidp-3826.adoc @@ -0,0 +1,43 @@ +[id="enhancement-rhidp-3826"] += Loading a custom Backstage theme from a dynamic plugin + +With this update, you can load a custom Backstage theme from a dynamic plugin. + +.Procedure + +. Export a theme provider function in the dynamic plugin, such as: ++ +[source,javascript] +---- +import { lightTheme } from './lightTheme'; // some custom theme +import { UnifiedThemeProvider } from '@backstage/theme'; +export const lightThemeProvider = ({ children }: { children: ReactNode }) => ( + +); +---- + +. Configure {product-short} to load the theme in the UI by using the new `themes` configuration field: ++ +[source,yaml] +---- +dynamicPlugins: + frontend: + example.my-custom-theme-plugin: + themes: + - id: light # <1> + title: Light + variant: light + icon: someIconReference + importName: lightThemeProvider +---- +<1> Set your theme id. Optionally, override the default Developer Hub themes by specifying the following id value: `light` overrides the default light theme and `dark` overrides the default dark theme. + +.Verification + +* The theme is available in the "Settings" page. + + +This update also introduced the ability to override core API service factories from a dynamic plugin, which can be helpful for more specialized use cases such as providing a custom ScmAuth configuration for the {product-short} frontend. 
+ +// .Additional resources +// * link:https://issues.redhat.com/browse/RHIDP-3826[RHIDP-3826] diff --git a/modules/release-notes/snip-feature-rhidp-2232.adoc b/modules/release-notes/snip-feature-rhidp-2232.adoc new file mode 100644 index 0000000000..abef0bbdbc --- /dev/null +++ b/modules/release-notes/snip-feature-rhidp-2232.adoc @@ -0,0 +1,10 @@ +[id="feature-rhidp-2232"] += Customizing the deployment by using the custom resource + +With this update, when deploying {product} by using the operator, you can configure the {product-short} Deployment resource. +The {product-short} Operator Custom Resource Definition (CRD) API Version has been updated to `rhdh.redhat.com/v1alpha2`. +This CRD exposes a generic `spec.deployment.patch` field, which allows you to patch the {product-short} Deployment resource. + + +// .Additional resources +// * link:https://issues.redhat.com/browse/RHIDP-2232[RHIDP-2232] diff --git a/modules/release-notes/snip-feature-rhidp-2341.adoc b/modules/release-notes/snip-feature-rhidp-2341.adoc new file mode 100644 index 0000000000..2540f6b7f4 --- /dev/null +++ b/modules/release-notes/snip-feature-rhidp-2341.adoc @@ -0,0 +1,7 @@ +[id="feature-rhidp-2341"] += Using nested conditions in RBAC conditional policies + +With this update, as a {product-short} administrator, you can create and edit nested conditions in RBAC conditional policies by using the {product-short} web UI. 
+ +// .Additional resources +// * link:https://issues.redhat.com/browse/RHIDP-2341[RHIDP-2341] diff --git a/modules/release-notes/snip-feature-rhidp-2643.adoc b/modules/release-notes/snip-feature-rhidp-2643.adoc new file mode 100644 index 0000000000..33229b584f --- /dev/null +++ b/modules/release-notes/snip-feature-rhidp-2643.adoc @@ -0,0 +1,7 @@ +[id="feature-rhidp-2643"] += Allow Dynamic Configuration of Keycloak User/Group Transformers + +With this update, you can provide transformer functions for users and groups to mutate entity parameters from Keycloak before their ingestion into the catalog. This can be done by creating a new backend module and using the added keycloakTransformerExtensionPoint. + +// .Additional resources +// * link:https://issues.redhat.com/browse/RHIDP-2643[RHIDP-2643] diff --git a/modules/release-notes/snip-feature-rhidp-2644.adoc b/modules/release-notes/snip-feature-rhidp-2644.adoc new file mode 100644 index 0000000000..645bad8ffd --- /dev/null +++ b/modules/release-notes/snip-feature-rhidp-2644.adoc @@ -0,0 +1,11 @@ +[id="feature-rhidp-2644"] += Expose extension points for the keycloak-backend plugin + +With this update, you can provide transformer functions for user/group to mutate the entity from Keycloak before their ingestion into the catalog with the new Backstage backend. + +.Procedure +. Create a backend module. +. Provide the custom transformers to the `keycloakTransformerExtensionPoint` extension point exported by the package. 
+ +// .Additional resources +// * link:https://issues.redhat.com/browse/RHIDP-2644[RHIDP-2644] diff --git a/modules/release-notes/snip-feature-rhidp-2865.adoc b/modules/release-notes/snip-feature-rhidp-2865.adoc new file mode 100644 index 0000000000..8dcb33932d --- /dev/null +++ b/modules/release-notes/snip-feature-rhidp-2865.adoc @@ -0,0 +1,10 @@ +[id="feature-rhidp-2865"] += Deploying on OpenShift Dedicated on Google Cloud Provider (GCP) + +Before this update, there was no automated process to deploy {product-short} on OpenShift Dedicated (OSD) on Google Cloud Platform (GCP). + +With this update, you can link:https://docs.redhat.com/en/documentation/red_hat_developer_hub/1.3/html-single/installing_red_hat_developer_hub_on_openshift_dedicated_on_google_cloud_platform/index[install {product} on OpenShift Dedicated (OSD) on Google Cloud Platform (GCP)] by using either Red Hat Developer Hub Operator or Red Hat Developer Hub Helm Chart. + + +// .Additional resources +// * link:https://issues.redhat.com/browse/RHIDP-2865[RHIDP-2865] diff --git a/modules/release-notes/snip-feature-rhidp-2888.adoc b/modules/release-notes/snip-feature-rhidp-2888.adoc new file mode 100644 index 0000000000..3664f86e8f --- /dev/null +++ b/modules/release-notes/snip-feature-rhidp-2888.adoc @@ -0,0 +1,7 @@ +[id="feature-rhidp-2888"] += Visualize Virtual Machine nodes on the Topology plugin + +With this update, you can visualize the Virtual Machine nodes deployed on the cluster through the Topology plugin. 
+ +// .Additional resources +// * link:https://issues.redhat.com/browse/RHIDP-2888[RHIDP-2888] diff --git a/modules/release-notes/snip-feature-rhidp-2907.adoc b/modules/release-notes/snip-feature-rhidp-2907.adoc new file mode 100644 index 0000000000..ee50caa081 --- /dev/null +++ b/modules/release-notes/snip-feature-rhidp-2907.adoc @@ -0,0 +1,8 @@ +[id="feature-rhidp-2907"] += Customizing the Home page + +With this update, you can customize the Home page in {product} by passing the data into the `app-config.yaml` file as a proxy. It is now possible to add, reorganize, and remove cards, including the search bar, quick access, headline, markdown, placeholder, catalog starred entities and featured docs that appear based on the plugins you have installed and enabled. + + +// .Additional resources +// * link:https://issues.redhat.com/browse/RHIDP-2907[RHIDP-2907] diff --git a/modules/release-notes/snip-feature-rhidp-3177.adoc b/modules/release-notes/snip-feature-rhidp-3177.adoc new file mode 100644 index 0000000000..0eb6d73397 --- /dev/null +++ b/modules/release-notes/snip-feature-rhidp-3177.adoc @@ -0,0 +1,10 @@ +[id="feature-rhidp-3177"] += Configuring conditional policies by using external files + +With this release, you can configure conditional policies in {product-short} using external files. +Additionally, {product-short} supports conditional policy aliases, which are dynamically substituted with the appropriate values during policy evaluation. + +For more information, see link:{authorization-book-url}#con-rbac-conditional-policies-rhdh_title-authorization[Configuring conditional policies]. 
+ +// .Additional resources +// * link:https://issues.redhat.com/browse/RHIDP-3177[RHIDP-3177] diff --git a/modules/release-notes/snip-feature-rhidp-3569.adoc b/modules/release-notes/snip-feature-rhidp-3569.adoc new file mode 100644 index 0000000000..882c0bceba --- /dev/null +++ b/modules/release-notes/snip-feature-rhidp-3569.adoc @@ -0,0 +1,10 @@ +[id="feature-rhidp-3569"] += Restarting {product} faster + +Before this update, it took a long time for {product-short} to restart because {product-short} bootstraps all dynamic plugins from zero with every restart. + +With this update, {product-short} is using persisted volumes for the dynamic plugins. +Therefore, {product-short} restarts faster. + +// .Additional resources +// * link:https://issues.redhat.com/browse/RHIDP-3569[RHIDP-3569] diff --git a/modules/release-notes/snip-feature-rhidp-3666.adoc b/modules/release-notes/snip-feature-rhidp-3666.adoc new file mode 100644 index 0000000000..49ffd961ad --- /dev/null +++ b/modules/release-notes/snip-feature-rhidp-3666.adoc @@ -0,0 +1,9 @@ +[id="feature-rhidp-3666"] += Monitoring active users on Developer Hub + +With this update, you can monitor active users on Developer Hub using the `licensed-users-info-backend` plugin. This plugin provides statistical data on logged-in users through the Web UI or REST API endpoints. + +For more information, see link:{authorization-book-url}[{authorization-book-title}]. 
+ +// .Additional resources +// * link:https://issues.redhat.com/browse/RHIDP-3666[RHIDP-3666] diff --git a/modules/release-notes/snip-fixed-security-issues-in-product-1.3.0.adoc b/modules/release-notes/snip-fixed-security-issues-in-product-1.3.0.adoc new file mode 100644 index 0000000000..9de02ac2b4 --- /dev/null +++ b/modules/release-notes/snip-fixed-security-issues-in-product-1.3.0.adoc @@ -0,0 +1,19 @@ += {product} dependency updates +link:https://access.redhat.com/security/cve/CVE-2024-24790[CVE-2024-24790]:: +A flaw was found in the Go language standard library net/netip. The method Is*() (IsPrivate(), IsPublic(), etc) doesn't behave properly when working with IPv6 mapped to IPv4 addresses. The unexpected behavior can lead to integrity and confidentiality issues, specifically when these methods are used to control access to resources or data. + +link:https://access.redhat.com/security/cve/CVE-2024-24791[CVE-2024-24791]:: +A flaw was found in Go. The net/http module mishandles specific server responses from HTTP/1.1 client requests. This issue may render a connection invalid and cause a denial of service. + +link:https://access.redhat.com/security/cve/CVE-2024-35255[CVE-2024-35255]:: +A flaw was found in the Azure identity library at github.com/Azure/azure-sdk-for-go/sdk/azidentity. This issue allows an elevation of privileges. + +link:https://access.redhat.com/security/cve/CVE-2024-37891[CVE-2024-37891]:: +A flaw was found in urllib3, an HTTP client library for Python. In certain configurations, urllib3 does not treat the `Proxy-Authorization` HTTP header as one carrying authentication material. This issue results in not stripping the header on cross-origin redirects. + +link:https://access.redhat.com/security/cve/CVE-2024-39008[CVE-2024-39008]:: +A flaw was found in the fast-loops Node.js package. 
This flaw allows an attacker to alter the behavior of all objects inheriting from the affected prototype by passing arguments to the objectMergeDeep function crafted with the built-in property: __proto__. This issue can potentially lead to a denial of service, remote code execution, or Cross-site scripting. + +link:https://access.redhat.com/security/cve/CVE-2024-39249[CVE-2024-39249]:: +A flaw was found in the async Node.js package. A Regular expression Denial of Service (ReDoS) attack can potentially be triggered via the autoinject function while parsing specially crafted input. + diff --git a/modules/release-notes/snip-fixed-security-issues-in-rpm-1.3.0.adoc b/modules/release-notes/snip-fixed-security-issues-in-rpm-1.3.0.adoc new file mode 100644 index 0000000000..cf700007bc --- /dev/null +++ b/modules/release-notes/snip-fixed-security-issues-in-rpm-1.3.0.adoc @@ -0,0 +1,92 @@ += RHEL 9 platform RPM updates +link:https://access.redhat.com/security/cve/CVE-2023-52439[CVE-2023-52439]:: +A flaw was found in the Linux kernel’s uio subsystem. A use-after-free memory flaw in the uio_open functionality allows a local user to crash or escalate their privileges on the system. + +link:https://access.redhat.com/security/cve/CVE-2023-52884[CVE-2023-52884]:: +In the Linux kernel, the following vulnerability has been resolved: +Input: cyapa - add missing input core locking to suspend/resume functions + +link:https://access.redhat.com/security/cve/CVE-2024-26739[CVE-2024-26739]:: +A use-after-free flaw was found in net/sched/act_mirred.c in the Linux kernel. This may result in a crash. + +link:https://access.redhat.com/security/cve/CVE-2024-26929[CVE-2024-26929]:: +In the Linux kernel, the following vulnerability has been resolved: +scsi: qla2xxx: Fix double free of fcport + +link:https://access.redhat.com/security/cve/CVE-2024-26930[CVE-2024-26930]:: +A vulnerability was found in the Linux kernel. 
A potential double-free in the pointer ha->vp_map exists in the Linux kernel in drivers/scsi/qla2xxx/qla_os.c. + +link:https://access.redhat.com/security/cve/CVE-2024-26931[CVE-2024-26931]:: +In the Linux kernel, the following vulnerability has been resolved: +scsi: qla2xxx: Fix command flush on cable pull + +link:https://access.redhat.com/security/cve/CVE-2024-26947[CVE-2024-26947]:: +A flaw was found in the Linux kernel’s ARM memory management functionality, where certain memory layouts cause a kernel panic. This flaw allows an attacker who can specify or alter memory layouts to cause a denial of service. + +link:https://access.redhat.com/security/cve/CVE-2024-26991[CVE-2024-26991]:: +A flaw was found in the Linux Kernel. A lpage_info overflow can occur when checking attributes. This may lead to a crash. + +link:https://access.redhat.com/security/cve/CVE-2024-27022[CVE-2024-27022]:: +In the Linux kernel, the following vulnerability has been resolved: +fork: defer linking file vma until vma is fully initialized + +link:https://access.redhat.com/security/cve/CVE-2024-35895[CVE-2024-35895]:: +In the Linux kernel, the following vulnerability has been resolved: +bpf, sockmap: Prevent lock inversion deadlock in map delete elem + +link:https://access.redhat.com/security/cve/CVE-2024-36016[CVE-2024-36016]:: +In the Linux kernel, the following vulnerability has been resolved: +tty: n_gsm: fix possible out-of-bounds in gsm0_receive() + +link:https://access.redhat.com/security/cve/CVE-2024-36899[CVE-2024-36899]:: +In the Linux kernel, the following vulnerability has been resolved: +gpiolib: cdev: Fix use after free in lineinfo_changed_notify + +link:https://access.redhat.com/security/cve/CVE-2024-38562[CVE-2024-38562]:: +In the Linux kernel, the following vulnerability has been resolved: +wifi: nl80211: Avoid address calculations via out of bounds array indexing + +link:https://access.redhat.com/security/cve/CVE-2024-38570[CVE-2024-38570]:: +In the Linux kernel, the 
following vulnerability has been resolved: +gfs2: Fix potential glock use-after-free on unmount + +link:https://access.redhat.com/security/cve/CVE-2024-38573[CVE-2024-38573]:: +A NULL pointer dereference flaw was found in cppc_cpufreq_get_rate() in the Linux kernel. This issue may result in a crash. + +link:https://access.redhat.com/security/cve/CVE-2024-38601[CVE-2024-38601]:: +In the Linux kernel, the following vulnerability has been resolved: +ring-buffer: Fix a race between readers and resize checks + +link:https://access.redhat.com/security/cve/CVE-2024-38615[CVE-2024-38615]:: +In the Linux kernel, the following vulnerability has been resolved: +cpufreq: exit() callback is optional + +link:https://access.redhat.com/security/cve/CVE-2024-39331[CVE-2024-39331]:: +A flaw was found in Emacs. Arbitrary shell commands can be executed without prompting when an Org mode file is opened or when the Org mode is enabled, when Emacs is used as an email client, this issue can be triggered when previewing email attachments. + +link:https://access.redhat.com/security/cve/CVE-2024-40984[CVE-2024-40984]:: +In the Linux kernel, the following vulnerability has been resolved: +ACPICA: Revert "ACPICA: avoid Info: mapping multiple BARs. Your kernel is fine." + +link:https://access.redhat.com/security/cve/CVE-2024-41071[CVE-2024-41071]:: +An out-of-bounds buffer overflow has been found in the Linux kernel’s mac80211 subsystem when scanning for SSIDs. Address calculation using out-of-bounds array indexing could result in an attacker crafting an exploit, resulting in the complete compromise of a system. + +link:https://access.redhat.com/security/cve/CVE-2024-42225[CVE-2024-42225]:: +A potential flaw was found in the Linux kernel’s MediaTek WiFi, where it was reusing uninitialized data. This flaw allows a local user to gain unauthorized access to some data potentially. 
+ +link:https://access.redhat.com/security/cve/CVE-2024-42246[CVE-2024-42246]:: +In the Linux kernel, the following vulnerability has been resolved: +net, sunrpc: Remap EPERM in case of connection failure in xs_tcp_setup_socket + +link:https://access.redhat.com/security/cve/CVE-2024-45490[CVE-2024-45490]:: +A flaw was found in libexpat's xmlparse.c component. This vulnerability allows an attacker to cause improper handling of XML data by providing a negative length value to the XML_ParseBuffer function. + +link:https://access.redhat.com/security/cve/CVE-2024-45491[CVE-2024-45491]:: +An issue was found in libexpat’s internal dtdCopy function in xmlparse.c. It can have an integer overflow for nDefaultAtts on 32-bit platforms where UINT_MAX equals SIZE_MAX. + +link:https://access.redhat.com/security/cve/CVE-2024-45492[CVE-2024-45492]:: +A flaw was found in libexpat's internal nextScaffoldPart function in xmlparse.c. It can have an integer overflow for m_groupSize on 32-bit platforms where UINT_MAX equals SIZE_MAX. + +link:https://access.redhat.com/security/cve/CVE-2024-6119[CVE-2024-6119]:: +A flaw was found in OpenSSL. Applications performing certificate name checks (e.g., TLS clients checking server certificates) may attempt to read an invalid memory address resulting in abnormal termination of the application process. + diff --git a/modules/release-notes/snip-known-issue-rhidp-3931.adoc b/modules/release-notes/snip-known-issue-rhidp-3931.adoc new file mode 100644 index 0000000000..8057f17bc0 --- /dev/null +++ b/modules/release-notes/snip-known-issue-rhidp-3931.adoc @@ -0,0 +1,10 @@ +[id="known-issue-rhidp-3931"] += Entities of repositories under a configured org in catalog-backend-module-github-org plugin are not deleted from the catalog when the imported repository is deleted from bulk imports + +Repositories might be added to Developer Hub from various sources (like statically in an app-config file or dynamically when enabling GitHub discovery). 
By design, the bulk import plugin will only track repositories that are accessible from the configured GitHub integrations. +When both the Bulk Import and the GitHub Discovery plugins are enabled, the repositories the latter discovers might be listed in the Bulk Import pages. +However, attempting to delete a repository added by the discovery plugin from the Bulk Import Jobs may have no effect, as any entities registered from this repository might still be present in the Developer Hub catalog. +There is unfortunately no known workaround yet. + +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-3931[RHIDP-3931] diff --git a/modules/release-notes/snip-known-issue-rhidp-4067.adoc b/modules/release-notes/snip-known-issue-rhidp-4067.adoc new file mode 100644 index 0000000000..021e415dd0 --- /dev/null +++ b/modules/release-notes/snip-known-issue-rhidp-4067.adoc @@ -0,0 +1,7 @@ +[id="known-issue-rhidp-4067"] += Bulk Import: Added repositories count is incorrect + +Only the first 20 repositories (in alphabetical order) can be displayed at most on the Bulk Import Added Repositories page. Also, the count of Added Repositories displayed might be wrong. In future releases, we plan to address this with proper pagination. Meanwhile, as a workaround, searching would still work against all Added Repositories. So you can still search any Added Repository and get it listed on the table. + +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-4067[RHIDP-4067] diff --git a/modules/release-notes/snip-known-issue-rhidp-4069.adoc b/modules/release-notes/snip-known-issue-rhidp-4069.adoc new file mode 100644 index 0000000000..085fcb64c7 --- /dev/null +++ b/modules/release-notes/snip-known-issue-rhidp-4069.adoc @@ -0,0 +1,9 @@ +[id="known-issue-rhidp-4069"] += Conditional alias `$ownerRefs` does not work + +A conditional alias that uses `$ownerRefs` doesn't work. + +There is no workaround. 
+ +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-4069[RHIDP-4069] diff --git a/modules/release-notes/snip-known-issue-rhidp-4378.adoc b/modules/release-notes/snip-known-issue-rhidp-4378.adoc new file mode 100644 index 0000000000..e4cb4a1826 --- /dev/null +++ b/modules/release-notes/snip-known-issue-rhidp-4378.adoc @@ -0,0 +1,9 @@ +[id="known-issue-rhidp-4378"] += Admin users should have high privilege + +A conditional alias that uses `$ownerRefs` doesn't work. + +There is no workaround. + +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-4378[RHIDP-4378] diff --git a/modules/release-notes/snip-removed-functionality-rhidp-3048.adoc b/modules/release-notes/snip-removed-functionality-rhidp-3048.adoc new file mode 100644 index 0000000000..223c4db7aa --- /dev/null +++ b/modules/release-notes/snip-removed-functionality-rhidp-3048.adoc @@ -0,0 +1,11 @@ +[id="removed-functionality-rhidp-3048"] += The 'dynamic-plugins' config map is named dynamically + +Before this update, the dynamic-plugins config map name was hardcoded. +Therefore, it was not possible to install two {product} Helm charts in the same namespace. + +With this update, the dynamic-plugins config map is named dynamically based on the deployment name similar to how all other component names are generated. +When upgrading from a previous chart, you might need to manually update that section of your `values.yaml` file to pull in the correct config map. 
+ +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-3048[RHIDP-3048] diff --git a/modules/release-notes/snip-removed-functionality-rhidp-3074.adoc b/modules/release-notes/snip-removed-functionality-rhidp-3074.adoc new file mode 100644 index 0000000000..52a31ffc18 --- /dev/null +++ b/modules/release-notes/snip-removed-functionality-rhidp-3074.adoc @@ -0,0 +1,10 @@ +[id="removed-functionality-rhidp-3074"] += Signing in without user in the software catalog is now disabled by default + +By default, it is now required for the user entity to exist in the software catalog to allow sign in. +This is required for production-ready deployments since identities need to exist and originate from a trusted source (that is, the Identity Provider) in order for security controls such as RBAC and Audit logging to be effective. +To bypass this, enable the `dangerouslySignInWithoutUserInCatalog` configuration that allows sign in without the user being in the catalog. +Enabling this option is dangerous as it might allow unauthorized users to gain access. 
+ +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-3074[RHIDP-3074] diff --git a/modules/release-notes/snip-removed-functionality-rhidp-3187.adoc b/modules/release-notes/snip-removed-functionality-rhidp-3187.adoc new file mode 100644 index 0000000000..69ddb3af71 --- /dev/null +++ b/modules/release-notes/snip-removed-functionality-rhidp-3187.adoc @@ -0,0 +1,27 @@ +[id="removed-functionality-rhidp-3187"] += {company-name} and Community Technology Preview (TP) plugins and actions are disabled by default + +Before this update, some {company-name} and Community Technology Preview (TP) plugins and actions were enabled by default: + +.Technology Preview plugins +* @backstage-community/plugin-catalog-backend-module-scaffolder-relation-processor (changing in RHIDP-3643) + +.Community Support plugins +* @backstage/plugin-scaffolder-backend-module-azure +* @backstage/plugin-scaffolder-backend-module-bitbucket-cloud +* @backstage/plugin-scaffolder-backend-module-bitbucket-server +* @backstage/plugin-scaffolder-backend-module-gerrit +* @backstage/plugin-scaffolder-backend-module-github +* @backstage/plugin-scaffolder-backend-module-gitlab +* @roadiehq/scaffolder-backend-module-http-request +* @roadiehq/scaffolder-backend-module-utils + +With this update, all plugins included under the link:https://access.redhat.com/support/offerings/techpreview[Technology Preview scope of support], whether from {company-name} or the community, are disabled by default. + +.Procedure +* If your workload requires these plugins, enable them in your custom resource or configmap using `disabled: false`. + +//See https://github.com/redhat-developer/red-hat-developer-hub/blob/main/dynamic-plugins.default.yaml for examples. 
+ +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-3187[RHIDP-3187] diff --git a/modules/release-notes/snip-removed-functionality-rhidp-4293.adoc b/modules/release-notes/snip-removed-functionality-rhidp-4293.adoc new file mode 100644 index 0000000000..b297fda986 --- /dev/null +++ b/modules/release-notes/snip-removed-functionality-rhidp-4293.adoc @@ -0,0 +1,36 @@ +[id="removed-functionality-rhidp-4293"] += Plugins with updated scope + +With this update, three plugins previously under the `@janus-idp` scope have moved to `@backstage-community`: + +[%header,cols=2*] +|=== +|*RHDH 1.2 Plugin Name* |*RHDH 1.3 Plugin Name* + +| `@janus-idp/backstage-plugin-argocd` +| `@backstage-community/plugin-redhat-argocd` + +| `@janus-idp/backstage-plugin-3scale-backend` +| `@backstage-community/plugin-3scale-backend` + +| `@janus-idp/backstage-plugin-catalog-backend-module-scaffolder-relation-processor` +| `@backstage-community/plugin-catalog-backend-module-scaffolder-relation-processor` +|=== + +As the scope of the previous plugins has been updated, the dynamic plugin configuration has also changed. + +[%header,cols=2*] +|=== +|*RHDH 1.2 Configuration* |*RHDH 1.3 Configuration* + +| link:https://github.com/janus-idp/backstage-showcase/blob/1.2.x/dynamic-plugins.default.yaml[dynamic-plugins.default.yaml] +| link:https://github.com/janus-idp/backstage-showcase/blob/release-1.3/dynamic-plugins.default.yaml[dynamic-plugins.default.yaml] +|=== + +.Procedure +* If your workload requires plugins with an updated scope, revise your configuration to use the latest plugins from the new scope. + +//See https://github.com/redhat-developer/red-hat-developer-hub/blob/main/dynamic-plugins.default.yaml for examples. 
+ +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-4293[RHIDP-4293] diff --git a/modules/release-notes/snip-technology-preview-rhidp-1397.adoc b/modules/release-notes/snip-technology-preview-rhidp-1397.adoc new file mode 100644 index 0000000000..3f9e08f2e2 --- /dev/null +++ b/modules/release-notes/snip-technology-preview-rhidp-1397.adoc @@ -0,0 +1,10 @@ +[id="technology-preview-rhidp-1397"] += Registering existing entities from multiple Git repositories simultaneously + +With this update, you can register entities from multiple repositories simultaneously, without the need to register them individually. + +For repositories without a `catalog-info.yaml` file, the plugin creates a pull request. +After the pull request is merged, {product-short} registers the entity in the software catalog. + +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-1397[RHIDP-1397] diff --git a/modules/release-notes/snip-technology-preview-rhidp-3713.adoc b/modules/release-notes/snip-technology-preview-rhidp-3713.adoc new file mode 100644 index 0000000000..a5a17cf39a --- /dev/null +++ b/modules/release-notes/snip-technology-preview-rhidp-3713.adoc @@ -0,0 +1,8 @@ +[id="technology-preview-rhidp-3713"] += Added the catalog backend module logs plugin + +With this update, {product-short} includes the `@backstage/plugin-catalog-backend-module-logs` plugin as a dynamic plugin to help surface catalog errors into the logs. +This dynamic plugin is disabled by default. 
+ +.Additional resources +* link:https://issues.redhat.com/browse/RHIDP-3713[RHIDP-3713] diff --git a/modules/templates/proc-adding-templates.adoc b/modules/templates/proc-adding-templates.adoc new file mode 100644 index 0000000000..5f421cc8de --- /dev/null +++ b/modules/templates/proc-adding-templates.adoc @@ -0,0 +1,39 @@ +// Module included in the following assemblies: +// +// * assemblies/assembly-admin-templates.adoc + +:_mod-docs-content-type: PROCEDURE +[id="proc-adding-templates_{context}"] += Importing an existing template to {product} + +You can add an existing template to your {product} instance by using the Catalog Processor. + +.Prerequisites + +* You have created a directory or repository that contains at least one template YAML file. +* If you want to use a template that is stored in a repository such as GitHub or GitLab, you must configure a {product} integration for your provider. + +.Procedure + +* In the `app-config.yaml` configuration file, modify the `catalog.rules` section to include a rule for templates, and configure the `catalog.locations` section to point to the template that you want to add, as shown in the following example: ++ +[source,yaml] +---- +# ... +catalog: + rules: + - allow: [Template] # <1> + locations: + - type: url # <2> + target: https:///example-template.yaml # <3> +# ... +---- +<1> To allow new templates to be added to the catalog, you must add a `Template` rule. +<2> If you are importing templates from a repository, such as GitHub or GitLab, use the `url` type. +<3> Specify the URL for the template. + +.Verification + +. Click the *Catalog* tab in the navigation panel. +. In the *Kind* drop-down menu, select *Template*. +. Confirm that your template is shown in the list of existing templates. 
diff --git a/modules/templates/proc-creating-templates.adoc b/modules/templates/proc-creating-templates.adoc new file mode 100644 index 0000000000..56f0f6dccb --- /dev/null +++ b/modules/templates/proc-creating-templates.adoc @@ -0,0 +1,54 @@ +// Module included in the following assemblies: +// +// * assemblies/assembly-admin-templates.adoc + +:_mod-docs-content-type: PROCEDURE +[id="proc-creating-templates_{context}"] += Creating a template by using the Template Editor + +You can create a template by using the Template Editor. + +.Procedure + +. Access the Template Editor by using one of the following options: ++ +image::rhdh/template-editor.png[Template Editor] +** Open the URL `\https:///create/edit` for your {product} instance. +** Click *Create...* in the navigation menu of the {product} console, then click the overflow menu button and select *Template editor*. +. Click *Edit Template Form*. +. Optional: Modify the YAML definition for the parameters of your template. For more information about these parameters, see <>. +. In the *Name ** field, enter a unique name for your template. +. From the *Owner* drop-down menu, choose an owner for the template. +. Click *Next*. +. In the *Repository Location* view, enter the following information about the hosted repository that you want to publish the template to: +.. Select an available *Host* from the drop-down menu. ++ +-- +[NOTE] +==== +Available hosts are defined in the YAML parameters by the `allowedHosts` field: + +.Example YAML +[source,yaml] +---- +# ... + ui:options: + allowedHosts: + - github.com +# ... +---- + +==== +-- + +.. In the *Owner ** field, enter an organization, user or project that the hosted repository belongs to. +.. In the *Repository ** field, enter the name of the hosted repository. +.. Click *Review*. + +. Review the information for accuracy, then click *Create*. + +.Verification + +. Click the *Catalog* tab in the navigation panel. +. In the *Kind* drop-down menu, select *Template*. +. 
Confirm that your template is shown in the list of existing templates. diff --git a/modules/templates/ref-creating-templates.adoc b/modules/templates/ref-creating-templates.adoc new file mode 100644 index 0000000000..943a880e95 --- /dev/null +++ b/modules/templates/ref-creating-templates.adoc @@ -0,0 +1,76 @@ +// Module included in the following assemblies: +// +// * assemblies/assembly-admin-templates.adoc + +:_mod-docs-content-type: REFERENCE +[id="ref-creating-templates_{context}"] += Creating a template as a YAML file + +You can create a template by defining a `Template` object as a YAML file. + +The `Template` object describes the template and its metadata. It also contains required input variables and a list of actions that are executed by the scaffolding service. + +.`Template` object example +[source,yaml] +---- +apiVersion: scaffolder.backstage.io/v1beta3 +kind: Template +metadata: + name: template-name # <1> + title: Example template # <2> + description: An example template for v1beta3 scaffolder. # <3> +spec: + owner: backstage/techdocs-core # <4> + type: service # <5> + parameters: # <6> + - title: Fill in some steps + required: + - name + properties: + name: + title: Name + type: string + description: Unique name of the component + owner: + title: Owner + type: string + description: Owner of the component + - title: Choose a location + required: + - repoUrl + properties: + repoUrl: + title: Repository Location + type: string + steps: # <7> + - id: fetch-base + name: Fetch Base + action: fetch:template + # ... + output: # <8> + links: + - title: Repository # <9> + url: ${{ steps['publish'].output.remoteUrl }} + - title: Open in catalog # <10> + icon: catalog + entityRef: ${{ steps['register'].output.entityRef }} +# ... +---- +<1> Specify a name for the template. +<2> Specify a title for the template. This is the title that is visible on the template tile in the *Create...* view. +<3> Specify a description for the template. 
This is the description that is visible on the template tile in the *Create...* view. +<4> Specify the ownership of the template. The `owner` field provides information about who is responsible for maintaining or overseeing the template within the system or organization. In the provided example, the `owner` field is set to `backstage/techdocs-core`. This means that this template belongs to the `techdocs-core` project in the `backstage` namespace. +<5> Specify the component type. Any string value is accepted for this required field, but your organization should establish a proper taxonomy for these. {product} instances may read this field and behave differently depending on its value. For example, a `website` type component may present tooling in the {product} interface that is specific to just websites. ++ +The following values are common for this field: ++ +-- +`service`:: A backend service, typically exposing an API. +`website`:: A website. +`library`:: A software library, such as an npm module or a Java library. +-- +<6> Use the `parameters` section to specify parameters for user input that are shown in a form view when a user creates a component by using the template in the {product} console. Each `parameters` subsection, defined by a title and properties, creates a new form page with that definition. +<7> Use the `steps` section to specify steps that are executed in the backend. These steps must be defined by using a unique step ID, a name, and an action. You can view actions that are available on your {product} instance by visiting the URL `\https:///create/actions`. +<8> Use the `output` section to specify the structure of output data that is created when the template is used. The `output` section, particularly the `links` subsection, provides valuable references and URLs that users can utilize to access and interact with components that are created from the template. +<9> Provides a reference or URL to the repository associated with the generated component. 
+<10> Provides a reference or URL that allows users to open the generated component in a catalog or directory where various components are listed. diff --git a/modules/upgrade/proc-upgrade-rhdh-helm.adoc b/modules/upgrade/proc-upgrade-rhdh-helm.adoc new file mode 100644 index 0000000000..02d8e20179 --- /dev/null +++ b/modules/upgrade/proc-upgrade-rhdh-helm.adoc @@ -0,0 +1,49 @@ +// Module included in the following assemblies +// + +:_mod-docs-content-type: PROCEDURE +[id="proc-upgrade-rhdh-helm_{context}"] + += Upgrading the {product} Helm Chart + +You can upgrade to a later version of {product} in {ocp-short} by using either the web console or the CLI. + +* {ocp-short} web console + +. In the *Developer* perspective, click *Helm* to open the *Helm Releases* tab. + +. Click the overflow menu on the Helm release that you want to use and select *Upgrade*. + +. On the *Upgrade Helm Release* page, select the version of {product-short} that you want to upgrade to from the chart version drop-down list. + +. Click *Upgrade*. ++ +[NOTE] +==== +It might take a few minutes to delete the resources in the older versions and to start the newer versions of the {product-short} pods. +==== + +. Close all open {product-short} web pages, and log in again to verify that the upgrade was successful. + +* {ocp-short} CLI + +. Log in to the {ocp-short} cluster as the cluster administrator and switch to the project or namespace in which {product-short} was installed. ++ +[source,terminal] +---- +oc login -u <user> -p <password> https://api.<cluster-url>:6443 +oc project <project-name> +---- + +. 
For a new version of the {product-short} Helm chart, run the following upgrade command: ++ +[source,terminal,subs="attributes+"] +---- +helm upgrade -i rhdh -f new-values.yml \ + openshift-helm-charts/redhat-developer-hub --version {product-chart-version} +---- ++ +[NOTE] +==== +You can also provide extra values to the chart by creating a `new-values.yml` file on your workstation with values that override the attributes in the installed chart or by adding new attributes. +==== diff --git a/modules/upgrade/proc-upgrade-rhdh-operator.adoc b/modules/upgrade/proc-upgrade-rhdh-operator.adoc new file mode 100644 index 0000000000..bfc3fca75e --- /dev/null +++ b/modules/upgrade/proc-upgrade-rhdh-operator.adoc @@ -0,0 +1,41 @@ +// Module included in the following assemblies +// + +:_mod-docs-content-type: PROCEDURE +[id="proc-upgrade-rhdh-operator_{context}"] + += Upgrading the {product} Operator + +If you use the Operator to deploy your {product} instance, then an administrator can use the {ocp-short} web console to upgrade the Operator to a later version. + +{ocp-short} is currently supported from version {ocp-version-min} to {ocp-version}. See also the link:https://access.redhat.com/support/policy/updates/developerhub[{product} Life Cycle]. + +.Prerequisites + +* You are logged in as an administrator on the {ocp-short} web console. +* You have installed the {product} Operator. +* You have configured the appropriate roles and permissions within your project to create or access an application. For more information, see the link:https://docs.openshift.com/container-platform/{ocp-version}/applications/index.html[{ocp-brand-name} documentation on Building applications]. + +.Procedure + +. In the *Administrator* perspective of the {ocp-short} web console, click *Operators > Installed Operators*. +. On the *Installed Operators* page, click *{product} Operator*. +. On the *{product} Operator* page, click the *Subscription* tab. +. 
From the *Upgrade status* field on the *Subscription details* page, click *Upgrade available*. ++ +[NOTE] +==== +If there is no upgrade available, the *Upgrade status* field value is *Up to date*. +==== ++ +. On the *InstallPlan details* page, click *Preview InstallPlan > Approve*. + +.Verification + +* The *Upgrade status* field value on the *Subscription details* page is *Up to date*. + +[role="_additional-resources"] +.Additional resources + +* link:{installing-on-ocp-book-url}#proc-install-operator_assembly-install-rhdh-ocp-operator[{installing-on-ocp-book-title} with the Operator]. +* link:https://docs.openshift.com/container-platform/{ocp-version}/operators/admin/olm-adding-operators-to-cluster.html#olm-installing-from-operatorhub-using-web-console_olm-adding-operators-to-a-cluster[Installing from OperatorHub using the web console] diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000..04cfbe7c34 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,5 @@ +Jinja2>=3.1.4 +jira>=3.8.0 +pip>=21.3.1 +pyyaml>=6.0.2 +tox>=4.21.0 diff --git a/titles/admin-rhdh/artifacts b/titles/admin-rhdh/artifacts new file mode 120000 index 0000000000..f30b6dea60 --- /dev/null +++ b/titles/admin-rhdh/artifacts @@ -0,0 +1 @@ +../../artifacts \ No newline at end of file diff --git a/titles/admin-rhdh/assemblies b/titles/admin-rhdh/assemblies new file mode 120000 index 0000000000..91646274db --- /dev/null +++ b/titles/admin-rhdh/assemblies @@ -0,0 +1 @@ +../../assemblies \ No newline at end of file diff --git a/titles/admin-rhdh/docinfo.xml b/titles/admin-rhdh/docinfo.xml new file mode 100644 index 0000000000..f86bb2f376 --- /dev/null +++ b/titles/admin-rhdh/docinfo.xml @@ -0,0 +1,12 @@ +Administration guide for {product} +{product} +{product-version} + + + + {product} is an enterprise-grade platform for building developer portals. 
As an administrative user, you can manage roles and permissions of other users and configure {product-short} to meet the specific needs of your organization. + + + {company-name} Customer Content Services + + diff --git a/titles/admin-rhdh/images b/titles/admin-rhdh/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/titles/admin-rhdh/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/titles/admin-rhdh/master.adoc b/titles/admin-rhdh/master.adoc new file mode 100644 index 0000000000..e9369eb549 --- /dev/null +++ b/titles/admin-rhdh/master.adoc @@ -0,0 +1 @@ +include::title-admin.adoc[] \ No newline at end of file diff --git a/titles/admin-rhdh/modules b/titles/admin-rhdh/modules new file mode 120000 index 0000000000..36719b9de7 --- /dev/null +++ b/titles/admin-rhdh/modules @@ -0,0 +1 @@ +../../modules/ \ No newline at end of file diff --git a/titles/admin-rhdh/title-admin.adoc b/titles/admin-rhdh/title-admin.adoc new file mode 100644 index 0000000000..aeada5a085 --- /dev/null +++ b/titles/admin-rhdh/title-admin.adoc @@ -0,0 +1,47 @@ +[id="title-admin"] +include::artifacts/attributes.adoc[] += Administration guide for {product} +:context: admin-rhdh +:doctype: book +:imagesdir: images + +{product} is an enterprise-grade, open developer platform that you can use to build developer portals. This platform contains a supported and opinionated framework that helps reduce the friction and frustration of developers while boosting their productivity. 
+ +//customer support links +include::artifacts/snip-customer-support-info.adoc[] + +//add a custom application config file to OCP +include::assemblies/assembly-add-custom-app-file-openshift.adoc[leveloffset=+1] + +// configure PostgreSQL database +include::assemblies/assembly-configuring-external-postgresql-databases.adoc[leveloffset=+1] + +// configure RHDH with TLS in kubernetes +include::modules/installation/proc-configuring-an-rhdh-instance-with-tls-in-kubernetes.adoc[leveloffset=+1] + +//configure dynamic plugins - moved to configuring plugins guide +// include::modules/installation/proc-config-dynamic-plugins-rhdh-operator.adoc[leveloffset=+1] + +//Telemetry data collection +include::assemblies/assembly-rhdh-telemetry.adoc[leveloffset=+1] + +// Observability +include::assemblies/assembly-rhdh-observability.adoc[leveloffset=+1] + +// Running RHDH behind a proxy +include::assemblies/assembly-running-rhdh-behind-a-proxy.adoc[leveloffset=+1] + +//aws integration +include::assemblies/assembly-rhdh-integration-aws.adoc[leveloffset=+1] + +//aks integration +include::assemblies/assembly-rhdh-integration-aks.adoc[leveloffset=+1] + +// Managing templates +include::assemblies/assembly-admin-templates.adoc[leveloffset=+1] + +// techdocs plugin +include::assemblies/assembly-techdocs-plugin.adoc[leveloffset=+1] + +// RHDH Operator deployment +include::modules/admin/proc-rhdh-deployment-config.adoc[leveloffset=+1] \ No newline at end of file diff --git a/titles/authentication/artifacts b/titles/authentication/artifacts new file mode 120000 index 0000000000..f30b6dea60 --- /dev/null +++ b/titles/authentication/artifacts @@ -0,0 +1 @@ +../../artifacts \ No newline at end of file diff --git a/titles/authentication/assemblies b/titles/authentication/assemblies new file mode 120000 index 0000000000..91646274db --- /dev/null +++ b/titles/authentication/assemblies @@ -0,0 +1 @@ +../../assemblies \ No newline at end of file diff --git a/titles/authentication/docinfo.xml 
b/titles/authentication/docinfo.xml new file mode 100644 index 0000000000..5f7fe2ebac --- /dev/null +++ b/titles/authentication/docinfo.xml @@ -0,0 +1,11 @@ +{title} +{product} +{product-version} +{subtitle} + + {abstract} + + + {company-name} Customer Content Services + + diff --git a/titles/authentication/images b/titles/authentication/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/titles/authentication/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/titles/authentication/master.adoc b/titles/authentication/master.adoc new file mode 100644 index 0000000000..ec8f76a004 --- /dev/null +++ b/titles/authentication/master.adoc @@ -0,0 +1,14 @@ +include::artifacts/attributes.adoc[] +:context: title-authentication +:doctype: book +:imagesdir: images +:title: Authentication +:subtitle: Configuring authentication to external services in {product} +:abstract: As a {product} platform engineer, you can manage authentication of other users to meet the specific needs of your organization. 
+//[id="{context}"] +//= {title} + +//{abstract} + +include::assemblies/assembly-enabling-authentication.adoc[] + diff --git a/titles/authentication/modules b/titles/authentication/modules new file mode 120000 index 0000000000..36719b9de7 --- /dev/null +++ b/titles/authentication/modules @@ -0,0 +1 @@ +../../modules/ \ No newline at end of file diff --git a/titles/authorization/artifacts b/titles/authorization/artifacts new file mode 120000 index 0000000000..f30b6dea60 --- /dev/null +++ b/titles/authorization/artifacts @@ -0,0 +1 @@ +../../artifacts \ No newline at end of file diff --git a/titles/authorization/assemblies b/titles/authorization/assemblies new file mode 120000 index 0000000000..91646274db --- /dev/null +++ b/titles/authorization/assemblies @@ -0,0 +1 @@ +../../assemblies \ No newline at end of file diff --git a/titles/authorization/docinfo.xml b/titles/authorization/docinfo.xml new file mode 100644 index 0000000000..5f7fe2ebac --- /dev/null +++ b/titles/authorization/docinfo.xml @@ -0,0 +1,11 @@ +{title} +{product} +{product-version} +{subtitle} + + {abstract} + + + {company-name} Customer Content Services + + diff --git a/titles/authorization/images b/titles/authorization/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/titles/authorization/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/titles/authorization/master.adoc b/titles/authorization/master.adoc new file mode 100644 index 0000000000..2607139127 --- /dev/null +++ b/titles/authorization/master.adoc @@ -0,0 +1,13 @@ +include::artifacts/attributes.adoc[] +:context: title-authorization +:doctype: book +:imagesdir: images +:title: Authorization +:subtitle: Configuring authorization by using role based access control (RBAC) in {product} +:abstract: As a {product} platform engineer, you can manage authorizations of other users by using role based access control (RBAC) to meet the specific needs of your organization. 
+//[id="{context}"] +//= {title} + +//{abstract} + +include::assemblies/assembly-configuring-authorization-in-rhdh.adoc[] diff --git a/titles/authorization/modules b/titles/authorization/modules new file mode 120000 index 0000000000..36719b9de7 --- /dev/null +++ b/titles/authorization/modules @@ -0,0 +1 @@ +../../modules/ \ No newline at end of file diff --git a/titles/discover/artifacts b/titles/discover/artifacts new file mode 120000 index 0000000000..f30b6dea60 --- /dev/null +++ b/titles/discover/artifacts @@ -0,0 +1 @@ +../../artifacts \ No newline at end of file diff --git a/titles/discover/assemblies b/titles/discover/assemblies new file mode 120000 index 0000000000..91646274db --- /dev/null +++ b/titles/discover/assemblies @@ -0,0 +1 @@ +../../assemblies \ No newline at end of file diff --git a/titles/discover/docinfo.xml b/titles/discover/docinfo.xml new file mode 100644 index 0000000000..5f7fe2ebac --- /dev/null +++ b/titles/discover/docinfo.xml @@ -0,0 +1,11 @@ +{title} +{product} +{product-version} +{subtitle} + + {abstract} + + + {company-name} Customer Content Services + + diff --git a/titles/discover/images b/titles/discover/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/titles/discover/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/titles/discover/master.adoc b/titles/discover/master.adoc new file mode 100644 index 0000000000..961e87c940 --- /dev/null +++ b/titles/discover/master.adoc @@ -0,0 +1,14 @@ +include::artifacts/attributes.adoc[] +:context: title-discover +:doctype: book +:imagesdir: images +:title: Discover +:subtitle: Introduction to {product} +:abstract: {product} is a developer platform designed to build developer portals. Use {product} to provide a streamlined development environment with a centralized software catalog to build high-quality software efficiently. 
+ +//[id="{context}"] +//= {title} + +//{abstract} + +include::assemblies/assembly_about-rhdh.adoc[] diff --git a/titles/discover/modules b/titles/discover/modules new file mode 120000 index 0000000000..36719b9de7 --- /dev/null +++ b/titles/discover/modules @@ -0,0 +1 @@ +../../modules/ \ No newline at end of file diff --git a/titles/getting-started-rhdh/artifacts b/titles/getting-started-rhdh/artifacts new file mode 120000 index 0000000000..f30b6dea60 --- /dev/null +++ b/titles/getting-started-rhdh/artifacts @@ -0,0 +1 @@ +../../artifacts \ No newline at end of file diff --git a/titles/getting-started-rhdh/assemblies b/titles/getting-started-rhdh/assemblies new file mode 120000 index 0000000000..91646274db --- /dev/null +++ b/titles/getting-started-rhdh/assemblies @@ -0,0 +1 @@ +../../assemblies \ No newline at end of file diff --git a/titles/getting-started-rhdh/docinfo.xml b/titles/getting-started-rhdh/docinfo.xml new file mode 100644 index 0000000000..f3d648c9e0 --- /dev/null +++ b/titles/getting-started-rhdh/docinfo.xml @@ -0,0 +1,12 @@ +Getting started with {product} +{product} +{product-version} + + + + {product} is an enterprise-grade platform for building developer portals. You can configure and customize your {product-short} instance to meet your needs and preferences. 
+ + + {company-name} Customer Content Services + + \ No newline at end of file diff --git a/titles/getting-started-rhdh/images b/titles/getting-started-rhdh/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/titles/getting-started-rhdh/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/titles/getting-started-rhdh/master.adoc b/titles/getting-started-rhdh/master.adoc new file mode 100644 index 0000000000..c8e00e16da --- /dev/null +++ b/titles/getting-started-rhdh/master.adoc @@ -0,0 +1 @@ +include::title-getting-started.adoc[] \ No newline at end of file diff --git a/titles/getting-started-rhdh/modules b/titles/getting-started-rhdh/modules new file mode 120000 index 0000000000..8b0e854007 --- /dev/null +++ b/titles/getting-started-rhdh/modules @@ -0,0 +1 @@ +../../modules \ No newline at end of file diff --git a/titles/getting-started-rhdh/title-getting-started.adoc b/titles/getting-started-rhdh/title-getting-started.adoc new file mode 100644 index 0000000000..b583a77e42 --- /dev/null +++ b/titles/getting-started-rhdh/title-getting-started.adoc @@ -0,0 +1,36 @@ +[id='title-getting-started'] +include::artifacts/attributes.adoc[] += Getting started with {product} +:context: rhdh-getting-started +:doctype: book +:imagesdir: images + +As a developer, you can use {product} to experience a streamlined development environment. {product} is driven by a centralized software catalog, providing efficiency to your microservices and infrastructure. It enables your product team to deliver quality code without any compromises. 
+ +//customer support links +include::artifacts/snip-customer-support-info.adoc[] + +// rhdh overview +include::modules/installation/con-rhdh-overview.adoc[leveloffset=+1] + +// Sizing +include::modules/getting-started/ref-rhdh-sizing.adoc[leveloffset=+1] + +// supported configs and customization +include::modules/getting-started/ref-rhdh-supported-configs.adoc[leveloffset=+1] +include::assemblies/assembly-add-custom-app-file-openshift.adoc[leveloffset=+2] + +include::assemblies/assembly-bulk-importing-from-github.adoc[leveloffset=+1] + +include::modules/getting-started/proc-customize-rhdh-homepage.adoc[leveloffset=+1] +include::modules/getting-started/proc-customize-rhdh-tech-radar-page.adoc[leveloffset=+1] +include::modules/getting-started/proc-customize-rhdh-learning-paths.adoc[leveloffset=+1] + +include::assemblies/assembly-customize-rhdh-theme.adoc[leveloffset=+1] + +include::modules/getting-started/con-servicenow-custom-actions.adoc[leveloffset=+1] +include::modules/getting-started/proc-enable-servicenow-custom-actions-plugin.adoc[leveloffset=+2] +include::modules/getting-started/ref-supported-servicenow-custom-actions.adoc[leveloffset=+2] + +// Audit logging +include::assemblies/assembly-audit-log.adoc[leveloffset=+1] diff --git a/titles/install-rhdh-air-gapped/artifacts b/titles/install-rhdh-air-gapped/artifacts new file mode 120000 index 0000000000..f30b6dea60 --- /dev/null +++ b/titles/install-rhdh-air-gapped/artifacts @@ -0,0 +1 @@ +../../artifacts \ No newline at end of file diff --git a/titles/install-rhdh-air-gapped/assemblies b/titles/install-rhdh-air-gapped/assemblies new file mode 120000 index 0000000000..91646274db --- /dev/null +++ b/titles/install-rhdh-air-gapped/assemblies @@ -0,0 +1 @@ +../../assemblies \ No newline at end of file diff --git a/titles/install-rhdh-air-gapped/docinfo.xml b/titles/install-rhdh-air-gapped/docinfo.xml new file mode 100644 index 0000000000..f86cb562a6 --- /dev/null +++ b/titles/install-rhdh-air-gapped/docinfo.xml @@ 
-0,0 +1,13 @@ +Installing {product} in an air-gapped environment +{product} +{product-version} + + + + {product} is an enterprise-grade platform for building developer portals. Administrative users can configure roles, permissions, and other settings to enable other authorized users to deploy an air-gapped {product-short} instance on any supported platform using either the Operator or Helm chart. + + + {company-name} Customer Content Services + + diff --git a/titles/install-rhdh-air-gapped/images b/titles/install-rhdh-air-gapped/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/titles/install-rhdh-air-gapped/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/titles/install-rhdh-air-gapped/master.adoc b/titles/install-rhdh-air-gapped/master.adoc new file mode 100644 index 0000000000..f7125f047a --- /dev/null +++ b/titles/install-rhdh-air-gapped/master.adoc @@ -0,0 +1,12 @@ +[id="title-install-rhdh-air-gapped"] +include::artifacts/attributes.adoc[] += Installing {product} in an air-gapped environment +:context: title-install-rhdh-air-gapped +:doctype: book +:imagesdir: images + +include::modules/installation/con-airgapped-environment.adoc[leveloffset=+1] + +include::modules/installation/proc-install-rhdh-airgapped-environment-ocp-operator.adoc[leveloffset=+1] + +include::modules/installation/proc-install-rhdh-airgapped-environment-ocp-helm.adoc[leveloffset=+1] diff --git a/titles/install-rhdh-air-gapped/modules b/titles/install-rhdh-air-gapped/modules new file mode 120000 index 0000000000..8b0e854007 --- /dev/null +++ b/titles/install-rhdh-air-gapped/modules @@ -0,0 +1 @@ +../../modules \ No newline at end of file diff --git a/titles/install-rhdh-aks/artifacts b/titles/install-rhdh-aks/artifacts new file mode 120000 index 0000000000..f30b6dea60 --- /dev/null +++ b/titles/install-rhdh-aks/artifacts @@ -0,0 +1 @@ +../../artifacts \ No newline at end of file diff --git a/titles/install-rhdh-aks/assemblies 
b/titles/install-rhdh-aks/assemblies new file mode 120000 index 0000000000..91646274db --- /dev/null +++ b/titles/install-rhdh-aks/assemblies @@ -0,0 +1 @@ +../../assemblies \ No newline at end of file diff --git a/titles/install-rhdh-aks/docinfo.xml b/titles/install-rhdh-aks/docinfo.xml new file mode 100644 index 0000000000..563695acbe --- /dev/null +++ b/titles/install-rhdh-aks/docinfo.xml @@ -0,0 +1,13 @@ +Installing {product} on {aks-brand-name} +{product} +{product-version} + + + + {product} is an enterprise-grade platform for building developer portals. Administrative users can configure roles, permissions, and other settings to enable other authorized users to deploy a {product-short} instance on {aks-brand-name} ({aks-short}) using either the Operator or Helm chart. + + + {company-name} Customer Content Services + + diff --git a/titles/install-rhdh-aks/images b/titles/install-rhdh-aks/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/titles/install-rhdh-aks/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/titles/install-rhdh-aks/master.adoc b/titles/install-rhdh-aks/master.adoc new file mode 100644 index 0000000000..948c7ef8ff --- /dev/null +++ b/titles/install-rhdh-aks/master.adoc @@ -0,0 +1,20 @@ +[id="title-install-rhdh-aks"] +include::artifacts/attributes.adoc[] += Installing {product} on {aks-brand-name} +:context: title-install-rhdh-aks +:doctype: book +:imagesdir: images + +// aks deployment +//include::assemblies/assembly-install-rhdh-aks.adoc[leveloffset=+1] //RHIDP-4165: multi-chapter format + +You can install {product} on {aks-brand-name} ({aks-short}) using one of the following methods: + +* The {product} Operator +* The {product} Helm chart + +// Operator method +include::modules/installation/proc-rhdh-deploy-aks-operator.adoc[leveloffset=+1] + +// Helm chart method +include::modules/installation/proc-rhdh-deploy-aks-helm.adoc[leveloffset=+1] diff --git a/titles/install-rhdh-aks/modules 
b/titles/install-rhdh-aks/modules new file mode 120000 index 0000000000..8b0e854007 --- /dev/null +++ b/titles/install-rhdh-aks/modules @@ -0,0 +1 @@ +../../modules \ No newline at end of file diff --git a/titles/install-rhdh-eks/artifacts b/titles/install-rhdh-eks/artifacts new file mode 120000 index 0000000000..f30b6dea60 --- /dev/null +++ b/titles/install-rhdh-eks/artifacts @@ -0,0 +1 @@ +../../artifacts \ No newline at end of file diff --git a/titles/install-rhdh-eks/assemblies b/titles/install-rhdh-eks/assemblies new file mode 120000 index 0000000000..91646274db --- /dev/null +++ b/titles/install-rhdh-eks/assemblies @@ -0,0 +1 @@ +../../assemblies \ No newline at end of file diff --git a/titles/install-rhdh-eks/docinfo.xml b/titles/install-rhdh-eks/docinfo.xml new file mode 100644 index 0000000000..b1a2c5d42f --- /dev/null +++ b/titles/install-rhdh-eks/docinfo.xml @@ -0,0 +1,13 @@ +Installing {product} on {eks-brand-name} +{product} +{product-version} + + + + {product} is an enterprise-grade platform for building developer portals. Administrative users can configure roles, permissions, and other settings to enable other authorized users to deploy a {product-short} instance on {eks-brand-name} ({eks-short}) using either the Operator or Helm chart. 
+ + + {company-name} Customer Content Services + + diff --git a/titles/install-rhdh-eks/images b/titles/install-rhdh-eks/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/titles/install-rhdh-eks/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/titles/install-rhdh-eks/master.adoc b/titles/install-rhdh-eks/master.adoc new file mode 100644 index 0000000000..f21ea7f614 --- /dev/null +++ b/titles/install-rhdh-eks/master.adoc @@ -0,0 +1,22 @@ +[id="title-install-rhdh-eks"] +include::artifacts/attributes.adoc[] += Installing {product} on {eks-brand-name} +:context: title-install-rhdh-eks +:doctype: book +:imagesdir: images + +// aws eks deployment +//include::assemblies/assembly-install-rhdh-eks.adoc[leveloffset=+1] //RHIDP-4165: multi-chapter format + +You can install {product} on {eks-brand-name} ({eks-short}) using one of the following methods: + +* The {product} Operator +* The {product} Helm chart + +// Operator method +include::modules/installation/proc-rhdh-deploy-eks-operator.adoc[leveloffset=+1] + +include::modules/installation/proc-deploy-rhdh-instance-eks.adoc[leveloffset=+2] + +// Helm chart method +include::modules/installation/proc-rhdh-deploy-eks-helm.adoc[leveloffset=+1] diff --git a/titles/install-rhdh-eks/modules b/titles/install-rhdh-eks/modules new file mode 120000 index 0000000000..8b0e854007 --- /dev/null +++ b/titles/install-rhdh-eks/modules @@ -0,0 +1 @@ +../../modules \ No newline at end of file diff --git a/titles/install-rhdh-ocp/artifacts b/titles/install-rhdh-ocp/artifacts new file mode 120000 index 0000000000..f30b6dea60 --- /dev/null +++ b/titles/install-rhdh-ocp/artifacts @@ -0,0 +1 @@ +../../artifacts \ No newline at end of file diff --git a/titles/install-rhdh-ocp/assemblies b/titles/install-rhdh-ocp/assemblies new file mode 120000 index 0000000000..91646274db --- /dev/null +++ b/titles/install-rhdh-ocp/assemblies @@ -0,0 +1 @@ +../../assemblies \ No newline at end of file diff --git 
a/titles/install-rhdh-ocp/docinfo.xml b/titles/install-rhdh-ocp/docinfo.xml new file mode 100644 index 0000000000..329d70fca5 --- /dev/null +++ b/titles/install-rhdh-ocp/docinfo.xml @@ -0,0 +1,13 @@ +Installing {product} on {ocp-short} +{product} +{product-version} + + + + {product} is an enterprise-grade platform for building developer portals. Administrative users can configure roles, permissions, and other settings to enable other authorized users to deploy a {product-short} instance on {ocp-brand-name} using either the Operator or Helm chart. + + + {company-name} Customer Content Services + + diff --git a/titles/install-rhdh-ocp/images b/titles/install-rhdh-ocp/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/titles/install-rhdh-ocp/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/titles/install-rhdh-ocp/master.adoc b/titles/install-rhdh-ocp/master.adoc new file mode 100644 index 0000000000..3c3c1a0eb5 --- /dev/null +++ b/titles/install-rhdh-ocp/master.adoc @@ -0,0 +1,39 @@ +[id="title-install-rhdh-ocp"] +include::artifacts/attributes.adoc[] += Installing {product} on {ocp-short} +:context: title-install-rhdh-ocp +:doctype: book +:imagesdir: images + +// ocp deployment +//include::assemblies/assembly-install-rhdh-ocp.adoc[leveloffset=+1] + +You can install {product} on {ocp-short} by using one of the following installers: + +The {product} Operator:: ++ +-- +* Ready for immediate use in {ocp-short} after an administrator installs it with OperatorHub +* Uses Operator Lifecycle Management (OLM) to manage automated subscription updates on {ocp-short} +* Requires preinstallation of Operator Lifecycle Management (OLM) to manage automated subscription updates on Kubernetes +-- + +The {product} Helm chart:: ++ +-- +* Ready for immediate use in both {ocp-short} and Kubernetes +* Requires manual installation and management +-- + +Use the installation method that best meets your needs and preferences. 
+ +.Additional resources +* For more information about choosing an installation method, see link:https://www.redhat.com/en/technologies/cloud-computing/openshift/helm[Helm Charts vs. Operators]. +* For more information about the Operator method, see link:https://docs.openshift.com/container-platform/{ocp-version}/operators/understanding/olm-what-operators-are.html[Understanding Operators]. +* For more information about the Helm chart method, see link:https://docs.openshift.com/container-platform/{ocp-version}/applications/working_with_helm_charts/understanding-helm.html[Understanding Helm]. + +// Operator method +include::assemblies/assembly-install-rhdh-ocp-operator.adoc[leveloffset=+1] + +// Helm chart method +include::assemblies/assembly-install-rhdh-ocp-helm.adoc[leveloffset=+1] \ No newline at end of file diff --git a/titles/install-rhdh-ocp/modules b/titles/install-rhdh-ocp/modules new file mode 120000 index 0000000000..8b0e854007 --- /dev/null +++ b/titles/install-rhdh-ocp/modules @@ -0,0 +1 @@ +../../modules \ No newline at end of file diff --git a/titles/install-rhdh-osd-gcp/artifacts b/titles/install-rhdh-osd-gcp/artifacts new file mode 120000 index 0000000000..f30b6dea60 --- /dev/null +++ b/titles/install-rhdh-osd-gcp/artifacts @@ -0,0 +1 @@ +../../artifacts \ No newline at end of file diff --git a/titles/install-rhdh-osd-gcp/assemblies b/titles/install-rhdh-osd-gcp/assemblies new file mode 120000 index 0000000000..91646274db --- /dev/null +++ b/titles/install-rhdh-osd-gcp/assemblies @@ -0,0 +1 @@ +../../assemblies \ No newline at end of file diff --git a/titles/install-rhdh-osd-gcp/docinfo.xml b/titles/install-rhdh-osd-gcp/docinfo.xml new file mode 100644 index 0000000000..f56612be77 --- /dev/null +++ b/titles/install-rhdh-osd-gcp/docinfo.xml @@ -0,0 +1,13 @@ +Installing {product} on {osd-short} on {gcp-brand-name} +{product} +{product-version} + + + + {product} is an enterprise-grade platform for building developer portals. 
Administrative users can configure roles, permissions, and other settings to enable other authorized users to deploy a {product-short} instance on {osd-brand-name} on {gcp-brand-name} ({gcp-short}) using either the Operator or Helm chart. + + + {company-name} Customer Content Services + + diff --git a/titles/install-rhdh-osd-gcp/images b/titles/install-rhdh-osd-gcp/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/titles/install-rhdh-osd-gcp/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/titles/install-rhdh-osd-gcp/master.adoc b/titles/install-rhdh-osd-gcp/master.adoc new file mode 100644 index 0000000000..7df67406f1 --- /dev/null +++ b/titles/install-rhdh-osd-gcp/master.adoc @@ -0,0 +1,17 @@ +[id="title-install-rhdh-osd-gcp"] +include::artifacts/attributes.adoc[] += Installing {product} on {osd-short} on {gcp-brand-name} +:context: title-install-rhdh-osd-gcp +:doctype: book +:imagesdir: images + +You can install {product-short} on {osd-short} on {gcp-brand-name} ({gcp-short}) using one of the following methods: + +* The {product} Operator +* The {product} Helm chart + +// Operator procedure +include::modules/installation/proc-install-rhdh-osd-gcp-operator.adoc[leveloffset=+1] + +// Helm procedure +include::modules/installation/proc-install-rhdh-osd-gcp-helm.adoc[leveloffset=+1] diff --git a/titles/install-rhdh-osd-gcp/modules b/titles/install-rhdh-osd-gcp/modules new file mode 120000 index 0000000000..8b0e854007 --- /dev/null +++ b/titles/install-rhdh-osd-gcp/modules @@ -0,0 +1 @@ +../../modules \ No newline at end of file diff --git a/titles/plugin-rhdh/admin b/titles/plugin-rhdh/admin new file mode 120000 index 0000000000..87a6c0d621 --- /dev/null +++ b/titles/plugin-rhdh/admin @@ -0,0 +1 @@ +../../admin \ No newline at end of file diff --git a/titles/plugin-rhdh/artifacts b/titles/plugin-rhdh/artifacts new file mode 120000 index 0000000000..f30b6dea60 --- /dev/null +++ b/titles/plugin-rhdh/artifacts @@ 
-0,0 +1 @@ +../../artifacts \ No newline at end of file diff --git a/titles/plugin-rhdh/assemblies b/titles/plugin-rhdh/assemblies new file mode 120000 index 0000000000..91646274db --- /dev/null +++ b/titles/plugin-rhdh/assemblies @@ -0,0 +1 @@ +../../assemblies \ No newline at end of file diff --git a/titles/plugin-rhdh/docinfo.xml b/titles/plugin-rhdh/docinfo.xml new file mode 100644 index 0000000000..e8bc68e50b --- /dev/null +++ b/titles/plugin-rhdh/docinfo.xml @@ -0,0 +1,12 @@ +Configuring plugins in {product} +{product} +{product-version} + + + + {product} is a developer platform for building developer portals. You can add and configure plugins in {product-short} to access various software development tools. + + + {company-name} Customer Content Services + + \ No newline at end of file diff --git a/titles/plugin-rhdh/getting-started b/titles/plugin-rhdh/getting-started new file mode 120000 index 0000000000..557f806d93 --- /dev/null +++ b/titles/plugin-rhdh/getting-started @@ -0,0 +1 @@ +../../getting-started \ No newline at end of file diff --git a/titles/plugin-rhdh/images b/titles/plugin-rhdh/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/titles/plugin-rhdh/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/titles/plugin-rhdh/installation b/titles/plugin-rhdh/installation new file mode 120000 index 0000000000..863ed577be --- /dev/null +++ b/titles/plugin-rhdh/installation @@ -0,0 +1 @@ +../../installation \ No newline at end of file diff --git a/titles/plugin-rhdh/master.adoc b/titles/plugin-rhdh/master.adoc new file mode 100644 index 0000000000..3f2f762b7f --- /dev/null +++ b/titles/plugin-rhdh/master.adoc @@ -0,0 +1 @@ +include::title-plugin.adoc[] \ No newline at end of file diff --git a/titles/plugin-rhdh/modules b/titles/plugin-rhdh/modules new file mode 120000 index 0000000000..8b0e854007 --- /dev/null +++ b/titles/plugin-rhdh/modules @@ -0,0 +1 @@ +../../modules \ No newline at end of file 
diff --git a/titles/plugin-rhdh/title-plugin.adoc b/titles/plugin-rhdh/title-plugin.adoc new file mode 100644 index 0000000000..675cc78de4 --- /dev/null +++ b/titles/plugin-rhdh/title-plugin.adoc @@ -0,0 +1,54 @@ +[id='title-plugin'] +include::artifacts/attributes.adoc[] += Configuring plugins in {product} +:context: plugin-rhdh +:doctype: book +:imagesdir: images + +The {product} is an enterprise-grade, integrated developer platform, extended through plugins, that helps reduce the friction and frustration of developers while boosting their productivity. + +//customer support links +include::artifacts/snip-customer-support-info.adoc[] + +//overview +include::modules/con-rhdh-plugins.adoc[leveloffset=+1] + +//create plugin +//include::modules/rhdh-plugins-reference/proc-create-plugin.adoc[leveloffset=+1] //[HM]Include later when the ongoing development is completed. + +//Dynamic plugins +include::assemblies/dynamic-plugins/assembly-rhdh-installing-dynamic-plugins.adoc[leveloffset=+1] + +//== Dynamic plugins +:doc-show-dynamic-content: true + +// [id="rhdh-aap"] // Deprecated +// include::artifacts/rhdh-plugins-reference/aap-backend/aap-backend-plugin-readme.adoc[leveloffset=+2] // Deprecated + +//Ansible plug-ins +[id="rhdh-ansible"] +include::modules/dynamic-plugins/con-ansible-plugin.adoc[leveloffset=+2] + +//[id="rhdh-acr"] +//include::artifacts/rhdh-plugins-reference/acr/acr-plugin-readme.adoc[leveloffset=+2] + +//[id="rhdh-jfrog"] +//include::artifacts/rhdh-plugins-reference/jfrog-artifactory/jfrog-plugin-readme.adoc[leveloffset=+2] + +[id="rhdh-keycloak"] +include::artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-readme.adoc[leveloffset=+2] + +[id="rhdh-nexus"] +include::artifacts/rhdh-plugins-reference/nexus-repository-manager/nexus-repository-manager-plugin-readme.adoc[leveloffset=+2] + +//[id="rhdh-quay"] +//include::artifacts/rhdh-plugins-reference/quay/quay-plugin-readme.adoc[leveloffset=+2] + +[id="rhdh-tekton"] 
+include::artifacts/rhdh-plugins-reference/tekton/tekton-plugin-readme.adoc[leveloffset=+2] + +[id="rhdh-argocd"] +include::artifacts/rhdh-plugins-reference/argocd/argocd-plugin-readme.adoc[leveloffset=+2] + +//[id="rhdh-topology"] +//include::artifacts/rhdh-plugins-reference/topology/topology-plugin-readme.adoc[leveloffset=+2] diff --git a/titles/plugins-rhdh-about/artifacts b/titles/plugins-rhdh-about/artifacts new file mode 120000 index 0000000000..f30b6dea60 --- /dev/null +++ b/titles/plugins-rhdh-about/artifacts @@ -0,0 +1 @@ +../../artifacts \ No newline at end of file diff --git a/titles/plugins-rhdh-about/assemblies b/titles/plugins-rhdh-about/assemblies new file mode 120000 index 0000000000..91646274db --- /dev/null +++ b/titles/plugins-rhdh-about/assemblies @@ -0,0 +1 @@ +../../assemblies \ No newline at end of file diff --git a/titles/plugins-rhdh-about/docinfo.xml b/titles/plugins-rhdh-about/docinfo.xml new file mode 100644 index 0000000000..5fc51a0f23 --- /dev/null +++ b/titles/plugins-rhdh-about/docinfo.xml @@ -0,0 +1,14 @@ +{title} +{product} +{product-version} + + + + The {product} ({product-very-short}) application offers a unified platform with various plugins. Using the plugin ecosystem within the {product-short} application, you can access your development infrastructure and software development tools. 
+ + + + {company-name} Customer Content Services + + diff --git a/titles/plugins-rhdh-about/images b/titles/plugins-rhdh-about/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/titles/plugins-rhdh-about/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/titles/plugins-rhdh-about/master.adoc b/titles/plugins-rhdh-about/master.adoc new file mode 100644 index 0000000000..6029f6e4d8 --- /dev/null +++ b/titles/plugins-rhdh-about/master.adoc @@ -0,0 +1,11 @@ +[id="title-plugins-rhdh-about"] +include::artifacts/attributes.adoc[] +:context: title-plugins-rhdh-about +:doctype: book +:imagesdir: images +:title: Introduction to plugins +:subtitle: Introduction to {product-very-short} plugins +:abstract: As a {product} user, you can learn about {product} plugins. + += Introduction to plugins +include::modules/dynamic-plugins/con-rhdh-plugins.adoc[leveloffset=+1] \ No newline at end of file diff --git a/titles/plugins-rhdh-about/modules b/titles/plugins-rhdh-about/modules new file mode 120000 index 0000000000..8b0e854007 --- /dev/null +++ b/titles/plugins-rhdh-about/modules @@ -0,0 +1 @@ +../../modules \ No newline at end of file diff --git a/titles/plugins-rhdh-configure/artifacts b/titles/plugins-rhdh-configure/artifacts new file mode 120000 index 0000000000..f30b6dea60 --- /dev/null +++ b/titles/plugins-rhdh-configure/artifacts @@ -0,0 +1 @@ +../../artifacts \ No newline at end of file diff --git a/titles/plugins-rhdh-configure/assemblies b/titles/plugins-rhdh-configure/assemblies new file mode 120000 index 0000000000..91646274db --- /dev/null +++ b/titles/plugins-rhdh-configure/assemblies @@ -0,0 +1 @@ +../../assemblies \ No newline at end of file diff --git a/titles/plugins-rhdh-configure/docinfo.xml b/titles/plugins-rhdh-configure/docinfo.xml new file mode 100644 index 0000000000..e4b171f26e --- /dev/null +++ b/titles/plugins-rhdh-configure/docinfo.xml @@ -0,0 +1,13 @@ +{title} +{product} +{product-version} + + + + 
{abstract} + + + {company-name} Customer Content Services + + diff --git a/titles/plugins-rhdh-configure/images b/titles/plugins-rhdh-configure/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/titles/plugins-rhdh-configure/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/titles/plugins-rhdh-configure/master.adoc b/titles/plugins-rhdh-configure/master.adoc new file mode 100644 index 0000000000..78ba275f96 --- /dev/null +++ b/titles/plugins-rhdh-configure/master.adoc @@ -0,0 +1,10 @@ +include::artifacts/attributes.adoc[] +:context: title-plugins-rhdh-configure +:doctype: book +:imagesdir: images + +:title: Configuring dynamic plugins +:subtitle: Configuring dynamic plugins in {product-very-short} as a platform engineer. +:abstract: As a platform engineer, you can configure dynamic plugins in {product-very-short} to access your development infrastructure or software development tools. + +include::assemblies/dynamic-plugins/assembly-configuring-rhdh-plugins.adoc[] \ No newline at end of file diff --git a/titles/plugins-rhdh-configure/modules b/titles/plugins-rhdh-configure/modules new file mode 120000 index 0000000000..8b0e854007 --- /dev/null +++ b/titles/plugins-rhdh-configure/modules @@ -0,0 +1 @@ +../../modules \ No newline at end of file diff --git a/titles/plugins-rhdh-install/artifacts b/titles/plugins-rhdh-install/artifacts new file mode 120000 index 0000000000..f30b6dea60 --- /dev/null +++ b/titles/plugins-rhdh-install/artifacts @@ -0,0 +1 @@ +../../artifacts \ No newline at end of file diff --git a/titles/plugins-rhdh-install/assemblies b/titles/plugins-rhdh-install/assemblies new file mode 120000 index 0000000000..91646274db --- /dev/null +++ b/titles/plugins-rhdh-install/assemblies @@ -0,0 +1 @@ +../../assemblies \ No newline at end of file diff --git a/titles/plugins-rhdh-install/docinfo.xml b/titles/plugins-rhdh-install/docinfo.xml new file mode 100644 index 0000000000..e4b171f26e --- /dev/null +++ 
b/titles/plugins-rhdh-install/docinfo.xml @@ -0,0 +1,13 @@ +{title} +{product} +{product-version} + + + + {abstract} + + + {company-name} Customer Content Services + + diff --git a/titles/plugins-rhdh-install/images b/titles/plugins-rhdh-install/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/titles/plugins-rhdh-install/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/titles/plugins-rhdh-install/master.adoc b/titles/plugins-rhdh-install/master.adoc new file mode 100644 index 0000000000..77b0ccc9af --- /dev/null +++ b/titles/plugins-rhdh-install/master.adoc @@ -0,0 +1,10 @@ +include::artifacts/attributes.adoc[] +:context: title-plugins-rhdh-about +:doctype: book +:imagesdir: images + +:title: Installing and viewing dynamic plugins +:subtitle: Installing dynamic plugins +:abstract: Administrative users can install and configure plugins to enable other users to use plugins to extend {product-very-short} functionality. + +include::assemblies/dynamic-plugins/assembly-installing-rhdh-plugins.adoc[] \ No newline at end of file diff --git a/titles/plugins-rhdh-install/modules b/titles/plugins-rhdh-install/modules new file mode 120000 index 0000000000..8b0e854007 --- /dev/null +++ b/titles/plugins-rhdh-install/modules @@ -0,0 +1 @@ +../../modules \ No newline at end of file diff --git a/titles/plugins-rhdh-reference/artifacts b/titles/plugins-rhdh-reference/artifacts new file mode 120000 index 0000000000..f30b6dea60 --- /dev/null +++ b/titles/plugins-rhdh-reference/artifacts @@ -0,0 +1 @@ +../../artifacts \ No newline at end of file diff --git a/titles/plugins-rhdh-reference/assemblies b/titles/plugins-rhdh-reference/assemblies new file mode 120000 index 0000000000..91646274db --- /dev/null +++ b/titles/plugins-rhdh-reference/assemblies @@ -0,0 +1 @@ +../../assemblies \ No newline at end of file diff --git a/titles/plugins-rhdh-reference/docinfo.xml b/titles/plugins-rhdh-reference/docinfo.xml new file mode 100644 index 
0000000000..e4b171f26e --- /dev/null +++ b/titles/plugins-rhdh-reference/docinfo.xml @@ -0,0 +1,13 @@ +{title} +{product} +{product-version} + + + + {abstract} + + + {company-name} Customer Content Services + + diff --git a/titles/plugins-rhdh-reference/images b/titles/plugins-rhdh-reference/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/titles/plugins-rhdh-reference/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/titles/plugins-rhdh-reference/master.adoc b/titles/plugins-rhdh-reference/master.adoc new file mode 100644 index 0000000000..0b8b2c03c6 --- /dev/null +++ b/titles/plugins-rhdh-reference/master.adoc @@ -0,0 +1,10 @@ +include::artifacts/attributes.adoc[] +:context: title-plugins-rhdh-about +:doctype: book +:imagesdir: images + +:title: Dynamic plugins reference +:subtitle: Working with plugins in {product-very-short} +:abstract: {product} is preinstalled with a selection of dynamic plugins that users can enable and configure to extend {product-very-short} functionality. 
+ +include::assemblies/dynamic-plugins/assembly-rhdh-installing-dynamic-plugins.adoc[] \ No newline at end of file diff --git a/titles/plugins-rhdh-reference/modules b/titles/plugins-rhdh-reference/modules new file mode 120000 index 0000000000..8b0e854007 --- /dev/null +++ b/titles/plugins-rhdh-reference/modules @@ -0,0 +1 @@ +../../modules \ No newline at end of file diff --git a/titles/plugins-rhdh-troubleshooting/artifacts b/titles/plugins-rhdh-troubleshooting/artifacts new file mode 120000 index 0000000000..f30b6dea60 --- /dev/null +++ b/titles/plugins-rhdh-troubleshooting/artifacts @@ -0,0 +1 @@ +../../artifacts \ No newline at end of file diff --git a/titles/plugins-rhdh-troubleshooting/assemblies b/titles/plugins-rhdh-troubleshooting/assemblies new file mode 120000 index 0000000000..91646274db --- /dev/null +++ b/titles/plugins-rhdh-troubleshooting/assemblies @@ -0,0 +1 @@ +../../assemblies \ No newline at end of file diff --git a/titles/plugins-rhdh-troubleshooting/docinfo.xml b/titles/plugins-rhdh-troubleshooting/docinfo.xml new file mode 100644 index 0000000000..e7fae0f66e --- /dev/null +++ b/titles/plugins-rhdh-troubleshooting/docinfo.xml @@ -0,0 +1,13 @@ +{title} +{product} +{product-version} +{subtitle} + + + {abstract} + + + {company-name} Customer Content Services + + diff --git a/titles/plugins-rhdh-troubleshooting/images b/titles/plugins-rhdh-troubleshooting/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/titles/plugins-rhdh-troubleshooting/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/titles/plugins-rhdh-troubleshooting/master.adoc b/titles/plugins-rhdh-troubleshooting/master.adoc new file mode 100644 index 0000000000..c733fa08f7 --- /dev/null +++ b/titles/plugins-rhdh-troubleshooting/master.adoc @@ -0,0 +1,13 @@ +include::artifacts/attributes.adoc[] +:context: title-plugins-rhdh-about +:doctype: book +:imagesdir: images +:title: Troubleshooting {product} plugins +:subtitle: 
Troubleshooting {product-very-short} plugins +:abstract: This document describes how to resolve common problems with {product}. +//[id="{context}"] +//= {title} + +//{abstract} + +include::assemblies/dynamic-plugins/assembly-troubleshooting-rhdh-plugins.adoc[] \ No newline at end of file diff --git a/titles/plugins-rhdh-troubleshooting/modules b/titles/plugins-rhdh-troubleshooting/modules new file mode 120000 index 0000000000..8b0e854007 --- /dev/null +++ b/titles/plugins-rhdh-troubleshooting/modules @@ -0,0 +1 @@ +../../modules \ No newline at end of file diff --git a/titles/plugins-rhdh-using/artifacts b/titles/plugins-rhdh-using/artifacts new file mode 120000 index 0000000000..f30b6dea60 --- /dev/null +++ b/titles/plugins-rhdh-using/artifacts @@ -0,0 +1 @@ +../../artifacts \ No newline at end of file diff --git a/titles/plugins-rhdh-using/assemblies b/titles/plugins-rhdh-using/assemblies new file mode 120000 index 0000000000..91646274db --- /dev/null +++ b/titles/plugins-rhdh-using/assemblies @@ -0,0 +1 @@ +../../assemblies \ No newline at end of file diff --git a/titles/plugins-rhdh-using/docinfo.xml b/titles/plugins-rhdh-using/docinfo.xml new file mode 100644 index 0000000000..e4b171f26e --- /dev/null +++ b/titles/plugins-rhdh-using/docinfo.xml @@ -0,0 +1,13 @@ +{title} +{product} +{product-version} + + + + {abstract} + + + {company-name} Customer Content Services + + diff --git a/titles/plugins-rhdh-using/images b/titles/plugins-rhdh-using/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/titles/plugins-rhdh-using/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/titles/plugins-rhdh-using/master.adoc b/titles/plugins-rhdh-using/master.adoc new file mode 100644 index 0000000000..512fadb05f --- /dev/null +++ b/titles/plugins-rhdh-using/master.adoc @@ -0,0 +1,13 @@ +include::artifacts/attributes.adoc[] +:context: title-plugins-rhdh-using +:doctype: book +:imagesdir: images +:title: Using dynamic plugins 
+:subtitle: Using {product-very-short} plugins as an end-user +:abstract: The following sections provide information about how you can use {product-very-short} dynamic plugins. +//[id="{context}"] +//= {title} + +//{abstract} + +include::assemblies/dynamic-plugins/assembly-using-rhdh-plugins.adoc[] \ No newline at end of file diff --git a/titles/plugins-rhdh-using/modules b/titles/plugins-rhdh-using/modules new file mode 120000 index 0000000000..8b0e854007 --- /dev/null +++ b/titles/plugins-rhdh-using/modules @@ -0,0 +1 @@ +../../modules \ No newline at end of file diff --git a/titles/rel-notes-rhdh/artifacts b/titles/rel-notes-rhdh/artifacts new file mode 120000 index 0000000000..f30b6dea60 --- /dev/null +++ b/titles/rel-notes-rhdh/artifacts @@ -0,0 +1 @@ +../../artifacts \ No newline at end of file diff --git a/titles/rel-notes-rhdh/assemblies b/titles/rel-notes-rhdh/assemblies new file mode 120000 index 0000000000..91646274db --- /dev/null +++ b/titles/rel-notes-rhdh/assemblies @@ -0,0 +1 @@ +../../assemblies \ No newline at end of file diff --git a/titles/rel-notes-rhdh/docinfo.xml b/titles/rel-notes-rhdh/docinfo.xml new file mode 100644 index 0000000000..4c6026ca94 --- /dev/null +++ b/titles/rel-notes-rhdh/docinfo.xml @@ -0,0 +1,11 @@ +{title} +{product} +{product-version} +{subtitle} + + Red Hat Developer Hub is a developer platform for building developer portals. This document contains release notes for the {product} {product-version}. 
+ + + Red Hat Customer Content Services + + diff --git a/titles/rel-notes-rhdh/images b/titles/rel-notes-rhdh/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/titles/rel-notes-rhdh/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/titles/rel-notes-rhdh/master.adoc b/titles/rel-notes-rhdh/master.adoc new file mode 100644 index 0000000000..7afea95af8 --- /dev/null +++ b/titles/rel-notes-rhdh/master.adoc @@ -0,0 +1 @@ +include::title-rhdh-release-notes.adoc[] \ No newline at end of file diff --git a/titles/rel-notes-rhdh/modules b/titles/rel-notes-rhdh/modules new file mode 120000 index 0000000000..8b0e854007 --- /dev/null +++ b/titles/rel-notes-rhdh/modules @@ -0,0 +1 @@ +../../modules \ No newline at end of file diff --git a/titles/rel-notes-rhdh/title-rhdh-release-notes.adoc b/titles/rel-notes-rhdh/title-rhdh-release-notes.adoc new file mode 100644 index 0000000000..2d9a1fd33d --- /dev/null +++ b/titles/rel-notes-rhdh/title-rhdh-release-notes.adoc @@ -0,0 +1,37 @@ +[id='title-rhdh-release-notes'] +include::artifacts/attributes.adoc[] +:title: Release notes +:subtitle: Release notes for {product} {product-version} += {title}: {subtitle} +:context: release-notes-rhdh +:doctype: book +:imagesdir: images + +{product} ({product-short}) {product-version} is now generally available. {product-short} is a fully supported, enterprise-grade productized version of upstream Backstage v{product-backstage-version}. You can access and download the {product} application from the {company-name} https://access.redhat.com/downloads[Customer Portal] or from the https://catalog.redhat.com/search?gs&q=rhdh&searchType=containers[Ecosystem Catalog]. 
+ +//customer support links +include::artifacts/snip-customer-support-info.adoc[] + +//includes +include::modules/release-notes/con-release-notes-overview.adoc[leveloffset=+1] + + +include::assemblies/assembly-release-notes-new-features.adoc[leveloffset=+1] + + +include::assemblies/assembly-release-notes-breaking-changes.adoc[leveloffset=+1] + + +include::assemblies/assembly-release-notes-deprecated-functionalities.adoc[leveloffset=+1] + + +include::assemblies/assembly-release-notes-technology-preview.adoc[leveloffset=+1] + + +include::assemblies/assembly-release-notes-fixed-issues.adoc[leveloffset=+1] + + +include::assemblies/assembly-release-notes-fixed-security-issues.adoc[leveloffset=+1] + + +include::assemblies/assembly-release-notes-known-issues.adoc[leveloffset=+1] diff --git a/titles/upgrade-rhdh/artifacts b/titles/upgrade-rhdh/artifacts new file mode 120000 index 0000000000..f30b6dea60 --- /dev/null +++ b/titles/upgrade-rhdh/artifacts @@ -0,0 +1 @@ +../../artifacts \ No newline at end of file diff --git a/titles/upgrade-rhdh/assemblies b/titles/upgrade-rhdh/assemblies new file mode 120000 index 0000000000..91646274db --- /dev/null +++ b/titles/upgrade-rhdh/assemblies @@ -0,0 +1 @@ +../../assemblies \ No newline at end of file diff --git a/titles/upgrade-rhdh/docinfo.xml b/titles/upgrade-rhdh/docinfo.xml new file mode 100644 index 0000000000..7ad28a1c63 --- /dev/null +++ b/titles/upgrade-rhdh/docinfo.xml @@ -0,0 +1,13 @@ +Upgrading {product} +{product} +{product-version} + + + + You can upgrade a {product} instance to a later version from the {ocp-short} web console using either the Operator or the Helm chart. 
+ + + {company-name} Customer Content Services + + diff --git a/titles/upgrade-rhdh/images b/titles/upgrade-rhdh/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/titles/upgrade-rhdh/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/titles/upgrade-rhdh/master.adoc b/titles/upgrade-rhdh/master.adoc new file mode 100644 index 0000000000..c3dd75ddca --- /dev/null +++ b/titles/upgrade-rhdh/master.adoc @@ -0,0 +1,10 @@ +[id="title-upgrade-rhdh"] +include::artifacts/attributes.adoc[] += Upgrading {product} +:context: title-upgrade-rhdh +:doctype: book +:imagesdir: images + +include::modules/upgrade/proc-upgrade-rhdh-operator.adoc[leveloffset=+1] + +include::modules/upgrade/proc-upgrade-rhdh-helm.adoc[leveloffset=+1] diff --git a/titles/upgrade-rhdh/modules b/titles/upgrade-rhdh/modules new file mode 120000 index 0000000000..8b0e854007 --- /dev/null +++ b/titles/upgrade-rhdh/modules @@ -0,0 +1 @@ +../../modules \ No newline at end of file