diff --git a/dev/404.html b/dev/404.html
new file mode 100644
index 000000000..2eaef7973
--- /dev/null
+++ b/dev/404.html
@@ -0,0 +1,1956 @@
+[generated MkDocs Material page markup: head, title "Kapitan: Keep your ship together", header, and navigation]
+404 - Not found
+[footer, scripts, and closing markup]
\ No newline at end of file
diff --git a/dev/ADOPTERS/index.html b/dev/ADOPTERS/index.html
new file mode 100644
index 000000000..c8e9b93f7
--- /dev/null
+++ b/dev/ADOPTERS/index.html
@@ -0,0 +1,2009 @@
+[generated MkDocs Material page markup: head, title "Trusted by - Kapitan: Keep your ship together", header, and navigation]
+Who uses Kapitan
+
+If you're using Kapitan in your organization, please let us know by adding it to this list in the docs/ADOPTERS.md file.
+  • Airfund
+  • Isomorphic Labs
+  • Morgan Stanley
+  • neXenio GmbH
+  • noon
+  • Phaidra
+  • SoftwareAG
+  • Skyscanner
+  • Synthace
+  • RingCentral
+  • Ubisoft
+  • VSHN AG
+  • Flower Labs
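The "Trusted by" page above is rendered from docs/ADOPTERS.md, so getting listed means adding a Markdown entry to that file, usually together with a logo under docs/adopters_logos/ (the same logos that appear as binary files later in this diff). The snippet below is only a minimal sketch under that assumption: the organization name, URL, and logo filename are placeholders, and the exact layout should be copied from the existing entries in the file.

    <!-- hypothetical docs/ADOPTERS.md entry; mirror the format of the existing items -->
    * ![Example Org logo](adopters_logos/example-org.png) [Example Org](https://example.org)

Such a change goes in as a pull request against docs/ADOPTERS.md; what this diff shows is the rendered dev/ output produced from it by the documentation build.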
+[footer, scripts, and closing markup]
\ No newline at end of file
diff --git a/dev/CNAME b/dev/CNAME
new file mode 100644
index 000000000..a6b29dc21
--- /dev/null
+++ b/dev/CNAME
@@ -0,0 +1 @@
+kapitan.dev
\ No newline at end of file
diff --git a/dev/FAQ/index.html b/dev/FAQ/index.html
new file mode 100644
index 000000000..78f41968c
--- /dev/null
+++ b/dev/FAQ/index.html
@@ -0,0 +1,2041 @@
+[generated MkDocs Material page markup: head, title "FAQ - Kapitan: Keep your ship together", header, and navigation]
+FAQ
+
+Why do I need Kapitan?
+
+See Why do I need Kapitan?
+
+Ask your question
+
+Please use the comments facility below to ask your question.
+[footer, scripts, and closing markup]
\ No newline at end of file
diff --git a/dev/adopters_logos/airfund.png b/dev/adopters_logos/airfund.png
new file mode 100644
index 000000000..0632f5099
Binary files /dev/null and b/dev/adopters_logos/airfund.png differ
diff --git a/dev/adopters_logos/flower.png b/dev/adopters_logos/flower.png
new file mode 100644
index 000000000..3d6acf1bf
Binary files /dev/null and b/dev/adopters_logos/flower.png differ
diff --git a/dev/adopters_logos/isomorphiclabs.png b/dev/adopters_logos/isomorphiclabs.png
new file mode 100644
index 000000000..5dd01d5d4
Binary files /dev/null and b/dev/adopters_logos/isomorphiclabs.png differ
diff --git a/dev/adopters_logos/morganstanley.png b/dev/adopters_logos/morganstanley.png
new file mode 100644
index 000000000..1866c614a
Binary files /dev/null and b/dev/adopters_logos/morganstanley.png differ
diff --git a/dev/adopters_logos/nexenio.png b/dev/adopters_logos/nexenio.png
new file mode 100644
index 000000000..6b262b945
Binary files /dev/null and b/dev/adopters_logos/nexenio.png differ
diff --git a/dev/adopters_logos/noon.png b/dev/adopters_logos/noon.png
new file mode 100644
index 000000000..869776570
Binary files /dev/null and b/dev/adopters_logos/noon.png differ
diff --git a/dev/adopters_logos/phaidra.png b/dev/adopters_logos/phaidra.png
new file mode 100644
index 000000000..aae50caff
Binary files /dev/null and b/dev/adopters_logos/phaidra.png differ
diff --git a/dev/adopters_logos/ringcentral.png b/dev/adopters_logos/ringcentral.png
new file mode 100644
index 000000000..3dbe327e8
Binary files /dev/null and b/dev/adopters_logos/ringcentral.png differ
diff --git a/dev/adopters_logos/skyscanner.png b/dev/adopters_logos/skyscanner.png
new file mode 100644
index 000000000..150269e26
Binary files /dev/null and b/dev/adopters_logos/skyscanner.png differ
diff --git a/dev/adopters_logos/softwareag.png b/dev/adopters_logos/softwareag.png
new file mode 100644
index 000000000..ae9b5e271
Binary files /dev/null and b/dev/adopters_logos/softwareag.png differ
diff --git a/dev/adopters_logos/synthace.png b/dev/adopters_logos/synthace.png
new file mode 100644
index 000000000..b257cc64a
Binary files /dev/null and b/dev/adopters_logos/synthace.png differ
diff --git a/dev/adopters_logos/ubisoft.png b/dev/adopters_logos/ubisoft.png
new file mode 100644
index 000000000..168a4b9ac
Binary files /dev/null and b/dev/adopters_logos/ubisoft.png differ
diff --git a/dev/adopters_logos/vhsn.png b/dev/adopters_logos/vhsn.png
new file mode 100644
index 000000000..18304584b
Binary files /dev/null and b/dev/adopters_logos/vhsn.png differ
diff --git a/dev/assets/images/favicon.png b/dev/assets/images/favicon.png
new file mode 100644
index 000000000..1cf13b9f9
Binary files /dev/null and b/dev/assets/images/favicon.png differ
diff --git a/dev/assets/javascripts/bundle.8fd75fb4.min.js b/dev/assets/javascripts/bundle.8fd75fb4.min.js
new file mode 100644
index 000000000..2f2f130e2
--- /dev/null
+++ b/dev/assets/javascripts/bundle.8fd75fb4.min.js
@@ -0,0 +1,29 @@
+[minified Material for MkDocs JavaScript bundle, 29 lines, including the focus-visible polyfill, clipboard.js v2.0.11, and escape-html]
T(a,Xn(s,{worker$:n,query$:a})).pipe($e(...ne("search-share",e).map(c=>Zn(c,{query$:a})),...ne("search-suggest",e).map(c=>ei(c,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,qe}}function ri(e,{index$:t,location$:r}){return Q([t,r.pipe(q(ve()),g(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>Yn(o.config)(n.searchParams.get("h"))),m(o=>{var s;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let a=i.nextNode();a;a=i.nextNode())if((s=a.parentElement)!=null&&s.offsetHeight){let c=a.textContent,p=o(c);p.length>c.length&&n.set(a,p)}for(let[a,c]of n){let{childNodes:p}=E("span",null,c);a.replaceWith(...Array.from(p))}return{ref:e,nodes:n}}))}function Ya(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return Q([r,t]).pipe(m(([{offset:i,height:s},{offset:{y:a}}])=>(s=s+Math.min(n,Math.max(0,a-i))-n,{height:s,locked:a>=i+n})),Y((i,s)=>i.height===s.height&&i.locked===s.locked))}function Qr(e,o){var n=o,{header$:t}=n,r=to(n,["header$"]);let i=P(".md-sidebar__scrollwrap",e),{y:s}=Ue(i);return H(()=>{let a=new v,c=a.pipe(ee(),oe(!0)),p=a.pipe(Me(0,de));return p.pipe(ae(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*s}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),p.pipe(He()).subscribe(()=>{for(let l of R(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=pe(f);f.scrollTo({top:u-h/2})}}}),fe(R("label[tabindex]",e)).pipe(re(l=>d(l,"click").pipe(Oe(ie),m(()=>l),U(c)))).subscribe(l=>{let f=P(`[id="${l.htmlFor}"]`);P(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),Ya(e,r).pipe(y(l=>a.next(l)),_(()=>a.complete()),m(l=>F({ref:e},l)))})}function oi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return Lt(De(`${r}/releases/latest`).pipe(he(()=>L),m(o=>({version:o.tag_name})),Qe({})),De(r).pipe(he(()=>L),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),Qe({}))).pipe(m(([o,n])=>F(F({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return De(r).pipe(m(o=>({repositories:o.public_repos})),Qe({}))}}function ni(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return De(r).pipe(he(()=>L),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),Qe({}))}function ii(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return oi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return ni(r,o)}return L}var Ba;function Ga(e){return Ba||(Ba=H(()=>{let t=__md_get("__source",sessionStorage);if(t)return $(t);if(ne("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return L}return ii(e.href).pipe(y(o=>__md_set("__source",o,sessionStorage)))}).pipe(he(()=>L),g(t=>Object.keys(t).length>0),m(t=>({facts:t})),B(1)))}function ai(e){let t=P(":scope > :last-child",e);return H(()=>{let r=new v;return r.subscribe(({facts:o})=>{t.appendChild(bn(o)),t.classList.add("md-source__repository--active")}),Ga(e).pipe(y(o=>r.next(o)),_(()=>r.complete()),m(o=>F({ref:e},o)))})}function Ja(e,{viewport$:t,header$:r}){return Ee(document.body).pipe(b(()=>pr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),X("hidden"))}function si(e,t){return H(()=>{let r=new v;return 
r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(G("navigation.tabs.sticky")?$({hidden:!1}):Ja(e,t)).pipe(y(o=>r.next(o)),_(()=>r.complete()),m(o=>F({ref:e},o)))})}function Xa(e,{viewport$:t,header$:r}){let o=new Map,n=R(".md-nav__link",e);for(let a of n){let c=decodeURIComponent(a.hash.substring(1)),p=me(`[id="${c}"]`);typeof p!="undefined"&&o.set(a,p)}let i=r.pipe(X("height"),m(({height:a})=>{let c=Te("main"),p=P(":scope > :first-child",c);return a+.8*(p.offsetTop-c.offsetTop)}),le());return Ee(document.body).pipe(X("height"),b(a=>H(()=>{let c=[];return $([...o].reduce((p,[l,f])=>{for(;c.length&&o.get(c[c.length-1]).tagName>=f.tagName;)c.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let h=f.offsetParent;for(;h;h=h.offsetParent)u+=h.offsetTop;return p.set([...c=[...c,l]].reverse(),u)},new Map))}).pipe(m(c=>new Map([...c].sort(([,p],[,l])=>p-l))),je(i),b(([c,p])=>t.pipe(Rr(([l,f],{offset:{y:u},size:h})=>{let w=u+h.height>=Math.floor(a.height);for(;f.length;){let[,A]=f[0];if(A-p=u&&!w)f=[l.pop(),...f];else break}return[l,f]},[[],[...c]]),Y((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([a,c])=>({prev:a.map(([p])=>p),next:c.map(([p])=>p)})),q({prev:[],next:[]}),Ke(2,1),m(([a,c])=>a.prev.length{let i=new v,s=i.pipe(ee(),oe(!0));if(i.subscribe(({prev:a,next:c})=>{for(let[p]of c)p.classList.remove("md-nav__link--passed"),p.classList.remove("md-nav__link--active");for(let[p,[l]]of a.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",p===a.length-1)}),G("toc.follow")){let a=T(t.pipe(be(1),m(()=>{})),t.pipe(be(250),m(()=>"smooth")));i.pipe(g(({prev:c})=>c.length>0),je(o.pipe(Oe(ie))),ae(a)).subscribe(([[{prev:c}],p])=>{let[l]=c[c.length-1];if(l.offsetHeight){let f=sr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=pe(f);f.scrollTo({top:u-h/2,behavior:p})}}})}return G("navigation.tracking")&&t.pipe(U(s),X("offset"),be(250),Le(1),U(n.pipe(Le(1))),at({delay:250}),ae(i)).subscribe(([,{prev:a}])=>{let c=ve(),p=a[a.length-1];if(p&&p.length){let[l]=p,{hash:f}=new URL(l.href);c.hash!==f&&(c.hash=f,history.replaceState({},"",`${c}`))}else c.hash="",history.replaceState({},"",`${c}`)}),Xa(e,{viewport$:t,header$:r}).pipe(y(a=>i.next(a)),_(()=>i.complete()),m(a=>F({ref:e},a)))})}function Za(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:s}})=>s),Ke(2,1),m(([s,a])=>s>a&&a>0),Y()),i=r.pipe(m(({active:s})=>s));return Q([i,n]).pipe(m(([s,a])=>!(s&&a)),Y(),U(o.pipe(Le(1))),oe(!0),at({delay:250}),m(s=>({hidden:s})))}function pi(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new v,s=i.pipe(ee(),oe(!0));return i.subscribe({next({hidden:a}){e.hidden=a,a?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(U(s),X("height")).subscribe(({height:a})=>{e.style.top=`${a+16}px`}),d(e,"click").subscribe(a=>{a.preventDefault(),window.scrollTo({top:0})}),Za(e,{viewport$:t,main$:o,target$:n}).pipe(y(a=>i.next(a)),_(()=>i.complete()),m(a=>F({ref:e},a)))}function li({document$:e}){e.pipe(b(()=>R(".md-ellipsis")),re(t=>yt(t).pipe(U(e.pipe(Le(1))),g(r=>r),m(()=>t),ye(1))),g(t=>t.offsetWidth{let r=t.innerText,o=t.closest("a")||t;return o.title=r,Ge(o).pipe(U(e.pipe(Le(1))),_(()=>o.removeAttribute("title")))})).subscribe(),e.pipe(b(()=>R(".md-status")),re(t=>Ge(t))).subscribe()}function 
mi({document$:e,tablet$:t}){e.pipe(b(()=>R(".md-toggle--indeterminate")),y(r=>{r.indeterminate=!0,r.checked=!1}),re(r=>d(r,"change").pipe(Fr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),ae(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function es(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function fi({document$:e}){e.pipe(b(()=>R("[data-md-scrollfix]")),y(t=>t.removeAttribute("data-md-scrollfix")),g(es),re(t=>d(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function ui({viewport$:e,tablet$:t}){Q([We("search"),t]).pipe(m(([r,o])=>r&&!o),b(r=>$(r).pipe(Ye(r?400:100))),ae(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function ts(){return location.protocol==="file:"?gt(`${new URL("search/search_index.js",Yr.base)}`).pipe(m(()=>__index),B(1)):De(new URL("search/search_index.json",Yr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var rt=No(),Rt=Jo(),wt=en(Rt),Br=Go(),_e=pn(),ur=At("(min-width: 960px)"),hi=At("(min-width: 1220px)"),bi=tn(),Yr=we(),vi=document.forms.namedItem("search")?ts():qe,Gr=new v;Wn({alert$:Gr});var Jr=new v;G("navigation.instant")&&zn({location$:Rt,viewport$:_e,progress$:Jr}).subscribe(rt);var di;((di=Yr.version)==null?void 0:di.provider)==="mike"&&Gn({document$:rt});T(Rt,wt).pipe(Ye(125)).subscribe(()=>{Be("drawer",!1),Be("search",!1)});Br.pipe(g(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=me("link[rel=prev]");typeof t!="undefined"&&st(t);break;case"n":case".":let r=me("link[rel=next]");typeof r!="undefined"&&st(r);break;case"Enter":let o=Re();o instanceof HTMLLabelElement&&o.click()}});li({document$:rt});mi({document$:rt,tablet$:ur});fi({document$:rt});ui({viewport$:_e,tablet$:ur});var 
tt=Rn(Te("header"),{viewport$:_e}),$t=rt.pipe(m(()=>Te("main")),b(e=>Fn(e,{viewport$:_e,header$:tt})),B(1)),rs=T(...ne("consent").map(e=>fn(e,{target$:wt})),...ne("dialog").map(e=>$n(e,{alert$:Gr})),...ne("header").map(e=>Pn(e,{viewport$:_e,header$:tt,main$:$t})),...ne("palette").map(e=>jn(e)),...ne("progress").map(e=>Un(e,{progress$:Jr})),...ne("search").map(e=>ti(e,{index$:vi,keyboard$:Br})),...ne("source").map(e=>ai(e))),os=H(()=>T(...ne("announce").map(e=>mn(e)),...ne("content").map(e=>Hn(e,{viewport$:_e,target$:wt,print$:bi})),...ne("content").map(e=>G("search.highlight")?ri(e,{index$:vi,location$:Rt}):L),...ne("header-title").map(e=>In(e,{viewport$:_e,header$:tt})),...ne("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Ur(hi,()=>Qr(e,{viewport$:_e,header$:tt,main$:$t})):Ur(ur,()=>Qr(e,{viewport$:_e,header$:tt,main$:$t}))),...ne("tabs").map(e=>si(e,{viewport$:_e,header$:tt})),...ne("toc").map(e=>ci(e,{viewport$:_e,header$:tt,main$:$t,target$:wt})),...ne("top").map(e=>pi(e,{viewport$:_e,header$:tt,main$:$t,target$:wt})))),gi=rt.pipe(b(()=>os),$e(rs),B(1));gi.subscribe();window.document$=rt;window.location$=Rt;window.target$=wt;window.keyboard$=Br;window.viewport$=_e;window.tablet$=ur;window.screen$=hi;window.print$=bi;window.alert$=Gr;window.progress$=Jr;window.component$=gi;})(); +//# sourceMappingURL=bundle.8fd75fb4.min.js.map + diff --git a/dev/assets/javascripts/bundle.8fd75fb4.min.js.map b/dev/assets/javascripts/bundle.8fd75fb4.min.js.map new file mode 100644 index 000000000..1a287892a --- /dev/null +++ b/dev/assets/javascripts/bundle.8fd75fb4.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/clipboard/dist/clipboard.js", "node_modules/escape-html/index.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/rxjs/node_modules/tslib/tslib.es6.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", 
"node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", 
"node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", 
"src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? 
define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. 
Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n 
document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. 
This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. 
You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? 
Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && 
value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName 
=== 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) {\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n 
var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? html + str.substring(lastIndex, index)\n : html;\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * 
------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n .map(el => 
mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/*! *****************************************************************************\r\nCopyright (c) Microsoft Corporation.\r\n\r\nPermission to use, copy, modify, and/or distribute this software for any\r\npurpose with or without fee is hereby granted.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\r\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\r\nAND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\r\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\r\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\r\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\r\nPERFORMANCE OF THIS SOFTWARE.\r\n***************************************************************************** */\r\n/* global Reflect, Promise */\r\n\r\nvar extendStatics = function(d, b) {\r\n extendStatics = Object.setPrototypeOf ||\r\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\r\n return extendStatics(d, b);\r\n};\r\n\r\nexport function __extends(d, b) {\r\n if (typeof b !== \"function\" && b !== null)\r\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\r\n extendStatics(d, b);\r\n function __() { this.constructor = d; }\r\n d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());\r\n}\r\n\r\nexport var __assign = function() {\r\n __assign = Object.assign || function __assign(t) {\r\n for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n s = arguments[i];\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n }\r\n return t;\r\n }\r\n return __assign.apply(this, arguments);\r\n}\r\n\r\nexport function __rest(s, e) {\r\n var t = {};\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n t[p] = s[p];\r\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n t[p[i]] = s[p[i]];\r\n }\r\n return t;\r\n}\r\n\r\nexport function __decorate(decorators, target, key, desc) {\r\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\r\n return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n}\r\n\r\nexport function __param(paramIndex, decorator) {\r\n return function (target, key) { decorator(target, key, paramIndex); }\r\n}\r\n\r\nexport function __metadata(metadataKey, metadataValue) {\r\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n}\r\n\r\nexport function __awaiter(thisArg, _arguments, P, generator) {\r\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\r\n return new (P || (P = Promise))(function (resolve, reject) {\r\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n });\r\n}\r\n\r\nexport function __generator(thisArg, body) {\r\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n function verb(n) { return function (v) { return step([n, v]); }; }\r\n function step(op) {\r\n if (f) throw new TypeError(\"Generator is already executing.\");\r\n while (_) try {\r\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n if (y = 0, t) op = [op[0] & 2, t.value];\r\n switch (op[0]) {\r\n case 0: case 1: t = op; break;\r\n case 4: _.label++; return { value: op[1], done: false };\r\n case 5: _.label++; y = op[1]; op = [0]; continue;\r\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n default:\r\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n if (t[2]) _.ops.pop();\r\n _.trys.pop(); continue;\r\n }\r\n op = body.call(thisArg, _);\r\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n }\r\n}\r\n\r\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });\r\n}) : (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n o[k2] = m[k];\r\n});\r\n\r\nexport function __exportStar(m, o) {\r\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\r\n}\r\n\r\nexport function __values(o) {\r\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n if (m) return m.call(o);\r\n if (o && typeof o.length === \"number\") return {\r\n next: function () {\r\n if (o && i >= o.length) o = void 0;\r\n return { value: o && o[i++], done: !o };\r\n }\r\n };\r\n throw new TypeError(s ? 
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n}\r\n\r\nexport function __read(o, n) {\r\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n if (!m) return o;\r\n var i = m.call(o), r, ar = [], e;\r\n try {\r\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n }\r\n catch (error) { e = { error: error }; }\r\n finally {\r\n try {\r\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n }\r\n finally { if (e) throw e.error; }\r\n }\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spread() {\r\n for (var ar = [], i = 0; i < arguments.length; i++)\r\n ar = ar.concat(__read(arguments[i]));\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spreadArrays() {\r\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n r[k] = a[j];\r\n return r;\r\n}\r\n\r\nexport function __spreadArray(to, from, pack) {\r\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\r\n if (ar || !(i in from)) {\r\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\r\n ar[i] = from[i];\r\n }\r\n }\r\n return to.concat(ar || Array.prototype.slice.call(from));\r\n}\r\n\r\nexport function __await(v) {\r\n return this instanceof __await ? (this.v = v, this) : new __await(v);\r\n}\r\n\r\nexport function __asyncGenerator(thisArg, _arguments, generator) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n function fulfill(value) { resume(\"next\", value); }\r\n function reject(value) { resume(\"throw\", value); }\r\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n}\r\n\r\nexport function __asyncDelegator(o) {\r\n var i, p;\r\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n}\r\n\r\nexport function __asyncValues(o) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var m = o[Symbol.asyncIterator], i;\r\n return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n}\r\n\r\nexport function __makeTemplateObject(cooked, raw) {\r\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n return cooked;\r\n};\r\n\r\nvar __setModuleDefault = Object.create ? (function(o, v) {\r\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\r\n}) : function(o, v) {\r\n o[\"default\"] = v;\r\n};\r\n\r\nexport function __importStar(mod) {\r\n if (mod && mod.__esModule) return mod;\r\n var result = {};\r\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\r\n __setModuleDefault(result, mod);\r\n return result;\r\n}\r\n\r\nexport function __importDefault(mod) {\r\n return (mod && mod.__esModule) ? mod : { default: mod };\r\n}\r\n\r\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\r\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? f.value : state.get(receiver);\r\n}\r\n\r\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\r\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\r\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\r\n}\r\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. 
The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? 
[]).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? [_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. 
For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. 
Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. 
Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. 
This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. 
[omitted: generated JavaScript bundle and source-map assets under dev/assets/javascripts/ — minified code whose embedded sources are the vendored RxJS library modules (Observable, Subscriber, Subject, ReplaySubject, scheduler and utility helpers)]
diff --git a/dev/images/demo.gif b/dev/images/demo.gif
new file mode 100644
index 000000000..5f32f03da
Binary files /dev/null and b/dev/images/demo.gif differ
diff --git a/dev/images/kapitan-demo.gif b/dev/images/kapitan-demo.gif
new file mode 100644
index 000000000..f34ad5092
Binary files /dev/null and b/dev/images/kapitan-demo.gif differ
diff --git a/dev/images/kapitan_logo.png b/dev/images/kapitan_logo.png
new file mode 100644
index 000000000..0f4c1118e
Binary files /dev/null and b/dev/images/kapitan_logo.png differ
diff --git a/dev/images/kapitan_overview.png
b/dev/images/kapitan_overview.png new file mode 100644 index 000000000..e229240fd Binary files /dev/null and b/dev/images/kapitan_overview.png differ diff --git a/dev/images/logo.min.svg b/dev/images/logo.min.svg new file mode 100644 index 000000000..1502f7cae --- /dev/null +++ b/dev/images/logo.min.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/dev/index.html b/dev/index.html new file mode 100644 index 000000000..098bae79d --- /dev/null +++ b/dev/index.html @@ -0,0 +1,2067 @@ + + + + + + + + + + + + + + + + + + + + + + + Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Kapitan: Keep your ship together

+

GitHub Sponsors +GitHub Stars

+

Kapitan aims to be your one-stop configuration management solution to help you manage the ever-growing complexity of your configurations by enabling Platform Engineering and GitOps workflows.

+

It streamlines complex deployments across heterogeneous environments while providing a secure and adaptable framework for managing infrastructure configurations. Kapitan's inventory-driven model, powerful templating capabilities, and native secret management tools offer granular control, fostering consistency, reducing errors, and safeguarding sensitive data.

+

Empower your team to make changes to your infrastructure whilst maintaining full control, with a GitOps approach and full transparency.

+ +

Why do I need Kapitan?

+

Video Tutorials to get started

+
+

Kapitan Youtube Channel

+
+
+
+

+
+
+

+
+
+

+
+
+

+
+
+
+
+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/kap_proposals/kap_0_kadet/index.html b/dev/kap_proposals/kap_0_kadet/index.html new file mode 100644 index 000000000..1cfbb26cf --- /dev/null +++ b/dev/kap_proposals/kap_0_kadet/index.html @@ -0,0 +1,2112 @@ + + + + + + + + + + + + + + + + + + + + + Kadet - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Kadet

+

This introduces a new experimental input type called Kadet.

+

Kadet is essentially a Python module offering a set of classes and functions to define objects which will compile to JSON or YAML. A complete example is available in examples/kubernetes/components/nginx.

+

Author: @ramaro

+

Overview

+

BaseObj

+

BaseObj implements the basic object that compiles into JSON or YAML. +Setting keys in self.root means they will be in the compiled output. Keys can be set as a hierarchy of attributes (courtesy of addict). +The self.body() method is reserved for setting self.root on instantiation:

+

The example below:

+
class MyApp(BaseObj):
+ def body(self):
+   self.root.name = "myapp"
+   self.root.inner.foo = "bar"
+   self.root.list = [1, 2, 3]
+
+

compiles into:

+
---
+name: myapp
+inner:
+  foo: bar
+list:
+  - 1
+  - 2
+  - 3
+
+

The self.new() method can be used to define a basic constructor. +self.need() checks if a key is set and errors if it isn't (with an optional custom error message). +kwargs that are passed to a new instance of BaseObj are always accessible via self.kwargs. +In this example, MyApp needs name and foo to be passed as kwargs.

+
class MyApp(BaseObj):
+ def new(self):
+   self.need("name")
+   self.need("foo", msg="please provide a value for foo")
+
+ def body(self):
+   self.root.name = self.kwargs.name
+   self.root.inner.foo = self.kwargs.foo
+   self.root.list = [1, 2, 3]
+
+obj = MyApp(name="myapp", foo="bar")
+
+

Setting a skeleton

+

Defining a large body with Python can be quite hard and repetitive to read and write. +The self.update_root() method allows importing a YAML/JSON file to set the skeleton of self.root.

+

MyApp's skeleton can be set instead like this:

+
#skel.yml
+---
+name: myapp
+inner:
+  foo: bar
+list:
+  - 1
+  - 2
+  - 3
+
+
class MyApp(BaseObj):
+ def new(self):
+   self.need("name")
+   self.need("foo", msg="please provide a value for foo")
+   self.update_root("path/to/skel.yml")
+
+

Extending a skeleton-based MyApp is possible just by implementing self.body():

+
class MyApp(BaseObj):
+ def new(self):
+   self.need("name")
+   self.need("foo", msg="please provide a value for foo")
+   self.update_root("path/to/skel.yml")
+
+ def body(self):
+   self.set_replicas()
+   self.root.metadata.labels = {"app": "mylabel"}
+
+ def set_replicas(self):
+   self.root.spec.replicas = 5
+
+

Inheritance

+

Python inheritance will work as expected:

+
class MyOtherApp(MyApp):
+  def new(self):
+    super().new()  # MyApp's new()
+    self.need("size")
+
+  def body(self):
+    super().body()  #  we want to extend MyApp's body
+    self.root.size = self.kwargs.size
+    del self.root.list  # get rid of "list"
+
+obj = MyOtherApp(name="otherapp1", foo="bar2", size=3)
+
+

compiles to:

+
---
+name: otherapp1
+inner:
+  foo: bar2
+replicas: 5
+size: 3
+
+

Components

+

A component in Kadet is a Python module that must implement a main() function returning an instance of BaseObj. The inventory is also available via the inventory() function.

+

For example, a tinyapp component:

+
# components/tinyapp/__init__.py
+from kapitan.inputs.kadet import BaseObj, inventory
+inv = inventory() # returns inventory for target being compiled
+
+class TinyApp(BaseObj):
+  def body(self):
+    self.root.foo = "bar"
+    self.root.replicas = inv.parameters.tinyapp.replicas
+
+def main():
+  obj = BaseObj()
+  obj.root.deployment = TinyApp() # will compile into deployment.yml
+  return obj
+
+

An inventory class must be created for tinyapp:

+
# inventory/classes/components/tinyapp.yml
+
+parameters:
+  tinyapp:
+    replicas: 1
+  kapitan:
+    compile:
+    - output_path: manifests
+      input_type: kadet
+      output_type: yaml
+      input_paths:
+        - components/tinyapp
+
+

Common components

+

A library in --search-paths (which now defaults to . and lib/) can also be a module that Kadet components import. It is loaded using load_from_search_paths():

+
kubelib = load_from_search_paths("kubelib") # lib/kubelib/__init__.py
+
+def main():
+  obj = BaseObj()
+  obj.root.example_app_deployment = kubelib.Deployment(name="example-app")
+  return obj
+
+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/kap_proposals/kap_10_azure_key_vault/index.html b/dev/kap_proposals/kap_10_azure_key_vault/index.html new file mode 100644 index 000000000..eb647428d --- /dev/null +++ b/dev/kap_proposals/kap_10_azure_key_vault/index.html @@ -0,0 +1,2038 @@ + + + + + + + + + + + + + + + + + + + + + Support for - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Support for Azure Key Management

+

This feature will enable users to encrypt secrets using keys stored in Azure's Key Vault. The azkms keyword will be used to access the Azure key management backend.

+

Specification

+

key_id uniquely identifies an Azure key object and its version stored in Key Vault. It is of the form https://{keyvault-name}.vault.azure.net/{object-type}/{object-name}/{object-version}. +It needs to be made accessible to kapitan in one of the following ways:

+
    +
  • As a part of target
  • +
+
parameters:
+  kapitan:
+    secrets:
+      azkms:
+        key: key_id #eg https://kapitanbackend.vault.azure.net/keys/myKey/deadbeef
+
+
    +
  • As a flag
  • +
+
kapitan refs --key=<key_id> --write azkms:/path/to/secret -f file_with_secret_data.txt
+
+

Using a key to encrypt a secret

+

The following command will be used to encrypt a secret (using the specified key from Key Vault) and save it in the refs-path along with its metadata:

+
echo "my_treasured_secret"  | kapitan refs --write azkms:path/to/secret_inside_kapitan -t <target_name> -f -
+
+

The -t <target_name> is used to get the information about key_id.

+

Once the secret is Base64 encoded and encrypted using the key, it will be stored in path/to/secret_inside_kapitan as

+
data: bXlfdHJlYXN1cmVkX3NlY3JldAo=
+encoding: original
+key: https://kapitanbackend.vault.azure.net/keys/myKey/deadbeef
+type: azkms
+
+

note The cryptographic algorithm used for encryption would be rsa-oaep-256. Optimal Asymmetric Encryption Padding (OAEP) is a padding scheme often used together with RSA encryption.

+
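For illustration, a minimal sketch of how such an encryption call could look from Python, assuming the azure-identity and azure-keyvault-keys client libraries (this is an assumption for illustration, not the proposal's final implementation):
from azure.identity import DefaultAzureCredential
+from azure.keyvault.keys.crypto import CryptographyClient, EncryptionAlgorithm
+
+# key_id as configured in the target (illustrative value)
+key_id = "https://kapitanbackend.vault.azure.net/keys/myKey/deadbeef"
+
+crypto_client = CryptographyClient(key_id, credential=DefaultAzureCredential())
+
+# Encrypt the secret with RSA-OAEP-256, as described in the note above
+result = crypto_client.encrypt(EncryptionAlgorithm.rsa_oaep_256, b"my_treasured_secret")
+ciphertext = result.ciphertext  # bytes, to be base64 encoded and stored in the ref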

Referencing a secret

+

Secrets can be referred to using ?{azkms:path/to/secret_id}, +e.g.

+
parameter:
+    mysql:
+        storage: 10G
+        storage_class: standard
+        image: mysql:latest
+        users:
+            root:
+                password: ?{azkms:path/to/secret}
+
+

Revealing a secret

+

After compilation, the secret reference will be postfixed with 8 characters from the sha256 hash of the retrieved password/secret

+
apiVersion: v1
+data:
+  MYSQL_ROOT_PASSWORD: ?{azkms:path/to/secret:deadbeef}
+kind: Secret
+metadata:
+  labels:
+    name: example-mysql
+  name: example-mysql
+  namespace: minikube-mysql
+type: Opaque
+
+

To reveal the secret, the following command will be used: +$ kapitan refs --reveal -f compiled/file/containing/secret

+

Dependencies

+ +

note Kapitan will not be responsible for authentication or access management to Azure

+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/kap_proposals/kap_11_hashicorp_vault_transit/index.html b/dev/kap_proposals/kap_11_hashicorp_vault_transit/index.html new file mode 100644 index 000000000..2413d25f2 --- /dev/null +++ b/dev/kap_proposals/kap_11_hashicorp_vault_transit/index.html @@ -0,0 +1,2106 @@ + + + + + + + + + + + + + + + + + + + + + Hashicorp Vault Transit - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Hashicorp Vault Transit

+

This feature allows the user to fetch secrets from Hashicorp Vault, with the new secret backend keyword 'vaulttransit'.

+

Author: @xqp @Moep90

+

Specification

+

The following variables need to be exported to the environment (depending on the authentication used) where you will run kapitan refs --reveal in order to authenticate to your HashiCorp Vault instance:

+
    +
  • VAULT_ADDR: URL for vault
  • +
  • VAULT_SKIP_VERIFY=true: if set, do not verify presented TLS certificate before communicating with Vault server. Setting this variable is not recommended except during testing
  • +
  • VAULT_TOKEN: token for vault or file (~/.vault-tokens)
  • +
  • VAULT_ROLE_ID: required by approle
  • +
  • VAULT_SECRET_ID: required by approle
  • +
  • VAULT_USERNAME: username to login to vault
  • +
  • VAULT_PASSWORD: password to login to vault
  • +
  • VAULT_CLIENT_KEY: the path to an unencrypted PEM-encoded private key matching the client certificate
  • +
  • VAULT_CLIENT_CERT: the path to a PEM-encoded client certificate for TLS authentication to the Vault server
  • +
  • VAULT_CACERT: the path to a PEM-encoded CA cert file to use to verify the Vault server TLS certificate
  • +
  • VAULT_CAPATH: the path to a directory of PEM-encoded CA cert files to verify the Vault server TLS certificate
  • +
  • VAULT_NAMESPACE: specify the Vault Namespace, if you have one
  • +
+

Consider any string data like any.value:whatever-you_may*like (in our case let’s encrypt any.value:whatever-you_may*like with Vault transit) using the key 2022-02-13-test in a transit secret engine with mount mytransit on the Vault server. To use this as a secret, either run:

+
echo "any.value:whatever-you_may*like" > somefile.txt
+kapitan refs --write vaulttransit:<target_name>/to/secret_inside_kapitan --file somefile.txt --target <target_name>
+
+

or in a single line

+
echo "any.value:whatever-you_may*like"  | kapitan refs --write vaulttransit:<target_name>/to/secret_inside_kapitan -t <target_name> -f -
+
+

The entire string "any.value:whatever-you_may*like" will be encrypted by Vault and looks like this in return: vault:v2:Jhn3UzthKcJ2s+sEiO60EUiDmuzqUC4mMBWp2Vjg/DGl+GDFEDIPmAQpc5BdIefkplb6yrJZq63xQ9s=. This then gets base64 encoded and stored in secret_inside_kapitan. Now secret_inside_kapitan contains the following:

+
data: dmF1bHQ6djI6SmhuM1V6dGhLY0oycytzRWlPNjBFVWlEbXV6cVVDNG1NQldwMlZqZy9ER2wrR0RGRURJUG1BUXBjNUJkSWVma3BsYjZ5ckpacTYzeFE5cz0=
+encoding: original
+type: vaulttransit
+vault_params:
+  VAULT_ADDR: http://127.0.0.1:8200
+  VAULT_SKIP_VERIFY: 'True'
+  VAULT_TOKEN: s.i53a1DL83REM61UxlJKLdQDY
+  auth: token
+  crypto_key: key
+  mount: transit
+  always_latest: false
+
+
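As an illustration of what happens under the hood, here is a minimal sketch using the hvac Python client with token auth (values and key names are taken from the example above; this is a sketch, not the exact Kapitan implementation):
import base64
+import hvac
+
+client = hvac.Client(url="http://127.0.0.1:8200", token="s.i53a1DL83REM61UxlJKLdQDY")
+
+# Vault transit expects base64-encoded plaintext
+plaintext = base64.b64encode(b"any.value:whatever-you_may*like").decode()
+
+resp = client.secrets.transit.encrypt_data(
+    name="2022-02-13-test", mount_point="mytransit", plaintext=plaintext
+)
+ciphertext = resp["data"]["ciphertext"]  # e.g. "vault:v2:Jhn3Uzth..."
+
+# Revealing reverses the process
+resp = client.secrets.transit.decrypt_data(
+    name="2022-02-13-test", mount_point="mytransit", ciphertext=ciphertext
+)
+secret = base64.b64decode(resp["data"]["plaintext"]).decode()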

Encoding tells the type of data given to kapitan: if it is original, then after decoding the base64 we'll get the original secret; if it is base64, then after decoding once we still have a base64-encoded secret and have to decode again. +Parameters in the secret file are collected from the inventory of the target we gave on the CLI with --target my_target. If a target isn't provided, then kapitan will identify the variables from the environment, but providing auth is necessary as a key inside the target parameters, like the one shown:

+
parameters:
+  kapitan:
+    vars:
+      target: my_target
+      namespace: my_namespace
+    secrets:
+      vaulttransit:
+        VAULT_ADDR: http://vault.example.com:8200
+        VAULT_TOKEN: s.i53a1DL83REM61UxlJKLdQDY
+        VAULT_SKIP_VERIFY: "True"
+        auth: token
+        mount: transit
+        crypto_key: new_key   
+        always_latest: False
+
+

Environment variables that can be defined in kapitan inventory are VAULT_ADDR, VAULT_NAMESPACE, VAULT_SKIP_VERIFY, VAULT_CLIENT_CERT, VAULT_CLIENT_KEY, VAULT_CAPATH & VAULT_CACERT. +Extra parameters that can be defined in inventory are:

+
    +
  • auth: specify which authentication method to use like token,userpass,ldap,github & approle
  • +
  • mount: specify the mount point of the key's path, e.g. if path=alpha-secret/foo/bar then mount: alpha-secret (default secret)
  • +
  • crypto_key: Name of the encryption key defined in vault
  • +
  • always_latest: Always rewrap ciphertext to latest rotated crypto_key version
+Environment variables that should NOT be defined in the inventory are VAULT_TOKEN, VAULT_USERNAME, VAULT_PASSWORD, VAULT_ROLE_ID, VAULT_SECRET_ID. +This makes the secret_inside_kapitan file accessible throughout the inventory, where we can use the secret whenever necessary, like ?{vaulttransit:${target_name}/secret_inside_kapitan}
  • +
+

Following is the example file having a secret and pointing to the vault ?{vaulttransit:${target_name}/secret_inside_kapitan}

+
parameters:
+  releases:
+    app_version: latest
+  app:
+    image: app:app-tag
+    release: ${releases:app_version}
+    replicas: ${replicas}
+    args:
+      - --verbose=${verbose}
+      - --password=?{vaulttransit:${target_name}/secret_inside_kapitan||random:str}
+
+

When ?{vaulttransit:${target_name}/secret_inside_kapitan} is compiled, it will look the same, with an 8-character sha256 hash suffix added at the end, like:

+
kind: Deployment
+metadata:
+  name: app
+  namespace: my_namespace
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: app
+    spec:
+      containers:
+        - args:
+            - --verbose=True
+            - --password=?{vaulttransit:${target_name}/secret_inside_kapitan||random:str}
+          image: app:app-tag
+          name: app
+
+

Only the user with the required tokens/permissions can reveal the secrets. Please note that the roles and permissions will be handled at the Vault level. We need not worry about it within Kapitan. Use the following command to reveal the secrets:

+
kapitan refs --reveal -f compile/file/containing/secret
+
+

Following is the result of the app-deployment.md file after Kapitan reveal.

+
kind: Deployment
+metadata:
+  name: app
+  namespace: my_namespace
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: app
+    spec:
+      containers:
+        - args:
+            - --verbose=True
+            - --password="any.value:whatever-you_may*like"
+          image: app:app-tag
+          name: app
+
+

Vault policies

+
path "mytransit/encrypt/2022-02-13-test" {
+    capabilities = [ "create", "update" ]
+}
+
+path "mytransit/decrypt/2022-02-13-test" {
+    capabilities = [ "create", "update" ]
+}
+
+

Dependencies

+
    +
  • hvac is a python client for Hashicorp Vault
  • +
+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/kap_proposals/kap_1_external_dependencies/index.html b/dev/kap_proposals/kap_1_external_dependencies/index.html new file mode 100644 index 000000000..af24b4ad7 --- /dev/null +++ b/dev/kap_proposals/kap_1_external_dependencies/index.html @@ -0,0 +1,2006 @@ + + + + + + + + + + + + + + + + + + + + + External dependencies - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

External dependencies

+

This feature allows kapitan to fetch files from online repositories/sources during compile and store them in a particular target directory.

+

Author: @yoshi-1224

+

Specification

+

Specify the files to be fetched as follows:

+
parameters:
+ kapitan:
+  dependencies:
+   - type: git | http[s]
+     output_path: <output_path>
+     source: <git/http[s]_url>    
+
+

The output path is the path to save the dependency into. For example, it could be /components/external/manifest.jsonnet. Then, the user can specify the fetched file as a kapitan.compile item along with the locally-created files.

+

The git type may also include ref and subdir parameters, as illustrated below:

+
- type: git
+  output_path: <output_path>
+  source: <git_url>
+  subdir: relative/path/in/repository
+  ref: <commit_hash/branch/tag>
+  force_fetch: <bool>
+
+

If the file already exists at output_path, the fetch will be skipped. For a fresh fetch of the dependencies, users may add the --fetch option as follows:

+
kapitan compile --fetch
+
+

Users can also add the force_fetch: true option to kapitan.dependencies in the inventory in order to force fetching of the target's dependencies every time.

+

Implementation details

+
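A rough sketch of the proposed fetch-and-skip behaviour for http[s] dependencies (illustrative only; the function and argument names are not part of the proposal):
import os
+import requests
+
+def fetch_http_dependency(source, output_path, force_fetch=False):
+    """Fetch `source` into `output_path`, skipping if it already exists,
+    unless a fresh fetch is forced (--fetch / force_fetch)."""
+    if os.path.exists(output_path) and not force_fetch:
+        return  # dependency already present, skip
+    os.makedirs(os.path.dirname(output_path) or ".", exist_ok=True)
+    resp = requests.get(source, timeout=60)
+    resp.raise_for_status()
+    with open(output_path, "wb") as f:
+        f.write(resp.content)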

Dependencies

+
    +
  • GitPython module (and git executable) for git type
  • +
  • requests module for http[s]
  • +
  • (optional) tqdm for reporting download progress
  • +
+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/kap_proposals/kap_2_helm_charts_input_type/index.html b/dev/kap_proposals/kap_2_helm_charts_input_type/index.html new file mode 100644 index 000000000..f54539b65 --- /dev/null +++ b/dev/kap_proposals/kap_2_helm_charts_input_type/index.html @@ -0,0 +1,2004 @@ + + + + + + + + + + + + + + + + + + + + + Helm Charts Input Type - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Helm Charts Input Type

+

This will allow kapitan, during compilation, to overwrite the values in user-specified helm charts using its inventory by calling the Go & Sprig template libraries. The helm charts can be specified via a local path, and users may download the helm chart via the external-dependency feature (of http[s] type).

+

Author: @yoshi-1224

+

Specification

+

This feature basically follows the available helm template command.
+It will run after the fetching of the external dependencies takes place, so that users can simultaneously specify the fetch as well as the import of a helm chart dependency.

+

Semantics

+
kapitan:
+  compile:
+    - input_type: helm
+      input_path: <path_to_chart_dir> 
+      output_path: <output_path>
+      set-file:
+        - <optional_file_path>
+        - ...
+      values_file: <optional_values_file>
+      namespace: <optional_namespace>
+
+

This mostly maps to the options available to helm template command (refer to here).

+

Implementation details

+

A C-binding between Helm (Go) and Kapitan (Python) will be created. Helm makes use of two template libraries, namely text/template and Sprig. The code for the helm template command will be converted into a shared object (.so) using CGo, which exposes a C interface that kapitan (i.e. CPython) can use. +The source code for the helm template command is found here. This file will be modified to

+
    +
  1. remove redundant options
  2. +
  3. expose C-interface for Kapitan
  4. +
+
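To illustrate how CPython could consume such a shared object, here is a hypothetical ctypes sketch; the library name libtemplate.so and the exported function renderChart are purely illustrative and not defined by this proposal:
import ctypes
+
+# Load the shared object produced by compiling the modified `helm template` code with CGo
+lib = ctypes.CDLL("./libtemplate.so")
+
+# Hypothetical exported C function:
+#   char* renderChart(char* chart_path, char* values_path, char* namespace)
+lib.renderChart.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p]
+lib.renderChart.restype = ctypes.c_char_p
+
+rendered = lib.renderChart(b"charts/mychart", b"values.yaml", b"my-namespace")
+print(rendered.decode())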

Dependencies

+ + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/kap_proposals/kap_3_schema_validation/index.html b/dev/kap_proposals/kap_3_schema_validation/index.html new file mode 100644 index 000000000..5bb680303 --- /dev/null +++ b/dev/kap_proposals/kap_3_schema_validation/index.html @@ -0,0 +1,1998 @@ + + + + + + + + + + + + + + + + + + + + + Schema Validation (for k8s) - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Schema Validation (for k8s)

+

If a yaml/json output is to be used as a k8s manifest, users may specify its kind and have kapitan validate its structure during kapitan compile. +The plan is to have this validation feature extensible to other outputs as well, such as Terraform.

+

Author: @yoshi-1224

+

Specification

+

The following inventory will validate the structure of a Kubernetes Service manifest file in the given output_path.

+
parameters:
+  kapitan:
+    validate:
+       - output_type: kubernetes.service 
+         version: 1.6.6
+         output_path: relative/path/in/target
+
+

The version parameter is optional: if omitted, the version will be set to the stable release of Kubernetes (tbc).

+

Implementation

+
    +
  • The schemas will be downloaded by requests from +this repository.
  • +
  • Caching of schema will also be implemented.
  • +
+
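A minimal sketch of the validation step itself, assuming the schema has already been downloaded and cached (file paths are illustrative):
import json
+
+import yaml
+from jsonschema import validate  # raises jsonschema.ValidationError on failure
+
+with open("schema_cache/v1.6.6/service.json") as f:
+    schema = json.load(f)
+
+with open("compiled/mytarget/manifests/my-service.yml") as f:
+    manifest = yaml.safe_load(f)
+
+# Validate the compiled output against the Kubernetes Service schema
+validate(instance=manifest, schema=schema)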

Dependencies

+
    +
  • jsonschema to validate the output yaml/json against the correct schema
  • +
+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/kap_proposals/kap_4_standalone_executable/index.html b/dev/kap_proposals/kap_4_standalone_executable/index.html new file mode 100644 index 000000000..ee3bb969b --- /dev/null +++ b/dev/kap_proposals/kap_4_standalone_executable/index.html @@ -0,0 +1,1991 @@ + + + + + + + + + + + + + + + + + + + + + Standalone Kapitan Executable (Discontinued) - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Standalone Kapitan Executable (Discontinued)

+

Create a portable (i.e. static) kapitan binary for users. This executable will be made available for each release on Github. The target/tested platform is Debian 9 (possibly Windows to be supported in the future).

+

Criteria:

+
    +
  • speed of the resulting binary
  • +
  • size of the resulting binary
  • +
  • portability of the binary (single-file executable or has an accompanying folder)
  • +
  • cross-platform
  • +
  • actively maintained
  • +
  • supports Python 3.6, 3.7
  • +
+

Author: @yoshi-1224

+

Tools to be explored

+ + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/kap_proposals/kap_5_ref_types_redesign/index.html b/dev/kap_proposals/kap_5_ref_types_redesign/index.html new file mode 100644 index 000000000..72e08e1c4 --- /dev/null +++ b/dev/kap_proposals/kap_5_ref_types_redesign/index.html @@ -0,0 +1,2123 @@ + + + + + + + + + + + + + + + + + + + + + Ref Types Redesign - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Ref Types Redesign

+

Redesign Kapitan Secrets and rename them as References or Ref.

+

Breaking changes:

+
    +
  • $ kapitan secrets is replaced with $ kapitan refs
  • +
  • the default secrets directory ./secrets/ changes to ./refs/
  • +
  • the --secrets-path flag changes to --refs-path
  • +
  • ref ref type is renamed to base64 e.g.?{ref:some/ref} into ?{base64:some/ref}
  • +
+

Status: In progress

+

Author: @ramaro

+

Proposal

+

Rename Secrets into Ref (or References) to improve the consistency and meaning of the backend types +by removing the ref backend and introducing new backends:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TypeDescriptionEncrypted?Compiles To
gpgGnuPGYeshashed tag
gkmsGoogle KMSYeshashed tag
awskmsAmazon KMSYeshashed tag
base64base64Nohashed tag
plainplain textNoplain text
+

The type value will now need to be representative of the way a reference is stored via its backend.

+

A new plain backend type is introduced and will compile into revealed state instead of a hashed tag.

+

A new base64 backend type will store a base64 encoded value as the backend suggests (replacing the old badly named ref backend).

+

The command line for secrets will be instead:

+
kapitan refs --write gpg:my/secret1 ...
+kapitan refs --write base64:my/file ...
+kapitan refs --write plain:my/info ...
+
+

plain backend

+

The plain backend type will allow referring to external state by updating refs programmatically (e.g. in your pipeline)

+

For example, one can update the value of an environment variable and use ?{plain:my/user} as a reference in a template:

+
echo $USER | kapitan refs --write plain:my/user -f -
+
+

Or update a docker image value as ref ?{plain:images/dev/envoy}:

+
echo 'envoyproxy/envoy:v1.10.0' | kapitan refs --write plain:images/dev/envoy -f -
+
+

These references will be compiled into their values instead of hashed tags.

+

base64 backend

+

The base64 backend type will function as the original ref type, +except that this time the name is representative of what is actually happening :)

+

Refs path

+

Refs will be stored by default in the ./refs path set by --refs-path replacing the --secrets-path flag.

+

Background

+

Kapitan Secrets

+

Kapitan Secrets allow referring to restricted information (passwords, private keys, etc...) in templates while also securely storing them.

+

On compile, secret tags are updated into hashed tags which validate and instruct Kapitan how to reveal tags into decrypted or encoded information.

+

Kapitan Secrets example

+

The following command creates a GPG encrypted secret with the contents of file.txt for recipient ramaro@google.com to read:

+
kapitan secrets --write gpg:my/secret1 -f file.txt --recipients ramaro@google.com
+
+

This secret can be referred to in a jsonnet component:

+
{
+    "type": "app",
+    "name": "test_app",
+    "username": "user_one",
+    "password": "?{gpg:my/secret1}"
+}
+
+

When this component is compiled, it looks like this (note the hashed tag):

+
type: app
+name: test_app
+username: user_one
+password: ?{gpg:my/secret1:deadbeef}
+
+
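As an illustration, a minimal sketch of what such a hashed tag could contain, assuming the hashed portion is derived from the sha256 of the revealed content truncated to 8 characters (as described in the secret-backend proposals in this document):
import hashlib
+
+revealed = b"secret_content_of_file.txt"
+token = hashlib.sha256(revealed).hexdigest()[:8]
+
+compiled_tag = f"?{{gpg:my/secret1:{token}}}"  # e.g. ?{gpg:my/secret1:deadbeef}
+print(compiled_tag)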

A user with the required permissions can reveal the compiled component:

+
$ kapitan secrets --reveal -f compiled/mytarget/manifests/component.yml
+
+type: app
+name: test_app
+username: user_one
+password: secret_content_of_file.txt
+
+

Secret Backend Comparison

+

Kapitan today offers multiple secret backends:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TypeDescriptionEncrypted?Compiles To
gpgGnuPGYeshashed tag
gkmsGoogle KMSYeshashed tag
awskmsAmazon KMSYeshashed tag
refbase64Nohashed tag
+

However, not all backends are encrypted - this is not consistent!

+

The ref type is not encrypted as its purpose is to allow getting started with the Kapitan Secrets workflow without +the need to set up the encryption backend tooling (gpg, gcloud, boto, etc.)

+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/kap_proposals/kap_6_hashicorp_vault/index.html b/dev/kap_proposals/kap_6_hashicorp_vault/index.html new file mode 100644 index 000000000..a7309374a --- /dev/null +++ b/dev/kap_proposals/kap_6_hashicorp_vault/index.html @@ -0,0 +1,2088 @@ + + + + + + + + + + + + + + + + + + + + + Hashicorp Vault - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Hashicorp Vault

+

This feature allows the user to fetch secrets from Hashicorp Vault, with the new secret backend keyword 'vaultkv'.

+

Author: @vaibahvk @daminisatya

+

Specification

+

The following variables need to be exported to the environment (depending on the authentication used) where you will run kapitan refs --reveal in order to authenticate to your HashiCorp Vault instance:

+
    +
  • VAULT_ADDR: URL for vault
  • +
  • VAULT_SKIP_VERIFY=true: if set, do not verify presented TLS certificate before communicating with Vault server. Setting this variable is not recommended except during testing
  • +
  • VAULT_TOKEN: token for vault or file (~/.vault-tokens)
  • +
  • VAULT_ROLE_ID: required by approle
  • +
  • VAULT_SECRET_ID: required by approle
  • +
  • VAULT_USERNAME: username to login to vault
  • +
  • VAULT_PASSWORD: password to login to vault
  • +
  • VAULT_CLIENT_KEY: the path to an unencrypted PEM-encoded private key matching the client certificate
  • +
  • VAULT_CLIENT_CERT: the path to a PEM-encoded client certificate for TLS authentication to the Vault server
  • +
  • VAULT_CACERT: the path to a PEM-encoded CA cert file to use to verify the Vault server TLS certificate
  • +
  • VAULT_CAPATH: the path to a directory of PEM-encoded CA cert files to verify the Vault server TLS certificate
  • +
  • VAULT_NAMESPACE: specify the Vault Namespace, if you have one
  • +
+

Consider a key-value pair like my_key:my_secret (in our case let’s store hello:batman inside the vault) in the path secret/foo in a kv-v2 (KV version 2) secret engine on the Vault server. To use this as a secret, either run:

+
echo "foo:hello" > somefile.txt
+kapitan refs --write vaultkv:path/to/secret_inside_kapitan --file somefile.txt --target dev-sea
+
+

or in a single line

+
echo "foo:hello"  | kapitan refs --write vaultkv:path/to/secret_inside_kapitan -t dev-sea -f -
+
+

The entire string "foo:hello" is base64 encoded and stored in the secret_inside_kapitan. Now secret_inside_kapitan contains the following

+
data: Zm9vOmhlbGxvCg==
+encoding: original
+type: vaultkv
+vault_params:
+  auth: token
+
+

Encoding tells the type of data given to kapitan: if it is original, then after decoding the base64 we'll get the original secret; if it is base64, then after decoding once we still have a base64-encoded secret and have to decode again. +Parameters in the secret file are collected from the inventory of the target we gave on the CLI with --target dev-sea. If a target isn't provided, then kapitan will identify the variables from the environment, but providing auth is necessary as a key inside the target parameters, like the one shown:

+
parameters:
+  kapitan:
+    secrets:
+      vaultkv:
+        auth: userpass
+        engine: kv-v2
+        mount: team-alpha-secret
+        VAULT_ADDR: http://127.0.0.1:8200
+        VAULT_NAMESPACE: CICD-alpha
+        VAULT_SKIP_VERIFY: false
+        VAULT_CLIENT_KEY: /path/to/key
+        VAULT_CLIENT_CERT: /path/to/cert
+
+
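For illustration, revealing the example above roughly corresponds to the following hvac call (a sketch only, using the default secret mount and the hello:batman example from the beginning of this section; the token value is a placeholder):
import hvac
+
+client = hvac.Client(url="http://127.0.0.1:8200", token="s.XXXXXXXX")
+
+# The ref stores "foo:hello": path "foo" under the kv-v2 mount, key "hello"
+resp = client.secrets.kv.v2.read_secret_version(path="foo", mount_point="secret")
+value = resp["data"]["data"]["hello"]  # -> "batman"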

Environment variables that can be defined in kapitan inventory are VAULT_ADDR, VAULT_NAMESPACE, VAULT_SKIP_VERIFY, VAULT_CLIENT_CERT, VAULT_CLIENT_KEY, VAULT_CAPATH & VAULT_CACERT. +Extra parameters that can be defined in inventory are:

+
    +
  • auth: specify which authentication method to use like token,userpass,ldap,github & approle
  • +
  • mount: specify the mount point of the key's path, e.g. if path=alpha-secret/foo/bar then mount: alpha-secret (default secret)
  • +
  • engine: secret engine used, either kv-v2 or kv (default kv-v2). +Environment variables that cannot be defined in the inventory are VAULT_TOKEN, VAULT_USERNAME, VAULT_PASSWORD, VAULT_ROLE_ID, VAULT_SECRET_ID. +This makes the secret_inside_kapitan file accessible throughout the inventory, where we can use the secret whenever necessary, like ?{vaultkv:path/to/secret_inside_kapitan}
  • +
+

Following is the example file having a secret and pointing to the vault ?{vaultkv:path/to/secret_inside_kapitan}

+
parameters:
+  releases:
+    cod: latest
+  cod:
+    image: alledm/cod:${cod:release}
+    release: ${releases:cod}
+    replicas: ${replicas}
+    args:
+      - --verbose=${verbose}
+      - --password=?{vaultkv:path/to/secret_inside_kapitan}
+
+

When ?{vaultkv:path/to/secret_inside_kapitan} is compiled, it will look the same, with an 8-character sha256 hash suffix added at the end, like:

+
kind: Deployment
+metadata:
+  name: cod
+  namespace: dev-sea
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: cod
+    spec:
+      containers:
+        - args:
+            - --verbose=True
+            - --password=?{vaultkv:path/to/secret_inside_kapitan:57d6f9b7}
+          image: alledm/cod:v2.0.0
+          name: cod
+
+

Only the user with the required tokens/permissions can reveal the secrets. Please note that the roles and permissions will be handled at the Vault level. We need not worry about it within Kapitan. Use the following command to reveal the secrets:

+
kapitan refs --reveal -f compile/file/containing/secret
+
+

Following is the result of the cod-deployment.md file after Kapitan reveal.

+
kind: Deployment
+metadata:
+  name: cod
+  namespace: dev-sea
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: cod
+    spec:
+      containers:
+        - args:
+            - --verbose=True
+            - --password=batman
+          image: alledm/cod:v2.0.0
+          name: cod
+
+

Dependencies

+
    +
  • hvac is a python client for Hashicorp Vault
  • +
+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/kap_proposals/kap_7_remote_inventory/index.html b/dev/kap_proposals/kap_7_remote_inventory/index.html new file mode 100644 index 000000000..127aca7c9 --- /dev/null +++ b/dev/kap_proposals/kap_7_remote_inventory/index.html @@ -0,0 +1,2012 @@ + + + + + + + + + + + + + + + + + + + + + Remote Inventory Federation - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Remote Inventory Federation

+

This feature would add to Kapitan the ability to fetch parts of the inventory from remote locations (https/git). This would allow users to combine different inventories from different sources and build modular infrastructure reusable across various repos.

+

Author: @alpharoy14

+

Specification

+

The configuration and declaration of remote inventories would be done in the inventory files.

+

The file specifications are as follows:

+
parameters:
+ kapitan:
+  inventory:
+   - type: <inventory_type> #git\https
+     source: <source_of_inventory>
+     output_path: <relative_output_path>
+
+

On executing the $ kapitan compile --fetch command, the remote inventories will be fetched first, followed by the external dependencies, and finally the inventory will be merged and compiled.

+

Copying inventory files to the output location

+

The output path is the path to save the inventory items into. The path is relative to the inventory/ directory. For example, it could be /classes/. The contents of the fetched inventory will be recursively copied.

+

The fetched inventory files will be cached in the .dependency_cache directory if --cache is set, +e.g. $ kapitan compile --fetch --cache

+

Force fetching

+

While fetching, the output path will be recursively checked to see if it contains any file with the same name. If so, kapitan will skip fetching it.

+

To overwrite the files with the newly downloaded inventory items, we can add the --force-fetch flag to the compile command, as shown below.

+

$ kapitan compile --force-fetch

+
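A rough sketch of the copy/skip/overwrite behaviour described above (illustrative only; names are not part of the proposal):
import os
+import shutil
+
+def copy_remote_inventory(fetched_dir, output_path, force_fetch=False):
+    """Recursively copy fetched inventory files into inventory/<output_path>,
+    skipping files that already exist unless --force-fetch is given."""
+    dest_root = os.path.join("inventory", output_path.strip("/"))
+    for root, _, files in os.walk(fetched_dir):
+        for name in files:
+            src = os.path.join(root, name)
+            rel = os.path.relpath(src, fetched_dir)
+            dst = os.path.join(dest_root, rel)
+            if os.path.exists(dst) and not force_fetch:
+                continue  # same file name already present, skip
+            os.makedirs(os.path.dirname(dst), exist_ok=True)
+            shutil.copy2(src, dst)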

URL type

+

The URL type can be either git or http(s). Depending on the URL type, the configuration file may have additional arguments.

+

E.g. the git type may also include an additional ref parameter, as illustrated below:

+
inventory:
+ - type: git #git\https
+   source: <source_of_inventory>
+   output_path: <output_path>
+   ref: <commit_hash/branch/tag>
+
+

Implementation details

+

TODO

+

Dependencies

+
    +
  • GitPython module (and git executable) for git type
  • +
  • requests module for http[s]
  • +
+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/kap_proposals/kap_8_google_secret_management/index.html b/dev/kap_proposals/kap_8_google_secret_management/index.html new file mode 100644 index 000000000..e2297b708 --- /dev/null +++ b/dev/kap_proposals/kap_8_google_secret_management/index.html @@ -0,0 +1,2044 @@ + + + + + + + + + + + + + + + + + + + + + Support for - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Support for Google Secret Manager

+

This feature will enable users to retrieve secrets from Google Secret Manager API using the gsm keyword.

+

Specification

+

project_id uniquely identifies GCP projects, and it needs to be made accessible to kapitan in one of the following ways:

+
    +
  • As a part of target
  • +
+
parameters:
+  kapitan:
+    secrets:
+      gsm:
+        project_id: Project_Id
+
+
    +
  • As a flag
  • +
+
kapitan refs --google-project-id=<Project_Id> --write gsm:/path/to/secret_id -f secret_id_file.txt
+
+
    +
  • As an environment variable
  • +
+
export PROJECT_ID=<Project_Id>
+
+

Using a secret

+

In GCP, a secret contains one or more secret versions, along with its metadata. The actual contents of a secret are stored in a secret version. Each secret is identified by a name. We call that variable secret_id e.g. my_treasured_secret. +The URI of the secret becomes projects/<Project_Id>/secrets/my_treasured_secret

+

The following command will be used to add a secret_id to kapitan.

+
echo "my_treasured_secret"  | kapitan refs --write gsm:path/to/secret_inside_kapitan -t <target_name> -f -
+
+

The -t <target_name> is used to get the information about Project_ID.

+

The secret_id is Base64 encoded and stored in path/to/secret_inside_kapitan as

+
data: bXlfdHJlYXN1cmVkX3NlY3JldAo=
+encoding: original
+type: gsm
+gsm_params:
+  project_id: Project_ID
+
+

Referencing a secret

+

Secrets can be referred to using ?{gsm:path/to/secret_id:version_id}, +e.g.

+
parameter:
+    mysql:
+        storage: 10G
+        storage_class: standard
+        image: mysql:latest
+        users:
+            root:
+                password: ?{gsm:path/to/secret_id:version_id}
+
+

Here, version_id will be an optional argument. By default it will point to latest.

+

Revealing a secret

+

After compilation, the secret reference will be postfixed with 8 characters from the sha256 hash of the retrieved password

+
apiVersion: v1
+data:
+  MYSQL_ROOT_PASSWORD: ?{gsm:path/to/secret_id:version_id:deadbeef}
+kind: Secret
+metadata:
+  labels:
+    name: example-mysql
+  name: example-mysql
+  namespace: minikube-mysql
+type: Opaque
+
+

To reveal the secret, the following command will be used: +$ kapitan refs --reveal -f compiled/file/containing/secret

+

Dependencies

+ +

note Kapitan will not be responsible for authentication or access management to GCP

+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/kap_proposals/kap_8_modularize_kapitan/index.html b/dev/kap_proposals/kap_8_modularize_kapitan/index.html new file mode 100644 index 000000000..3ea99c33e --- /dev/null +++ b/dev/kap_proposals/kap_8_modularize_kapitan/index.html @@ -0,0 +1,1986 @@ + + + + + + + + + + + + + + + + + + + + + Modularize Kapitan - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Modularize Kapitan

+

Kapitan is packaged in PyPI and as a binary along with all its dependencies. Adding an extra key/security backend means that we need to ship another dependency with that PyPI package, making deploying changes more complicated. This project would modularize kapitan into core dependencies and extra modules.

+

Usage

+
pip3 install --user kapitan # to install only core dependencies
+pip3 install --user kapitan[gkms] # gkms is the module
+
+

Implementation

+
    +
  • The main module includes the essential kapitan dependencies and reclass dependencies, which will be included in the requirement.txt file.
  • +
  • The extra modules (PyPI extras) will be defined in the setup.py file.
  • +
  • The extra dependencies are for secret backends (AWS Key backend, Google KMS Key backend, Vault Key backend, etc.) and Helm support.
  • +
+ +
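A minimal sketch of how such extras could be declared in setup.py (the package names listed are illustrative, not the definitive dependency split):
from setuptools import setup
+
+setup(
+    name="kapitan",
+    # core dependencies stay in the requirements file
+    install_requires=[l.strip() for l in open("requirements.txt") if l.strip()],
+    # optional backends become PyPI extras, e.g. `pip3 install kapitan[gkms]`
+    extras_require={
+        "gkms": ["google-api-python-client"],
+        "awskms": ["boto3"],
+        "vaultkv": ["hvac"],
+    },
+)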
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/kap_proposals/kap_9_bring_your_own_helm/index.html b/dev/kap_proposals/kap_9_bring_your_own_helm/index.html new file mode 100644 index 000000000..c4bff18a5 --- /dev/null +++ b/dev/kap_proposals/kap_9_bring_your_own_helm/index.html @@ -0,0 +1,1988 @@ + + + + + + + + + + + + + + + + + + + + + Bring Your Own Helm Proposal - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Bring Your Own Helm Proposal

+

The Problem

+

Currently the helm binding can't be run on Mac OSX. Attempts to fix this have been made on several occasions:

+ +

There are some issues with the current bindings besides the lack of Mac OSX support. The golang runtime (1.14) selected will affect templates from older helm versions: https://github.com/helm/helm/issues/7711. Users also can't select the version of helm they'd like to use for templating.

+

Solution

+

Users supply their own helm binary. This allows them to control the version of golang runtime and version of helm they'd like to use.

+

In Kapitan we could rewrite the interface to use subprocess and run the helm commands. The CLI of helm 2 vs helm 3 is slightly different, but shouldn't be difficult to codify.

+

This would also let us get rid of cffi and golang, which would reduce the complexity and build time of the project.

+

Depending on how this goes, this could pave the way for a "bring your own binary" input type.

+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/pages/blog/04/12/2022/kapitan-logo-5-years-of-kapitan/index.html b/dev/pages/blog/04/12/2022/kapitan-logo-5-years-of-kapitan/index.html new file mode 100644 index 000000000..68e448a86 --- /dev/null +++ b/dev/pages/blog/04/12/2022/kapitan-logo-5-years-of-kapitan/index.html @@ -0,0 +1,2329 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + 5 Years of Kapitan - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + + + + + + + + +
+
+
+
+ + + + + + +
+
+
+
+ + + + + + + +

5 Years of Kapitan

+

Last October we quietly celebrated 5 years of Kapitan.

+

In 5 years, we've been able to witness a steady and relentless growth of Kapitan, which has however never caught the full attention of the majority of the community.

+

The main issue has always been around an embarrassing lack of documentation, and we've worked hard to improve on that, with more updates due soon.

+

Let this first blog post from a revamped website be a promise to our community of a better effort in explaining what sets Kapitan apart, and makes it the only tool of its kind.

+

And let's start with a simple question: Why do you even need Kapitan?

+ + +
+

Credits ❤

+
In reality Kapitan's heartbeat started about 9 months earlier at DeepMind Health, created by [**Ricardo Amaro**](https://github.com/ramaro) with the help of some of my amazing team: in no particular order [Adrian Chifor](https://github.com/adrianchifor), [Paul S](https://github.com/uberspot) and [Luis Buriola](https://github.com/gburiola). It was then kindly released to the community by Google/DeepMind and has since been improved thanks to more than [50 contributors](https://github.com/kapicorp/kapitan/graphs/contributors).
+
+
+

Why do I need Kapitan?

+

Kapitan is a hard sell, but a rewarding one. For these main reasons:

+
    +
  1. Kapitan solves problems that some don’t know/think to have.
  2. +
  3. Some people by now have probably accepted the Status Quo and think that some suffering is part of their job descriptions.
  4. +
  5. Objectively, Kapitan requires an investment of effort to learn how to use a new tool, and this adds friction.
  6. +
+

All I can say is that it is very rewarding once you get to use it, so stick with me while I try to explain the problems that Kapitan is solving.

+

The problems

+

It would be reductive to list the problems that Kapitan solves, because sometimes we ourselves are stunned by what Kapitan is being used for, so I will start with some common relatable ones, and perhaps that will give you the right framing to understand how to use it with your setup.

+

In its most basic explanation, Kapitan solves the problem of avoiding duplication of configuration data: by consolidating it in one place (the Inventory), and making it accessible by all the tools and languages it integrates with (see Input Types).

+

This configuration data is then used by Kapitan (templates) to configure and operate a number of completely distinct and unaware tools which would normally not be able to share their configurations.

+

Without Kapitan

+

Let's consider the case where you want to define a new bucket, with a given bucket_name. Without Kapitan you would probably need to:

+
    +
  • Write a PR on your Terraform repository to create the new bucket.
  • +
  • Which name should I use? Make sure to write it down! CTRL-C
  • +
  • Write a PR for your values.yaml file to configure your Helm chart: <CTRL-V>
  • +
  • Write somewhere some documentation to write down the bucket name and why it exists. Another <CTRL-V>
  • +
  • Another PR to change some **kustomize** configuration for another service to tell it to use the new bucket <CTRL-V>
  • +
  • Days after, time to upload something to that bucket: gsutil cp my_file wait_what_was_the_bucket_name_again.. Better check the documentation: CTRL-C + <CTRL-V>
  • +
+

With Kapitan

+

When using Kapitan, your changes are likely to be contained within one PR, from which you can have a full view of everything that is happening. What happens is explained in this flow

+

+%%{ init: { securityLevel: 'loose'} }%%
+graph LR
+    classDef pink fill:#f9f,stroke:#333,stroke-width:4px,color:#000,font-weight: bold;
+    classDef blue fill:#00FFFF,stroke:#333,stroke-width:4px,color:#000,font-weight: bold;
+    classDef bold color:#000,font-weight: bold;
+
+    DATA --> KAPITAN
+    BUCKET --> DATA
+    KAPITAN --> KUBERNETES
+    KAPITAN --> TERRAFORM
+    KAPITAN --> DOCUMENTATION
+    KAPITAN --> SCRIPT
+    KAPITAN --> HELM
+    KUBERNETES -->  BUCKET_K8S
+    TERRAFORM -->  BUCKET_TF
+    DOCUMENTATION  --> BUCKET_DOC
+    SCRIPT --> BUCKET_SCRIPT
+    HELM --> BUCKET_HELM
+
+
+    DATA[("All your data")]
+    BUCKET("bucket_name")
+    KAPITAN(("<img src='/images/kapitan_logo.png'; width='150'/>")):::blue
+
+
+    subgraph " "
+      KUBERNETES(["Kubernetes"]):::pink
+      BUCKET_K8S(".. a ConfigMap uses bucket_name"):::bold
+    end
+    subgraph " "
+    TERRAFORM(["Terraform"]):::pink
+    BUCKET_TF("..creates the bucket bucket_name"):::bold
+    end
+    subgraph " "
+    DOCUMENTATION(["Documentation"]):::pink
+    BUCKET_DOC("..references a link to bucket_name"):::bold
+    end
+    subgraph " "
+    SCRIPT(["Canned Script"]):::pink
+    BUCKET_SCRIPT("..knows how to upload files to bucket_name"):::bold
+    end
+    subgraph " "
+    HELM(["Helm"]):::pink
+    BUCKET_HELM("..configures a chart to use the bucket_name"):::bold
+    end
+

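To make this concrete, here is a purely illustrative sketch; the class, key and value names below are hypothetical and only meant to show the idea of defining bucket_name once in the Inventory and interpolating it everywhere else:

```yaml
# Hypothetical class: inventory/classes/components/my_service.yml
parameters:
  my_service:
    bucket_name: my-project-prod-uploads   # defined once, here and only here

  # other configuration rendered by Kapitan simply interpolates the same value
  helm_values:
    uploadsBucket: ${my_service:bucket_name}
  terraform_bucket: ${my_service:bucket_name}
```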
Thanks to its flexibility, you can use Kapitan to generate all sorts of configurations: Kubernetes and Terraform resources, ArgoCD pipelines, Docker Compose files, random configs, scripts, documentation and anything else you find relevant. The trick, obviously, is in how to drive these changes, but it is not as complicated as it sounds. We'll get there soon enough!

+

Now let's look at another example of things that are so established in the way we work that they become almost impossible to see. To highlight the potential issues with this way of doing things, let's ask some questions about your current setup. We'll pick on Kubernetes this time.

+

Kubernetes

+

I’ll start with Kubernetes, such a popular and brilliant solution to problems most people should not be concerned with (jokes aside, I adore Kubernetes). To many, Kubernetes is the kind of solution that quickly turns into a problem in its own right.

+
+

So.. how do you deploy to Kubernetes right now?

+
+

Helm comes to mind first, right?

+
+

Kapitan + Helm: BFF ❤

+

In spite of Kapitan being initially considered (even by ourselves) as an alternative to Helm, we’ve actually enjoyed the benefits of integrating with this amazing tool and the ecosystem it gives us access to. So yes, good news: you can use Helm right from within Kapitan!

+
+

Well, let’s put that to the test. How do you manage your Helm charts? I’ll attempt to break these questions down into categories.

+
+
+
+
+
    +
  • Where do you keep your Helm charts?
      +
    • In a single repository?
    • +
    • How many repositories?
    • +
    • Alongside the code you develop?
    • +
    +
  • +
  • What about the official ones that you didn't create yourself?
  • +
+
+
+
+
+
    +
  • How many values.yaml files do you have?
  • +
  • How much consistency is there between them? any snowflakes?
  • +
  • If you change something, like with the bucket_name example above:
      +
    • how many places do you need to go and update?
    • +
    • And how many times do you get it wrong?
    • +
    +
  • +
  • Don't you feel all your charts look the same?
      +
    • Yet how many times do you need to deviate from the one you thought captured everything?
    • +
    • What if you need to make a change to all your charts at once: how do you deal with it?
    • +
    +
  • +
  • What about configuration files, how do you deal with templating those?
  • +
+
+
+
+
+
    +
  • How do you deal with “official” charts, do they always cover what you want to do?
  • +
  • How do you deal with modifications that you need to apply to your own version of an official chart?
  • +
  • What if you need to make a change that affects ALL your charts?
  • +
  • Or if the change is for all the charts for a set of microservices?
  • +
+
+
+
+
+
    +
  • How many times do you find yourself setting parameters on the command line of Helm and other tools?
  • +
  • How many times did you connect to the wrong context in Kubernetes?
  • +
  • How many of your colleagues have the same clean context setup as you have?
  • +
  • How many things are there that you wish you were tracking?
  • +
  • How do I connect to the production database? Which user is it again?
  • +
  • How easy is it for you to create a new environment from scratch?
      +
    • Are you sure?
    • +
    • When was the last time you tried?
    • +
    +
  • +
+
+
+
+
+
    +
  • How easy is it to keep your configuration up to date?
  • +
  • Does your documentation need to be “understood”, or can it just be executed?
      +
    • How many conditionals like this do you have in your documentation?
      +

      NOTE: Cluster X in project Y has an older version of Q and requires you to do Z instead N because of A, B and C!

      +
      +
    • +
    +
  • +
  • Would you be able to follow those instructions at 3am on a Sunday morning?
  • +
+
+
+
+
+
    +
  • How do you handle secrets in your repository?
  • +
  • Do you know how to create your secrets from scratch?
  • +
  • Do you remember that token you created 4 months ago? How did you do that?
  • +
  • How long would it take you?
  • +
  • Is the process of creating them “secure”?
      +
    • Or does it leave you with random certificates and tokens unencrypted on your “Downloads” folder?
    • +
    +
  • +
+
+
+
+
+
    +
  • The above concerns: do they also apply to other things you manage?
  • +
  • Terraform?
  • +
  • Pipelines?
  • +
  • Random other systems you interact with?
  • +
+
+
+
+
+

I’ll stop here because I do not want to lose you, and neither do I want to discourage you.

+

But if you look around, it's true: you do have a very complicated setup, and Kapitan can help you streamline it. In fact, Kapitan can leave you with a consistent and uniform way to manage all of these concerns at once.

+

My job here is done: you have awakened, and you won't look at your setup in the same way. Stay tuned and learn how Kapitan can change the way you do things.

diff --git a/dev/pages/blog/04/12/2022/kapitan-logo-new-kapitan-release--v0310/index.html (new file: "New Kapitan release v0.31.0")

New Kapitan release v0.31.0

+

The Kapicorp team is happy to announce a new release of Kapitan.

+

This release is yet another great bundle of features and improvements over the past year, the majority of which have been contributions from our community!

+

Head over to our release page on GitHub for a full list of features and contributors.

+

If you missed it, have a look at our latest blog post here: 5 years of Kapitan.

+

Please help us by visiting our Sponsor Kapitan page.

diff --git a/dev/pages/blog/04/12/2022/kapitan-logo-new-kapitan-release--v0320/index.html (new file: "New Kapitan release v0.32.0")

New Kapitan release v0.32.0

+

The Kapicorp team is happy to announce a new release of Kapitan.

+

This release contains loads of improvements for the past 6 months, the majority of which have been contributions from our community!

+

Head over to our release page on GitHub for a full list of features and contributors.

+

Please help us by visiting our Sponsor Kapitan page.

diff --git a/dev/pages/blog/12/02/2024/kapitan-logo-new-kapitan-release--v0331/index.html (new file: "New Kapitan release v0.33.1")

New Kapitan release v0.33.1

+

The Kapicorp team is happy to announce a new release of Kapitan.

+

This release contains loads of improvements for the past 8 months, the majority of which have been contributions from our community!

+

Head over to our release page on GitHub for a full list of features and contributors.

+

Please help us by visiting our Sponsor Kapitan page.

diff --git a/dev/pages/blog/27/08/2023/kapitan-logo-deploying-keda-with-kapitan/index.html (new file: "Deploying Keda with Kapitan")

Deploying Keda with Kapitan

+

We have worked hard to bring out a brand new way of experiencing Kapitan, through something that we call generators.

+

Although the concept is something we introduced in 2020 with our blog post Keep your ship together with Kapitan, the sheer amount of new capabilities (and, frankly, the embarrassing lack of documentation and examples) forces me to show you the new capabilities using a practical example: deploying Keda.

+

Objective of this tutorial

+

We are going to deploy Keda using the helm chart approach. While Kapitan supports a native way to deploy helm charts using the helm input type, we are instead going to use a generator-based approach using the "charts" generator.

+

This tutorial will show you how to configure kapitan to:

+
    +
  • download a helm chart
  • +
  • compile a helm chart
  • +
  • modify a helm chart using mutations
  • +
+

The content of this tutorial is already available in the kapitan-reference repository.

+ + +

Deploying KEDA

+

Define parameters

+
## inventory/classes/components/keda.yml
+parameters:
+  keda:
+    params:
+      # Variables to reference from other places
+      application_version: 2.11.2
+      service_account_name: keda-operator
+      chart_name: keda
+      chart_version: 2.11.2
+      chart_dir: system/sources/charts/${keda:params:chart_name}/${keda:params:chart_name}/${keda:params:chart_version}/${keda:params:application_version}
+      namespace: keda
+      helm_values: {}
+...
+
+
+

Override Helm Values

+

As an example, we could pass Helm an override to the default values to make the operator deploy 2 replicas.

+
  helm_values:
+    operator:
+      replicaCount: 2  
+
+
+

Download the chart

+

Kapitan supports downloading dependencies, including helm charts.

+

When Kapitan is run with --fetch, it will download the dependency if it is not already present. Use --force-fetch if you want to download it every time. Learn more about External dependencies.

+
## inventory/classes/components/keda.yml
+...
+  kapitan:
+    dependencies:
+      # Tells kapitan to download the helm chart into the chart_dir directory
+      - type: helm
+        output_path: ${keda:params:chart_dir}
+        source: https://kedacore.github.io/charts
+        version: ${keda:params:chart_version}
+        chart_name: ${keda:params:chart_name}
+...
+
+
+

Parameter interpolation

+

Notice how we are using parameter interpolation from the previously defined keda.params section. This will make it easier in the future to override some aspects of the configuration on a per-target basis.
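For example, a hypothetical second target (a minimal sketch, reusing the classes above) could override just one of those parameters, and everything that interpolates ${keda:params:...} would follow:

```yaml
# Hypothetical target: inventory/targets/tutorials/keda-dev.yml
classes:
- common
- components.keda

parameters:
  keda:
    params:
      # only this value changes for this target; the chart download, chart_dir
      # and the charts generator all pick it up through interpolation
      helm_values:
        operator:
          replicaCount: 1
```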

+
+

Generate the chart

+
## inventory/classes/components/keda.yml
+...
+  charts:
+     # Configures a helm generator to compile files for the given chart
+    keda:
+      chart_dir: ${keda:params:chart_dir}
+      helm_params:
+        namespace: ${keda:params:namespace}
+        name: ${keda:params:chart_name}
+      helm_values: ${keda:params:helm_values}
+
+

Compile

+

Before we can see any effect, we need to attach the class to a target. We will create a simple target which looks like this:

+
# inventory/targets/tutorials/keda.yml
+classes:
+- common
+- components.keda
+
+

Now when we run kapitan compile we will see the chart being downloaded and the manifests being produced.

+
./kapitan compile -t keda --fetch
+Dependency keda: saved to system/sources/charts/keda/keda/2.11.2/2.11.2
+Rendered inventory (1.87s)
+Compiled keda (2.09s)
+
+
+

kapitan compile breakdown

+
    +
  • --fetch tells kapitan to fetch the chart if it is not found locally
  • +
  • -t keda tells kapitan to compile only the previously defined keda.yml target
  • +
+
+
ls -l compiled/keda/manifests/
+total 660
+-rw-r--r-- 1 ademaria root 659081 Aug 29 10:25 keda-bundle.yml
+-rw-r--r-- 1 ademaria root     79 Aug 29 10:25 keda-namespace.yml
+-rw-r--r-- 1 ademaria root   7092 Aug 29 10:25 keda-rbac.yml
+-rw-r--r-- 1 ademaria root   1783 Aug 29 10:25 keda-service.yml
+
+

Using mutations

+

Now let's do a couple of things that would not be easy to do with helm natively.

+

You may already have noticed that the content of the chart is split into multiple files: this is because the generator is configured to separate different resource types into different files for convenience and consistency. The mechanism behind it is the "bundle" mutation, which tells Kapitan which file to save a resource into.

+

Here are some example "bundle" mutations which separate different kinds into different files:

+
        mutations:
+          bundle:
+            - conditions:
+                kind: [Ingress]
+              filename: '{content.component_name}-ingress'
+              ...
+            - conditions:
+                kind: [HorizontalPodAutoscaler, PodDisruptionBudget, VerticalPodAutoscaler]
+              filename: '{content.component_name}-scaling'
+            - conditions:
+                kind: ['*']
+              filename: '{content.component_name}-bundle'
+
+
+

Catch-all rule

+

Notice the catch-all rule at the end, which puts everything that has not matched into the bundle file.

+
+

bundle mutation

+

Currently most of the Keda-related resources are bundled into the -bundle.yml file. Instead, we want to separate the CRDs into their own file.

+

Let's add this configuration:

+
  charts:
+     # Configures a helm generator to compile files for the given chart
+    keda:
+      chart_dir: ${keda:params:chart_dir}
+      ... 
+      mutations:
+        bundle:
+          - conditions:
+              # CRDs need to be setup separately
+              kind: [CustomResourceDefinition]
+            filename: '{content.component_name}-crds'
+
+

Upon compile, you can now see that the CRDs are moved to a different file:

+
ls -l compiled/keda/manifests/
+total 664
+-rw-r--r-- 1 ademaria root  11405 Aug 29 10:56 keda-bundle.yml
+-rw-r--r-- 1 ademaria root 647672 Aug 29 10:56 keda-crds.yml
+-rw-r--r-- 1 ademaria root     79 Aug 29 10:56 keda-namespace.yml
+-rw-r--r-- 1 ademaria root   7092 Aug 29 10:56 keda-rbac.yml
+-rw-r--r-- 1 ademaria root   1783 Aug 29 10:56 keda-service.yml
+
+

patch mutation

+

As we are using Argo, we want to pass a special argocd.argoproj.io/sync-options annotation to the CRDs only, so that ArgoCD can handle them properly.

+

For this we are going to use the patch mutation:

+
...
+      mutations:
+...
+        patch:
+          - conditions:
+              kind: [CustomResourceDefinition]
+            patch:
+              metadata:
+                annotations:
+                  argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true,Replace=true
+
+

Upon compile, you can now see that the CRDs have been modified as required:

+
diff --git a/compiled/keda/manifests/keda-crds.yml b/compiled/keda/manifests/keda-crds.yml
+index 2662bf3..9306c3a 100644
+--- a/compiled/keda/manifests/keda-crds.yml
++++ b/compiled/keda/manifests/keda-crds.yml
+@@ -2,6 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
+ kind: CustomResourceDefinition
+ metadata:
+   annotations:
++    argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true,Replace=true
+     controller-gen.kubebuilder.io/version: v0.12.0
+
+

Summary

+

In this tutorial we have explored some of Kapitan's capabilities to manage and modify Helm charts. The next tutorial will show how to make use of Keda and deploy a generator for Keda resources.

diff --git a/dev/pages/blog/index.html (new file: "Blog")

Blog

+
+ +
+
+ + +
+
+

New Kapitan release v0.33.1

+

The Kapicorp team is happy to announce a new release of Kapitan.

+

This release contains loads of improvements for the past 8 months, the majority of which have been contributions from our community!

+

Head over to our release page on GitHub for a full list of features and contributors.

+

Please help us by visiting our Sponsor Kapitan page.

+ +
+
+ +
+
+ + +
+
+

Deploying Keda with Kapitan

+

We have worked hard to bring out a brand new way of experiencing Kapitan, through something that we call generators.

+

Although the concept is something we introduced in 2020 with our blog post Keep your ship together with Kapitan, the sheer amount of new capabilities (and, frankly, the embarrassing lack of documentation and examples) forces me to show you the new capabilities using a practical example: deploying Keda.

+

Objective of this tutorial

+

We are going to deploy Keda using the helm chart approach. While Kapitan supports a native way to deploy helm charts using the helm input type, we are instead going to use a generator-based approach using the "charts" generator.

+

This tutorial will show you how to configure kapitan to:

+
    +
  • download a helm chart
  • +
  • compile a helm chart
  • +
  • modify a helm chart using mutations
  • +
+

The content of this tutorial is already available in the kapitan-reference repository.

+ + +
+
+ +
+
+ + +
+
+

5 Years of Kapitan

+

Last October we quietly celebrated 5 years of Kapitan.

+

In 5 years, we've been able to witness a steady and relentless growth of Kapitan, which has, however, never caught the full attention of the majority of the community.

+

The main issue has always been an embarrassing lack of documentation, and we've worked hard to improve on that, with more updates due soon.

+

Let this first blog post from a revamped website be a promise to our community of a better effort in explaining what sets Kapitan apart, and makes it the only tool of its kind.

+

And let's start with a simple question: Why do you even need Kapitan?

+ + +
+
+ +
+
+ + +
+
+

New Kapitan release v0.31.0

+

The Kapicorp team is happy to announce a new release of Kapitan.

+

This release is yet another great bundle of features and improvements over the past year, the majority of which have been contributions from our community!

+

Head over to our release page on GitHub for a full list of features and contributors.

+

If you missed it, have a look at our latest blog post here: 5 years of Kapitan.

+

Please help us by visiting our Sponsor Kapitan page.

+ +
+
+ +
+
+ + +
+
+

New Kapitan release v0.32.0

+

The Kapicorp team is happy to announce a new release of Kapitan.

+

This release contains loads of improvements for the past 6 months, the majority of which have been contributions from our community!

+

Head over to our release page on GitHub for a full list of features and contributors.

+

Please help us by visiting our Sponsor Kapitan page.

diff --git a/dev/pages/commands/kapitan_compile/index.html (new file: "compile")

CLI Reference | kapitan compile

+

kapitan compile

+

Merges inventory and inputs and produces generated files in the output folder (/compiled by default)

+

Compile all targets

+
+
kapitan compile
+
+
+click to expand output +
Compiled mysql-generator-fetch (0.18s)
+Compiled vault (0.25s)
+Compiled pritunl (0.22s)
+Compiled gke-pvm-killer (0.05s)
+Compiled examples (0.30s)
+Compiled mysql (0.08s)
+Compiled postgres-proxy (0.06s)
+Compiled echo-server (0.06s)
+Compiled global (0.03s)
+Compiled guestbook-argocd (0.08s)
+Compiled tutorial (0.13s)
+Compiled kapicorp-project-123 (0.03s)
+Compiled kapicorp-demo-march (0.03s)
+Compiled kapicorp-terraform-admin (0.03s)
+Compiled sock-shop (0.32s)
+Compiled tesoro (0.09s)
+Compiled dev-sockshop (0.32s)
+Compiled prod-sockshop (0.38s)
+Compiled argocd (2.29s)
+
+
+
+

Selective compilation

+

Using target names

+

Compiles one or more targets selected by name using --targets or -t

+
+
kapitan compile -t mysql tesoro
+
+
+click to expand output +
Compiled mysql (0.06s)
+Compiled tesoro (0.09s)
+
+
+
+

Using labels

+

Compiles one or more targets matching the given labels with --labels or -l

+
+

Info

+

This works if you have labelled your targets using the following syntax:

+
parameters:
+  ...
+  kapitan:
+    ...
+    labels:
+      customer: acme
+
+

see Labels for more details

+
+
$ kapitan compile -l customer=acme
+Compiled acme-project (0.14s)
+Compiled acme-pipelines (0.10s)
+
+

Fetch on compile

+

Use the --fetch flag to fetch Remote Inventories and the External Dependencies.

+
kapitan compile --fetch
+
+

This will download the dependencies according to their configurations. By default, Kapitan does not overwrite an existing item with the same name as that of the fetched inventory items.

+

Use the --force-fetch flag to force fetch (update cache with freshly fetched items) and overwrite inventory items of the same name in the output_path.

+
kapitan compile --force-fetch
+
+

Use the --cache flag to cache the fetched items in the .dependency_cache directory in the root project directory.

+
kapitan compile --cache --fetch
+
+

Embed references

+

By default, Kapitan references are stored encrypted (for backends that support encryption) in the configuration repository under the /refs directory.

+

For instance, a reference tag ?{gpg:targets/minikube-mysql/mysql/password:ec3d54de} would point to a physical file on disk under /refs like:

+
+

refs/targets/minikube-mysql/mysql/password

+
data: hQEMA8uOJKdm07XTAQgAp5i [[ CUT ]] BwqYc3g7PI09HCJZdU=
+encoding: base64
+recipients:
+- fingerprint: D9234C61F58BEB3ED8552A57E28DC07A3CBFAE7C
+type: gpg
+
+
+

The --embed-refs flag tells Kapitan to embed these references on compile, alongside the generated output. By doing so, compiled output is self-contained and can be revealed by Tesoro or other tools.

+
+
kapitan compile --embed-refs
+
+

See how the compiled output for this specific target changes to embed the actual encrypted content (marked by ?{gpg: :embedded} to indicate it is a gpg reference) rather than just holding a reference to it (like ?{gpg:targets/minikube-mysql/mysql/password:ec3d54de}, which points to the file shown above).

+
+click to expand output +
diff --git a/examples/kubernetes/compiled/minikube-mysql/manifests/mysql_app.yml b/examples/kubernetes/compiled/minikube-mysql/manifests/mysql_app.yml
+[[ CUT ]]
+apiVersion: v1
+data:
+-  MYSQL_ROOT_PASSWORD: ?{gpg:targets/minikube-mysql/mysql/password:ec3d54de}
+-  MYSQL_ROOT_PASSWORD_SHA256: ?{gpg:targets/minikube-mysql/mysql/password_sha256:122d2732}
++  MYSQL_ROOT_PASSWORD: ?{gpg:eyJkYXRhIjogImhR [[ CUT ]] gInR5cGUiOiAiZ3BnIn0=:embedded}
++  MYSQL_ROOT_PASSWORD_SHA256: ?{gpg:eyJkYXRhI [[ CUT ]] eXBlIjogImdwZyJ9:embedded}
+
+
+
+

help

+
+
kapitan compile --help
+
+
+click to expand output +
usage: kapitan compile [-h] [--inventory-backend {reclass}]
+               [--search-paths JPATH [JPATH ...]]
+               [--jinja2-filters FPATH] [--verbose] [--prune]
+               [--quiet] [--output-path PATH] [--fetch]
+               [--force-fetch] [--force] [--validate]
+               [--parallelism INT] [--indent INT]
+               [--refs-path REFS_PATH] [--reveal] [--embed-refs]
+               [--inventory-path INVENTORY_PATH] [--cache]
+               [--cache-paths PATH [PATH ...]]
+               [--ignore-version-check] [--use-go-jsonnet]
+               [--compose-target-name] [--schemas-path SCHEMAS_PATH]
+               [--yaml-multiline-string-style STYLE]
+               [--yaml-dump-null-as-empty]
+               [--targets TARGET [TARGET ...] | --labels
+               [key=value ...]]
+
+options:
+  -h, --help            show this help message and exit
+  --inventory-backend {reclass,reclass-rs}
+                        Select the inventory backend to use (default=reclass)
+  --search-paths JPATH [JPATH ...], -J JPATH [JPATH ...]
+                        set search paths, default is ["."]
+  --jinja2-filters FPATH, -J2F FPATH
+                        load custom jinja2 filters from any file, default is
+                        to put them inside lib/jinja2_filters.py
+  --verbose, -v         set verbose mode
+  --prune               prune jsonnet output
+  --quiet               set quiet mode, only critical output
+  --output-path PATH    set output path, default is "."
+  --fetch               fetch remote inventories and/or external dependencies
+  --force-fetch         overwrite existing inventory and/or dependency item
+  --force               overwrite existing inventory and/or dependency item
+  --validate            validate compile output against schemas as specified
+                        in inventory
+  --parallelism INT, -p INT
+                        Number of concurrent compile processes, default is 4
+  --indent INT, -i INT  Indentation spaces for YAML/JSON, default is 2
+  --refs-path REFS_PATH
+                        set refs path, default is "./refs"
+  --reveal              reveal refs (warning: this will potentially write
+                        sensitive data)
+  --embed-refs          embed ref contents
+  --inventory-path INVENTORY_PATH
+                        set inventory path, default is "./inventory"
+  --cache, -c           enable compilation caching to .kapitan_cache and
+                        dependency caching to .dependency_cache, default is
+                        False
+  --cache-paths PATH [PATH ...]
+                        cache additional paths to .kapitan_cache, default is
+                        []
+  --ignore-version-check
+                        ignore the version from .kapitan
+  --use-go-jsonnet      use go-jsonnet
+  --compose-target-name   Create same subfolder structure from inventory/targets
+                        inside compiled folder
+  --schemas-path SCHEMAS_PATH
+                        set schema cache path, default is "./schemas"
+  --yaml-multiline-string-style STYLE, -L STYLE
+                        set multiline string style to STYLE, default is
+                        'double-quotes'
+  --yaml-dump-null-as-empty
+                        dumps all none-type entries as empty, default is
+                        dumping as 'null'
+  --targets TARGET [TARGET ...], -t TARGET [TARGET ...]
+                        targets to compile, default is all
+  --labels [key=value ...], -l [key=value ...]
+                        compile targets matching the labels, default is all
+
+
+
diff --git a/dev/pages/commands/kapitan_dotfile/index.html (new file: "kapitan dotfile")

CLI Reference | .kapitan config file

+

.kapitan

+

Kapitan allows you to conveniently override defaults by specifying a local .kapitan file in the root of your repository (relative to the kapitan configuration):

+

This comes in handy to make sure Kapitan runs consistently for your specific setup.

+
+

Info

+

Any Kapitan command can be overridden in the .kapitan dotfile, but here are some of the most common examples.

+
+

version

+

To enforce the Kapitan version used for compilation (for consistency and safety), you can add version to .kapitan:

+
version: 0.30.0
+
+...
+
+

This constraint can be relaxed to allow any patch release of a minor version to be accepted:

+
version: 0.30 # Allows any 0.30.x release to run
+
+...
+
+

Command line flags

+

You can also permanently define all command line flags in the .kapitan config file. For example:

+
...
+
+compile:
+  indent: 4
+  parallelism: 8
+
+

would be equivalent to running:

+
kapitan compile --indent 4 --parallelism 8
+
+

For flags which are shared by multiple commands, you can either selectively define them for single commands in a section with the same name as the command, or you can set them in the global section, in which case they're applied to all commands. If you set a flag in both the global section and a command's section, the value from the command's section takes precedence over the value from the global section.
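As an illustrative sketch (paths and values below are made up), a .kapitan file combining both kinds of sections could look like this, with compile taking its own inventory-path while every other command falls back to the global one:

```yaml
# .kapitan (illustrative values)
global:
  inventory-path: ./some_path     # applies to every command

compile:
  parallelism: 8                  # only affects `kapitan compile`
  inventory-path: ./other_path    # overrides the global value for compile
```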

+

As an example, you can configure the inventory-path in the global section of the Kapitan dotfile to make sure it's persisted across all Kapitan runs.

+
...
+
+global:
+  inventory-path: ./some_path
+
+

which would be equivalent to running any command with --inventory-path=./some_path.

+

Another flag that you may want to set in the global section is inventory-backend to select a non-default inventory backend implementation.

+
global:
+  inventory-backend: reclass
+
+

which would be equivalent to always running Kapitan with --inventory-backend=reclass.

+

Please note that the inventory-backend flag currently can't be set through the command-specific sections of the Kapitan config file.

diff --git a/dev/pages/commands/kapitan_inventory/index.html (new file: "inventory")

CLI Reference | kapitan inventory

+

kapitan inventory

+

Renders the resulting inventory values for a specific target.

+

For example, rendering the inventory for the mysql target:

+
+
kapitan inventory -t mysql
+
+
+click to expand output +
__reclass__:
+  environment: base
+  name: mysql
+  node: mysql
+  timestamp: Wed Nov 23 23:19:28 2022
+  uri: yaml_fs:///src/inventory/targets/examples/mysql.yml
+applications: []
+classes:
+  - kapitan.kube
+  - kapitan.generators.kubernetes
+  - kapitan.generators.argocd
+  - kapitan.generators.terraform
+  - kapitan.generators.rabbitmq
+  - kapitan.common
+  - common
+  - components.mysql
+environment: base
+exports: {}
+parameters:
+  _reclass_:
+    environment: base
+    name:
+      full: mysql
+      short: mysql
+  components:
+    mysql:
+      config_maps:
+        config:
+          data:
+            mysql.cnf:
+              value: ignore-db-dir=lost+found
+            mytemplate.cnf:
+              template: components/mysql/mytemplate.cnf.j2
+              values:
+                mysql:
+                  client:
+                    port: 3306
+                    socket: /var/run/mysqld/mysqld.sock
+                  mysqld:
+                    bind-address: 127.0.0.1
+                    max_allowed_packet: 64M
+                    thread_concurrency: 8
+          mount: /etc/mysql/conf.d/
+      env:
+        MYSQL_DATABASE: ''
+        MYSQL_PASSWORD:
+          secretKeyRef:
+            key: mysql-password
+        MYSQL_ROOT_PASSWORD:
+          secretKeyRef:
+            key: mysql-root-password
+        MYSQL_USER: ''
+      image: mysql:5.7.28
+      ports:
+        mysql:
+          service_port: 3306
+      secrets:
+        secrets:
+          data:
+            mysql-password:
+              value: ?{plain:targets/mysql/mysql-password||randomstr|base64}
+            mysql-root-password:
+              value: ?{plain:targets/mysql/mysql-root-password||randomstr:32|base64}
+          versioned: true
+      type: statefulset
+      volume_claims:
+        datadir:
+          spec:
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: 10Gi
+            storageClassName: standard
+      volume_mounts:
+        datadir:
+          mountPath: /var/lib/mysql
+  docs:
+    - templates/docs/README.md
+  generators:
+    manifest:
+      default_config:
+        annotations:
+          manifests.kapicorp.com/generated: 'true'
+        service_account:
+          create: false
+        type: deployment
+  kapitan:
+    compile:
+      - input_paths:
+          - components/generators/kubernetes
+        input_type: kadet
+        output_path: manifests
+        output_type: yml
+      - input_params:
+          function: generate_docs
+          template_path: templates/docs/service_component.md.j2
+        input_paths:
+          - components/generators/kubernetes
+        input_type: kadet
+        output_path: docs
+        output_type: plain
+      - input_params:
+          function: generate_pre_deploy
+        input_paths:
+          - components/generators/kubernetes
+        input_type: kadet
+        output_path: pre-deploy
+        output_type: yml
+      - input_paths:
+          - components/generators/argocd
+        input_type: kadet
+        output_path: argocd
+        output_type: yml
+      - input_params:
+          generator_root: resources.tf
+        input_paths:
+          - components/generators/terraform
+        input_type: kadet
+        output_path: terraform
+        output_type: json
+      - ignore_missing: true
+        input_paths:
+          - resources/state/mysql/.terraform.lock.hcl
+        input_type: copy
+        output_path: terraform/
+      - input_paths:
+          - components/generators/rabbitmq
+        input_type: kadet
+        output_path: rabbitmq
+        output_type: yml
+      - input_paths:
+          - templates/docs/README.md
+        input_type: jinja2
+        output_path: docs
+      - input_paths: []
+        input_type: jinja2
+        output_path: scripts
+      - input_paths: []
+        input_type: jsonnet
+        output_path: manifests
+        output_type: yml
+    dependencies:
+      - output_path: lib/kube.libsonnet
+        source: https://raw.githubusercontent.com/bitnami-labs/kube-libsonnet/master/kube.libsonnet
+        type: https
+      - output_path: lib/kube-platforms.libsonnet
+        source: https://raw.githubusercontent.com/bitnami-labs/kube-libsonnet/master/kube-platforms.libsonnet
+        type: https
+      - output_path: components/generators/kubernetes
+        ref: master
+        source: https://github.com/kapicorp/kapitan-reference.git
+        subdir: components/generators/kubernetes
+        type: git
+      - output_path: components/generators/terraform
+        ref: master
+        source: https://github.com/kapicorp/kapitan-reference.git
+        subdir: components/generators/terraform
+        type: git
+    vars:
+      target: mysql
+  manifests: []
+  mysql:
+    settings:
+      client:
+        port: 3306
+        socket: /var/run/mysqld/mysqld.sock
+      mysqld:
+        bind-address: 127.0.0.1
+        max_allowed_packet: 64M
+        thread_concurrency: 8
+  namespace: mysql
+  scripts: []
+  target_name: mysql
+
+
+
diff --git a/dev/pages/commands/kapitan_lint/index.html (new file: "lint")

CLI Reference | kapitan lint

+

kapitan lint

+

Perform a checkup on your inventory or refs.

+
+
./kapitan lint
+
+
+click to expand output +
Running yamllint on all inventory files...
+
+.yamllint not found. Using default values
+File ./inventory/classes/components/echo-server.yml has the following issues:
+        95:29: forbidden implicit octal value "0550" (octal-values)
+File ./inventory/classes/terraform/gcp/services.yml has the following issues:
+        15:11: duplication of key "enable_compute_service" in mapping (key-duplicates)
+
+Total yamllint issues found: 2
+
+Checking for orphan classes in inventory...
+
+No usage found for the following 6 classes:
+{'components.argoproj.cd.argocd-server-oidc',
+'components.helm.cert-manager-helm',
+'components.rabbitmq-operator.rabbitmq-configuration',
+'components.rabbitmq-operator.rabbitmq-operator',
+'features.gkms-demo',
+'projects.localhost.kubernetes.katacoda'}
+
+
+
diff --git a/dev/pages/commands/kapitan_searchvar/index.html (new file: "searchvar")

CLI Reference | kapitan searchvar

+

kapitan searchvar

+

Shows all inventory files where a variable is declared:

+
+
./kapitan searchvar parameters.components.*.image
+
+
+click to expand output +
./inventory/classes/components/vault.yml                     ${vault:image}
+./inventory/classes/components/logstash.yml                  eu.gcr.io/antha-images/logstash:7.5.1
+./inventory/classes/components/gke-pvm-killer.yml            estafette/estafette-gke-preemptible-killer:1.2.5
+./inventory/classes/components/mysql.yml                     mysql:5.7.28
+./inventory/classes/components/postgres-proxy.yml            gcr.io/cloudsql-docker/gce-proxy:1.16
+./inventory/classes/components/echo-server.yml               jmalloc/echo-server
+./inventory/classes/components/trivy.yml                     ${trivy:image}
+./inventory/classes/components/filebeat.yml                  ${filebeat:image}:${filebeat:version}
+./inventory/classes/components/pritunl/pritunl-mongo.yml     docker.io/bitnami/mongodb:4.2.6-debian-10-r23
+./inventory/classes/components/pritunl/pritunl.yml           alledm/pritunl
+./inventory/classes/components/weaveworks/user-db.yml        weaveworksdemos/user-db:0.3.0
+./inventory/classes/components/weaveworks/catalogue.yml      weaveworksdemos/catalogue:0.3.5
+./inventory/classes/components/weaveworks/user.yml           weaveworksdemos/user:0.4.7
+./inventory/classes/components/weaveworks/session-db.yml     redis:alpine
+./inventory/classes/components/weaveworks/catalogue-db.yml   weaveworksdemos/catalogue-db:0.3.0
+./inventory/classes/components/weaveworks/carts-db.yml       mongo
+./inventory/classes/components/weaveworks/orders-db.yml      mongo
+./inventory/classes/components/weaveworks/orders.yml         weaveworksdemos/orders:0.4.7
+./inventory/classes/components/weaveworks/shipping.yml       weaveworksdemos/shipping:0.4.8
+./inventory/classes/components/weaveworks/queue-master.yml   weaveworksdemos/queue-master:0.3.1
+./inventory/classes/components/weaveworks/rabbitmq.yml       rabbitmq:3.6.8-management
+./inventory/classes/components/weaveworks/payment.yml        weaveworksdemos/payment:0.4.3
+./inventory/classes/components/weaveworks/front-end.yml      weaveworksdemos/front-end:0.3.12
+./inventory/classes/components/weaveworks/carts.yml          weaveworksdemos/carts:0.4.8
+./inventory/classes/components/kapicorp/tesoro.yml           kapicorp/tesoro
+
+
+
diff --git a/dev/pages/commands/kapitan_validate/index.html (new file: "validate")

CLI Reference | kapitan validate

+

kapitan validate

+

Validates the schema of compiled output. Validate options are specified in the inventory under parameters.kapitan.validate. Currently the only supported type is kubernetes (see below).

+

Usage

+
+
+
+
+
kapitan validate
+
+
+click to expand output +
created schema-cache-path at ./schemas
+Validation: manifest validation successful for ./compiled/minikube-mysql/manifests/mysql_secret.yml
+Validation: manifest validation successful for ./compiled/minikube-mysql/manifests/mysql_service_jsonnet.yml
+Validation: manifest validation successful for ./compiled/minikube-mysql/manifests/mysql_service_simple.yml
+
+
+
+
+
+
+
kapitan compile --validate
+
+
+click to expand output +
Rendered inventory (0.27s)
+Compiled labels (0.23s)
+Compiled removal (0.00s)
+Compiled busybox (0.24s)
+Compiled minikube-nginx-jsonnet (0.49s)
+Compiled minikube-nginx-kadet (0.25s)
+Compiled minikube-mysql (0.59s)
+Compiled minikube-es (1.17s)
+Compiled all-glob (1.55s)
+Validation: manifest validation successful for ./compiled/minikube-mysql/manifests/mysql_secret.yml
+Validation: manifest validation successful for ./compiled/minikube-mysql/manifests/mysql_service_jsonnet.yml
+Validation: manifest validation successful for ./compiled/minikube-mysql/manifests/mysql_service_simple.yml
+
+
+
+
+
+

You can leverage the .kapitan dotfile to make sure validate runs every time you run compile.

+
+
+

example .kapitan

+
...
+
+compile:
+  validate: true
+
+
+

The validate command will now be implied for every compile run +

kapitan compile
+

+
+click to expand output +
Rendered inventory (0.27s)
+Compiled labels (0.23s)
+Compiled removal (0.00s)
+Compiled busybox (0.24s)
+Compiled minikube-nginx-jsonnet (0.49s)
+Compiled minikube-nginx-kadet (0.25s)
+Compiled minikube-mysql (0.59s)
+Compiled minikube-es (1.17s)
+Compiled all-glob (1.55s)
+Validation: manifest validation successful for ./compiled/minikube-mysql/manifests/mysql_secret.yml
+Validation: manifest validation successful for ./compiled/minikube-mysql/manifests/mysql_service_jsonnet.yml
+Validation: manifest validation successful for ./compiled/minikube-mysql/manifests/mysql_service_simple.yml
+
+
+
+
+
+
+

Kubernetes Setup

+

Kubernetes has different resource kinds, for instance:

+
    +
  • service
  • +
  • deployment
  • +
  • statefulset
  • +
+

Kapitan has built-in support for validation of Kubernetes kinds, and automatically integrates with https://kubernetesjsonschema.dev. See github.com/instrumenta/kubernetes-json-schema for more information.

+
+

Info

+

Kapitan will automatically download the schemas for Kubernetes Manifests directly from https://kubernetesjsonschema.dev

+

By default, the schemas are cached into ./schemas/, which can be modified with the --schemas-path option.

+
+

override permanently schema-path

+

Remember to use the .kapitan dotfile configuration to permanently override the schema-path location.

+
$ cat .kapitan
+# other options abbreviated for clarity
+validate:
+  schemas-path: custom/schemas/cache/path
+
+
+
+

Example

+

Refer to the mysql example.

+
kubernetes/inventory/classes/component/mysql.yml
    validate: 
+    - type: kubernetes # mkdocs (1)! 
+      output_paths: # mkdocs (2)! 
+        - manifests/mysql_secret.yml
+      kind: secret # temporarily replaced with 'deployment' during test # mkdocs (3)! 
+      version: 1.14.0 # optional, defaults to 1.14.0 # mkdocs (4)! 
+    - type: kubernetes
+      output_paths:
+        - manifests/mysql_service_jsonnet.yml
+        - manifests/mysql_service_simple.yml
+      kind: service
+      version: 1.14.0
+
+
    +
  1. type | currently only Kubernetes is supported
  2. +
  3. output_paths | list of files to validate
  4. +
  5. kind | a Kubernetes resource kind
  6. +
  7. version | a Kubernetes API version, defaults to 1.14.0
  8. +
diff --git a/dev/pages/contribute/code/index.html (new file: "Kapitan Code")

Kapitan code

+

Many of our features come from contributions from external collaborators. Please help us improve Kapitan by extending it with your ideas, or help us squash bugs you discover.

+

It's simple, just send us a PR with your improvements!

+

Submitting code

+

We would like to ask you to fork the Kapitan project and create a Pull Request targeting the master branch. All submissions, including submissions by project members, require review.

+

Setup

+

We build kapitan using poetry.

+
    +
  1. +

    Install poetry

    +
    pip install poetry
    +
    +
  2. +
  3. +

    Install dependencies

    +
    poetry install --with test
    +
    +

    Poetry creates a virtual environment with the required dependencies installed.

    +
  4. +
  5. +

    Run kapitan with your own local copy of the code

    +
    poetry run kapitan <your command>
    +
    +
  6. +
+

Because we are using a pinned version of reclass, which is added as a submodule to Kapitan's repository, you need to pull it separately by executing the command below:

+
git submodule update --init
+
+

Troubleshoot

+

Check if gcc is installed:

+
brew install gcc@5
+
+

Testing

+

Run make test to run all tests. If you modify anything in the examples/ folder, make sure you replicate the compiled result of that in tests/test_kubernetes_compiled. If you add new features, run make test_coverage && make test_formatting to make sure the test coverage remains at current or better levels and that code formatting is applied.

+

If you would like to evaluate your changes by running your version of Kapitan, you can do that by running bin/kapitan from this repository or even setting an alias to it.

+
python3 -m unittest tests/test_vault_transit.py
+
+

Code Style

+

To make sure you adhere to the Style Guide for Python (PEP8), Python Black is used to apply the formatting, so make sure you have it installed with pip3 install black.

+

Apply via Git hook

+
    +
  • Run pip3 install pre-commit to install precommit framework.
  • +
  • In the Kapitan root directory, run pre-commit install
  • +
  • Git add/commit any changed files you want.
  • +
+

Apply manually

+

Run make format_codestyle before submitting.

+

Release process

+
    +
  • Create a branch named release-v<NUMBER>. Use v0.*.*-rc.* if you want pre-release versions to be uploaded.
  • +
  • Update CHANGELOG.md with the release changes.
  • +
  • Once reviewed and merged, Github Actions will auto-release.
  • +
  • The merge has to happen with a merge commit, not with squash/rebase, so that the commit message still mentions kapicorp/release-v* inside.
  • +
+

Packaging extra resources in python package

+

To package any extra resources/files in the pip package, make sure you modify MANIFEST.in.

+

diff --git a/dev/pages/contribute/documentation/index.html (new file: "Documentation")

Documentation

+

Our documentation is usually what prevents new users from adopting Kapitan. Help us improve it by contributing fixes and keeping it up to date.

+

Articles

+

Write articles on Kapitan and share your way of working. Inspire others, and reach out to have your article published / endorsed by us.

+

This Website

+

Find something odd? Let us know or change it yourself: you can edit pages of this website on Github by clicking the pencil icon at the top right of this page!

+

Update documentation

+

We use mkdocs to generate our gh-pages from .md files under docs/ folder.

+

Updating our gh-pages is therefore a two-step process.

+

Update the markdown

+

Submit a PR for our master branch that updates the .md file(s). Test how the changes would look when deployed to gh-pages by serving them on localhost:

+
    +
  1. Edit the strict property in mkdocs.yml and set it to false.
  2. +
  3. make local_serve_documentation
  4. +
  5. Now the documentation site should be available at localhost:8000.
  6. +
+

Submit a PR

+

Once the above PR has been merged, use the mkdocs gh-deploy command to push the commit that updates the site content to your own gh-pages branch. Make sure that the gh-pages branch in your fork is up to date with our gh-pages branch, so that the two branches share the commit history (otherwise GitHub would not allow PRs to be created).

+
# locally, on master branch (which has your updated docs)
+COMMIT_MSG="your commit message to replace" make mkdocs_gh_deploy
+
+

After it's pushed, create a PR that targets our gh-pages branch from your gh-pages branch.

diff --git a/dev/pages/contribute/sponsor/index.html (new file: "Sponsor Us")

Sponsor Kapitan

+

Do you want to help the project? Great! There are many ways to do it

+

GitHub Sponsors +GitHub Stars

+

We accept donations through GitHub Sponsors. Alternatively, reach out for other ways to support us.

+

Companies and individuals sponsoring us on a regular basis will be recognised and called out on our website.

diff --git a/dev/pages/contribute/talk/index.html (new file: "Talk about Kapitan")

Talk about Kapitan

+

Our project needs your support to get noticed! Please let everyone know that you are using Kapitan

+
    +
  • Help us grow: give us a star
  • +
  • Join us on kubernetes.slack.com #kapitan (Get invited)
  • +
  • Tweet about us on Twitter. Remember to add @kapitandev to your tweets
  • +
  • Share our website https://kapitan.dev
  • +
  • Write tutorials and blog posts and join the many who have done it already! Get published on the Kapitan Blog
  • +
  • Share what Kapitan does for you and for your company
  • +
  • Inspire your colleagues and network on LinkedIn
  • +
diff --git a/dev/pages/external_dependencies/index.html (new file: "External dependencies")

External dependencies

+

Kapitan has the functionality to fetch external dependencies from remote locations.

+

Supported dependency types are:

+ +

Usage

+

Kapitan by default will not attempt to download any dependency, and will rely on what is already available.

+

Basic fetching

+

You can use the fetch option to explicitly fetch the dependencies:

+
+
+
+
kapitan compile --fetch
+
+
+
+
+

.kapitan

+

To make this the default, so that you can then simply run kapitan compile:

+
...
+compile:
+  fetch: true 
+
+
+
+
+
+

This will download the dependencies and store them at their respective output_path.

+

Overwrite local changes

+

When fetching a dependency, Kapitan will refuse to overwrite existing files to preserve your local modifications.

+

Use the force-fetch option to force overwrite your local files in the output_path.

+
+
+
+
kapitan compile --force-fetch
+
+
+
+
+

.kapitan

+

To make this the default behaviour, add the following to your .kapitan config file; then simply run kapitan compile

+
...
+compile:
+  force-fetch: true 
+
+
+
+
+
+

Caching

+

Kapitan also supports caching. Use the --cache flag to cache the fetched items in the .dependency_cache directory in the root project directory.

+
kapitan compile --cache --fetch
+
+
+

Defining dependencies

+
+
+
+

Syntax

+
parameters:
+  kapitan:
+    dependencies:
+    - type: git
+      output_path: path/to/dir
+      source: git_url # mkdocs (1)!
+      subdir: relative/path/from/repo/root (optional) # mkdocs (2)!
+      ref: tag, commit, branch etc. (optional) # mkdocs (3)!
+      submodules: true/false (optional) # mkdocs (4)!
+
+
    +
  1. Git types can fetch external git repositories through either HTTP/HTTPS or SSH URLs.
  2. Optional support for cloning just a sub-directory.
  3. Optional support for accessing them at specific commits and branches (refs).
  4. Optional support to disable fetching the submodules of a repo.
+
+

Note

+

This type depends on the git binary being installed on your system and available to Kapitan.

+
+

Example

+

Say we want to fetch the source code from our kapitan repository, specifically, kapicorp/kapitan/kapitan/version.py. Let's create a very simple target file inventory/targets/kapitan-example.yml.

+
parameters:
+  kapitan:
+    vars:
+      target: kapitan-example
+    dependencies:
+    - type: git
+      output_path: source/kapitan
+      source: git@github.com:kapicorp/kapitan.git
+      subdir: kapitan
+      ref: master
+      submodules: true
+    compile:
+    - input_paths:
+      - source/kapitan/version.py
+      input_type: jinja2 # just to copy the file over to target
+      output_path: .
+
+
+
+

Syntax

+
parameters:
+  kapitan:
+    dependencies:
+    - type: http | https # mkdocs (2)!
+      output_path: path/to/file # mkdocs (1)!
+      source: http[s]://<url> # mkdocs (2)!
+      unpack: True | False # mkdocs (3)! 
+
+
    +
  1. output_path must fully specify the file name. For example:
  2. http[s] types can fetch external dependencies available at an http:// or https:// URL.
  3. archive mode: download and unpack
+

Example

+
+
+
+
+
+
+

Say we want to download kapitan README.md file. Since it's on Github, we can access it as https://raw.githubusercontent.com/kapicorp/kapitan/master/README.md. Using the following inventory, we can copy this to our target folder:

+
parameters:
+  kapitan:
+    vars:
+      target: kapitan-example
+    dependencies:
+    - type: https
+      output_path: README.md
+      source: https://raw.githubusercontent.com/kapicorp/kapitan/master/README.md
+    compile:
+    - input_paths:
+      - README.md
+      input_type: jinja2
+      output_path: .
+
+
+
+

Syntax

+
parameters:
+  kapitan:
+    dependencies:
+    - type: helm
+      output_path: path/to/chart
+      source: http[s]|oci://<helm_chart_repository_url>
+      version: <specific chart version>
+      chart_name: <name of chart>
+      helm_path: <helm binary>
+
+

Fetches helm charts and any specific subcharts in the requirements.yaml file.

+

helm_path can be used to specify the helm binary name or path. +It defaults to the value of the KAPITAN_HELM_PATH environment var, or simply to helm if that is not set. +You should only set it if you don't want the default behavior.

+

source can be either the URL to a chart repository, or the URL to a chart on an OCI registry (supported since Helm 3.8.0).

+
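For instance, a minimal sketch pinning the helm binary for a chart dependency; the repository URL, chart name, version and binary path below are illustrative, not real values:
+
parameters:
+  kapitan:
+    dependencies:
+    - type: helm
+      output_path: charts/mychart
+      source: https://charts.example.com # or an oci:// registry URL
+      chart_name: mychart
+      version: 1.2.3
+      helm_path: /usr/local/bin/helm # only needed to override the default behavior
+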

Example

+

If we want to download the prometheus helm chart, we simply add the dependency to the monitoring target. +We want a specific version, 11.3.0, so we pin it.

+
parameters:
+  kapitan:
+    vars:
+      target: monitoring
+    dependencies:
+      - type: helm
+        output_path: charts/prometheus
+        source: https://kubernetes-charts.storage.googleapis.com
+        version: 11.3.0
+        chart_name: prometheus
+    compile:
+      - input_type: helm
+        output_path: .
+        input_paths:
+          - charts/prometheus
+        helm_values:
+          alertmanager:
+            enabled: false
+        helm_params:
+          namespace: monitoring
+          name: prometheus
+
+
+
+
+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/pages/input_types/copy/index.html b/dev/pages/input_types/copy/index.html new file mode 100644 index 000000000..4f119868c --- /dev/null +++ b/dev/pages/input_types/copy/index.html @@ -0,0 +1,2010 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Copy - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Copy

+

This input type simply copies the input templates to the output directory without any rendering/processing. +For Copy, input_paths can be either a file or a directory: in the case of a directory, all the templates in the directory will be copied and written to output_path.

+

Supported output types: N/A (no need to specify output_type)

+

Example

+
 kapitan:
+    compile:
+      - input_type: copy
+        ignore_missing: true  # Do not error if path is missing. Defaults to False
+        input_paths:
+          - resources/state/${target_name}/.terraform.lock.hcl
+        output_path: terraform/
+
+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/pages/input_types/external/index.html b/dev/pages/input_types/external/index.html new file mode 100644 index 000000000..b15b0618a --- /dev/null +++ b/dev/pages/input_types/external/index.html @@ -0,0 +1,2028 @@ + + + + + + + + + + + + + + + + + + + + + + + + + External - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

External

+

This input type executes an external script or binary. This can be used to manipulate already compiled files or +execute binaries outside of kapitan that generate or manipulate files.

+

For example, ytt is a useful yaml templating tool. It is not built into the kapitan binary, +however, with the external input type, we could specify the ytt binary to be executed with specific arguments +and environment variables.

+

In this example, we're removing a label from the k8s manifests in the ingresses directory and placing the result into the compiled target directory.

+
parameters:
+  target_name: k8s-manifests
+  kapitan:
+    vars:
+      target: ${target_name}
+    compile:
+      - input_type: external
+        input_paths:
+          - /usr/local/bin/ytt # path to ytt on system
+        output_path: .
+        args:
+          - -f
+          - ingresses/ # directory with ingresses
+          - -f
+          - ytt/remove.yaml # custom ytt script
+          - ">"
+          - \${compiled_target_dir}/ingresses/ingresses.yaml # final merged result
+
+

Supported output types: N/A (no need to specify output_type)

+

Additionally, the input type supports the field env_vars, which can be used to set environment variables for the external command. +By default, the external command doesn't inherit any environment variables from Kapitan's environment. +However, if the environment variables $PATH or $HOME aren't set in env_vars, they will be propagated from Kapitan's environment to the external command's environment.

+
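As an illustration, a hedged sketch of env_vars, assuming it takes a mapping of variable names to values; the script path and values below are made up:
+
parameters:
+  kapitan:
+    compile:
+      - input_type: external
+        input_paths:
+          - scripts/generate.sh # hypothetical script
+        output_path: .
+        args:
+          - \${compiled_target_dir} # escaped, substituted by Kapitan at run time
+        env_vars:
+          MY_ENV: production # illustrative variable
+          PATH: /opt/tools/bin:/usr/bin # overrides the propagated $PATH
+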

Finally, Kapitan will substitute ${compiled_target_dir} in both the command's arguments and the environment variables. +This variable needs to be escaped in the configuration to ensure that reclass won't interpret it as a reclass reference.

+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/pages/input_types/helm/index.html b/dev/pages/input_types/helm/index.html new file mode 100644 index 000000000..7c9a8a40c --- /dev/null +++ b/dev/pages/input_types/helm/index.html @@ -0,0 +1,2152 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Helm - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Input Type | Helm

+

This is a Python binding to the helm template command for users with helm charts. This does not require the helm executable, and the templates are rendered without the Tiller server.

+

Unlike other input types, the helm input type supports the following additional parameters under kapitan.compile:

+
parameters:
+  kapitan:
+    compile:
+    - output_path: <output_path>
+      input_type: helm
+      input_paths:
+        - <chart_path>
+      helm_values:
+        <object_with_values_to_override>
+      helm_values_files:
+        - <values_file_path>
+      helm_path: <helm binary>
+      helm_params:
+        name: <chart_release_name>
+        namespace: <substitutes_.Release.Namespace>
+        output_file: <string>
+        validate: true
+        
+
+

helm_values is an object containing the values that will override the default values in the input chart. This has exactly the same effect as specifying --values custom_values.yml for the helm template command, where the structure of custom_values.yml mirrors that of helm_values.

+

helm_values_files is an array containing the paths to helm values files used as input for the chart. This has exactly the same effect as specifying --values my_custom_values.yml for the helm template command, where my_custom_values.yml is a helm values file. +If the same keys exist in helm_values and in multiple specified helm_values_files, the last file listed in helm_values_files takes precedence, followed by the preceding helm_values_files, with the helm_values defined in the compile block having the lowest precedence. +There is an example in the tests: the monitoring-dev (kapitan/tests/test_resources/inventory/targets/monitoring-dev.yml) and monitoring-prd (kapitan/tests/test_resources/inventory/targets/monitoring-prd.yml) targets both use the monitoring (tests/test_resources/inventory/classes/component/monitoring.yml) component. +This component has a helm chart input and takes a common.yml helm_values file, which is "shared" by any target that uses the component; it also takes a dynamically defined file based on a kapitan variable defined in the target.

+
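As an illustration, a hedged sketch combining helm_values with two values files; the chart and file paths are made up. The last listed file wins, and helm_values has the lowest precedence:
+
parameters:
+  kapitan:
+    compile:
+    - output_path: .
+      input_type: helm
+      input_paths:
+        - components/charts/mychart
+      helm_values:
+        replicaCount: 1 # lowest precedence
+      helm_values_files:
+        - values/common.yml # overridden by the file below
+        - values/production.yml # highest precedence
+      helm_params:
+        name: mychart
+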

helm_path can be used to provide the helm binary name or path. +helm_path defaults to the value of the KAPITAN_HELM_PATH env var if it is set, else it defaults to helm

+

helm_params corresponds to the flags for helm template. Most flags that helm supports can be used here by replacing '-' with '_' in the flag name.

+

Flags without argument must have a boolean value, all other flags require a string value.

+

Special flags:

+
    +
  • name: equivalent of helm template [NAME] parameter. Ignored if name_template is also specified. If neither name_template nor name are specified, the --generate-name flag is used to generate a name.
  • +
  • +

    output_file: name of the single file used to output all the generated resources. This is equivalent to calling helm template without specifying an output dir. If not specified, each resource is generated into a distinct file.

    +
  • +
  • +

    include_crds and skip_tests: These flags are enabled by default; set them to false to disable them.

    +
  • +
  • debug: prints the helm debug output in kapitan debug log.
  • +
  • +

    namespace: note that due to a restriction of the helm template command, specifying the namespace does not automatically add the metadata.namespace property to the resources. Therefore, users are encouraged to explicitly specify it in all resources:

    +
    metadata:
    +  namespace: {{ .Release.Namespace }} # or any other custom values
    +
    +
  • +
+
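Putting the flags above together, a hedged helm_params sketch that only uses flags mentioned on this page; the release name and namespace are illustrative:
+
helm_params:
+  name: my-release # string flag, the helm template [NAME] parameter
+  namespace: my-namespace # string flag
+  output_file: manifests.yml # render everything into a single file
+  include_crds: false # boolean flag, disables the default
+  skip_tests: false # boolean flag, disables the default
+  debug: true # boolean flag, prints the helm debug output in the kapitan debug log
+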

See the helm doc for further detail.

+

Example

+

Let's use the nginx-ingress helm chart as the input. Using the kapitan dependency manager, this chart can be fetched via a URL as listed in https://helm.nginx.com/stable/index.yaml.

+

On a side note, https://helm.nginx.com/stable/ is the chart repository URL which you would helm repo add, and this repository should contain an index.yaml that lists all the available charts and their URLs. By locating this index.yaml file, you can discover all the charts available in the repository.

+

We can use version 0.3.3 found at https://helm.nginx.com/stable/nginx-ingress-0.3.3.tgz. We can create a simple target file as inventory/targets/nginx-from-chart.yml whose content is as follows:

+
parameters:
+  kapitan:
+    vars:
+      target: nginx-from-chart
+    dependencies:
+    - type: https
+      source: https://helm.nginx.com/stable/nginx-ingress-0.3.3.tgz
+      unpack: True
+      output_path: components/charts
+    compile:
+      - output_path: .
+        input_type: helm
+        input_paths:
+          - components/charts/nginx-ingress
+        helm_values:
+          controller:
+            name: my-controller
+            image:
+              repository: custom_repo
+        helm_params:
+          name: my-first-release-name
+          namespace: my-first-namespace
+
+

To compile this target, run:

+
$ kapitan compile --fetch
+Dependency https://helm.nginx.com/stable/nginx-ingress-0.3.3.tgz : fetching now
+Dependency https://helm.nginx.com/stable/nginx-ingress-0.3.3.tgz : successfully fetched
+Dependency https://helm.nginx.com/stable/nginx-ingress-0.3.3.tgz : extracted to components/charts
+Compiled nginx-from-chart (0.07s)
+
+

The chart is fetched before compile, which creates the components/charts/nginx-ingress folder that is used as the input_paths for the helm input type. To confirm that the helm_values have actually overridden the default values, we can try:

+
$ grep "my-controller" compiled/nginx-from-chart/nginx-ingress/templates/controller-deployment.yaml
+  name: my-controller
+      app: my-controller
+        app: my-controller
+
+

Building the binding from source

+

Run

+
cd kapitan/inputs/helm
+./build.sh
+
+

This requires Go 1.14.

+

Helm subcharts

+

There is an external dependency manager of type helm which enables you to specify helm +charts to download, including subcharts.

+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/pages/input_types/introduction/index.html b/dev/pages/input_types/introduction/index.html new file mode 100644 index 000000000..ac0b9fe6f --- /dev/null +++ b/dev/pages/input_types/introduction/index.html @@ -0,0 +1,2235 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Introduction - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Introduction

+

Note: make sure to read up on inventory before moving on.

+

Phases of the compile command

+

Now that we have a basic understanding of Kapitan inventory, we can talk about the kapitan compile command.

+

The command has five distinct phases:

+
graph LR
+  classDef pink fill:#f9f,stroke:#333,stroke-width:4px,color:#000,font-weight: bold;
+  classDef blue fill:#00FFFF,stroke:#333,stroke-width:4px,color:#000,font-weight: bold;
+  INVENTORY["Inventory"]:::pink
+
+  COMPILE["Compile"]:::pink
+  FINISH["Finish"]:::pink
+  COPY["Copy"]:::pink
+
+
+  subgraph "fetch"
+    F{"fetch?"}
+    FETCH["fetch dependencies"]
+  end 
+
+  subgraph "validate"
+    V{"validate?"}
+    VALIDATE["Validate"]
+  end
+
+  subgraph "reveal"
+    REVEAL["Reveal"]
+    R{"reveal?"}
+  end
+
+  INVENTORY --> F
+  F --> |yes| FETCH
+  FETCH --> COMPILE
+  F ==> |no| COMPILE
+  COMPILE ==> R
+  R ==> |no| COPY
+  R --> |yes| REVEAL
+  REVEAL --> COPY
+  COPY --> V
+  V --> |yes| VALIDATE
+  V ==> |no| FINISH
+  VALIDATE --> FINISH
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
StepFlagDescriptionConfiguration
InventoryKapitan uses reclass to render a final version of the inventory.
Fetch--fetchKapitan fetches external dependenciesparameters.kapitan.dependencies
CompileKapitan compiles the input types for each targetparameters.kapitan.compile
Reveal--revealKapitan reveals the secrets directly in the compiled outputparameters.kapitan.secrets
CopyKapitan moves the output files from the tmp directory to /compiled
Validate--validateKapitan validates the schema of compiled output.parameters.kapitan.validate
FinishKapitan has completed all tasks
+
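For example, the optional phases above are enabled with their respective flags, so an illustrative invocation combining all of them would be:
+
kapitan compile --fetch --reveal --validate
+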

Supported input types

+

Input types can be specified in the inventory under kapitan.compile in the following format:

+
+
+
+
parameters:
+  kapitan:
+    compile:
+    - output_path: <output_path_in_target_dir>
+      input_type: jinja2 
+      input_params: # (1)! 
+      input_paths:
+        - directory/
+        - file
+        - globbed/path/*
+
+
    +
  1. a dict passed to the template
  2. +
+

Please see Jinja

+
+
+
parameters:
+  kapitan:
+    compile:
+    - output_path: <output_path_in_target_dir>
+      input_type: jsonnet 
+      prune: false # (1)! 
+      input_paths:
+        - directory/
+        - file
+        - globbed/path/*
+      output_type: [`yaml` | `json`]
+
+
    +
  1. (Default: global --prune)
  2. +
+
+
+
parameters:
+  kapitan:
+    compile:
+    - output_path: <output_path_in_target_dir>
+      input_type: kadet 
+      prune: false # (1)! 
+      input_paths:
+        - directory/
+        - file
+        - globbed/path/*
+      output_type: [`yaml` | `json`]
+
+
    +
  1. (Default: global --prune)
  2. +
+

Please see Kadet

+
+
+
parameters:
+  kapitan:
+    compile:
+    - output_path: <output_path_in_target_dir>
+      input_type: helm 
+      prune: false # (1)! 
+      input_paths:
+        - directory/
+        - file
+        - globbed/path/*
+      output_type: <output_type_specific_to_input_type>
+
+
    +
  1. (Default: global --prune)
  2. +
+
+
+
parameters:
+  kapitan:
+    compile:
+    - output_path: <output_path_in_target_dir>
+      input_type: copy 
+      prune: false # (1)! 
+      input_paths:
+        - directory/
+        - file
+        - globbed/path/*
+      output_type: <output_type_specific_to_input_type>
+
+
    +
  1. (Default: global --prune)
  2. +
+
+
+
+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/pages/input_types/jinja/index.html b/dev/pages/input_types/jinja/index.html new file mode 100644 index 000000000..c7e6eaa92 --- /dev/null +++ b/dev/pages/input_types/jinja/index.html @@ -0,0 +1,2344 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Jinja - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Input Type | Jinja2

+

This input type is probably the simplest input type to use: it is very versatile and is commonly used to create scripts and documentation files.

+

It renders jinja2 templates.

+

Example configuration

+

Here's some configuration from the nginx example

+
+

examples/kubernetes/inventory/classes/component/nginx-common.yml

+
  templates: #(1)!
+    - docs/nginx/README.md
+    - components/nginx-deploy.sh
+
+  kapitan:
+    compile:
+      - output_path: . #(2)!
+        input_type: jinja2
+        input_paths: ${templates} #(3)!
+
+
    +
  1. We define a list with all the templates we want to compile with this input type
  2. The input type will then render the files at the root of the target compiled folder e.g. compiled/${target_name}
  3. We pass the list as input_paths
+
+
+

Notice how we make use of variable interpolation to leverage the convenience of a list to add all the files we want to compile. + You can now simply add to that list from any other place in the inventory that calls that class.

+
+
    +
  • input_paths can either be a file or a directory: in the case of a directory, all the templates in the directory will be rendered.
  • +
  • input_params (optional) can be used to pass extra parameters, helpful when you need to use a similar template for multiple components in the same target (see the sketch after this list).
  • +
+
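As a sketch of input_params (the template path, parameter name and values are made up), the same template is compiled twice with different parameters:
+
parameters:
+  kapitan:
+    compile:
+      - output_path: docs/frontend
+        input_type: jinja2
+        input_paths:
+          - templates/docs/service-readme.md # hypothetical shared template
+        input_params:
+          service_name: frontend
+      - output_path: docs/backend
+        input_type: jinja2
+        input_paths:
+          - templates/docs/service-readme.md
+        input_params:
+          service_name: backend
+
Inside service-readme.md the value could then be read with something like {{ input_params.service_name }}.
+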

Documentation

+

We usually store documentation templates under the templates/docs directory.

+
+
+

examples/kubernetes/docs/nginx/README.md

+
{% set i = inventory.parameters %}
+
+# Welcome to the README!
+
+Target *{{ i.target_name }}* is running:
+
+* {{ i.nginx.replicas }} replicas of *nginx* running nginx image {{ i.nginx.image }}
+* on cluster {{ i.cluster.name }}
+
+
+
+

Compiled result

+
# Welcome to the README!
+
+Target *minikube-nginx-jsonnet* is running:
+
+* 1 replicas of *nginx* running nginx image nginx:1:15.8
+* on cluster minikube
+
+
+
+

Scripts

+

When we use Jinja to render scripts, we tend to call them "canned scripts" to indicate that these scripts have everything needed to run without extra parameters.

+

We usually store script templates under the templates/scripts directory.

+
+
+

examples/kubernetes/components/nginx-deploy.sh

+
#!/bin/bash -e
+DIR=$(dirname ${BASH_SOURCE[0]})
+{% set i = inventory.parameters %} #(1)!
+
+KUBECTL="kubectl -n {{i.namespace}}" #(2)!
+
+# Create namespace before anything else
+${KUBECTL} apply -f ${DIR}/pre-deploy/namespace.yml
+
+for SECTION in manifests
+do
+  echo "## run kubectl apply for ${SECTION}"
+  ${KUBECTL} apply -f ${DIR}/${SECTION}/ | column -t
+done
+
+
    +
  1. We import the inventory as a Jinja variable
  2. We use it to set the namespace explicitly
+
+
+

Compiled result

+
#!/bin/bash -e
+DIR=$(dirname ${BASH_SOURCE[0]})
+ #(1)!
+
+KUBECTL="kubectl -n minikube-nginx-jsonnet" #(2)!
+
+# Create namespace before anything else
+${KUBECTL} apply -f ${DIR}/pre-deploy/namespace.yml
+
+for SECTION in manifests
+do
+  echo "## run kubectl apply for ${SECTION}"
+  ${KUBECTL} apply -f ${DIR}/${SECTION}/ | column -t
+done
+
+
    +
  1. The script is now a "canned script" and ready to be used for this specific target.
  2. You can see that the namespace has been replaced with the target's one.
+
+
+

Accessing the inventory

+

Templates will be provided at runtime with 3 variables:

+
    +
  • inventory: To access the inventory for that specific target.
  • +
  • inventory_global: To access the inventory of all targets.
  • +
  • input_params: To access the optional dictionary provided to the input type.
  • +
+
+

Use of inventory_global

+

inventory_global can be used to generate a "global" README.md that contains links to all generated targets. +

| *Target*                                                               |
+|------------------------------------------------------------------------|
+{% for target in inventory_global | sort() %}
+{% set p = inventory_global[target].parameters %}
+|[{{target}}](../{{target}}/docs/README.md)                              |
+{% endfor %}
+

+
+

Compiled result

+
| *Target*                                                               |
+|------------------------------------------------------------------------|
+| [argocd](../argocd/docs/README.md)                                     |
+| [dev-sockshop](../dev-sockshop/docs/README.md)                         |
+| [echo-server](../echo-server/docs/README.md)                           |
+| [examples](../examples/docs/README.md)                                 |
+| [gke-pvm-killer](../gke-pvm-killer/docs/README.md)                     |
+| [global](../global/docs/README.md)                                     |
+| [guestbook-argocd](../guestbook-argocd/docs/README.md)                 |
+| [kapicorp-demo-march](../kapicorp-demo-march/docs/README.md)           |
+| [kapicorp-project-123](../kapicorp-project-123/docs/README.md)         |
+| [kapicorp-terraform-admin](../kapicorp-terraform-admin/docs/README.md) |
+| [mysql](../mysql/docs/README.md)                                       |
+| [postgres-proxy](../postgres-proxy/docs/README.md)                     |
+| [pritunl](../pritunl/docs/README.md)                                   |
+| [prod-sockshop](../prod-sockshop/docs/README.md)                       |
+| [sock-shop](../sock-shop/docs/README.md)                               |
+| [tesoro](../tesoro/docs/README.md)                                     |
+| [tutorial](../tutorial/docs/README.md)                                 |
+| [vault](../vault/docs/README.md)                                       |
+
+
+
+

Jinja2 custom filters

+

We support the following custom filters for use in Jinja2 templates:

+
+
+
+
+
+
+
+

SHA256 hashing of text

+

{{ text | sha256 }}

+
+
+
+
+

Dump text as YAML

+

{{ text | yaml }}

+
+
+
+
+

Dump text as TOML

+

{{ text | toml }}

+
+
+
+
+

base64 encode text

+

{{ text | b64encode }}

+
+
+
+
+

base64 decode text

+

{{ text | b64decode }}

+
+
+
+
+
+
+
+
+
+
+

return datetime object for string

+

{{ "2019-03-07 13:37:00" | to_datetime }}

+
+
+
+
+

return current date string for format

+

{{ "%a, %d %b %Y %H:%M" | strftime }}

+
+
+
+
+
+
+
+
+
+
+

perform a re.sub returning a string

+

{{ "hello world" | regex_replace(pattern="world", replacement="kapitan") }}

+
+
+
+
+

escape all regular expressions special characters from string

+

{{ "+s[a-z].*" | regex_escape}}

+
+
+
+
+

perform re.search and return the list of matches or a backref

+

{{ "hello world" | regex_search("world.*") }}

+
+
+
+
+

perform re.findall and return the list of matches as array

+

{{ "hello world" | regex_findall("world.*") }}

+
+
+
+
+
+
+
+

return list of matched regular files for glob

+

{{ ./path/file* | fileglob }}

+
+
+
+
+

return the bool for value

+

{{ "yes" | bool }}

+
+
+
+
+

value ? true_val : false_val

+

{{ condition | ternary("yes", "no")}}

+
+
+
+
+

randomly shuffle elements of a list

+

{{ [1, 2, 3, 4, 5] | shuffle }}

+
+
+
+
+

reveal ref/secret tag only if compile --reveal flag is set

+

{{ "?{base64:my_ref}" | reveal_maybe}}

+
+
+
+
+
+

Tip

+

You can also provide the path to your custom filter modules via the CLI. By default, you can put your filters in lib/jinja2_filters.py and they will automatically get loaded.

+
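As a minimal sketch, assuming Kapitan registers each top-level function in this module as a filter under its function name; the obfuscate filter below is made up:
+
# lib/jinja2_filters.py
+def obfuscate(value, keep=2):
+    """Mask a string, keeping only the first `keep` characters."""
+    value = str(value)
+    return value[:keep] + "*" * max(len(value) - keep, 0)
+
It could then be used in a template as {{ some_secret | obfuscate }}.
+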
+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/pages/input_types/jsonnet/index.html b/dev/pages/input_types/jsonnet/index.html new file mode 100644 index 000000000..42a57c928 --- /dev/null +++ b/dev/pages/input_types/jsonnet/index.html @@ -0,0 +1,2212 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Jsonnet - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Input Type | Jsonnet

+

Jsonnet is a superset of json format that includes features such as conditionals, variables and imports. Refer to jsonnet docs to understand how it works.

+

Note: unlike jinja2 templates, one jsonnet template can output multiple files (one per object declared in the file).

+

Accessing the inventory

+

Typical jsonnet files would start as follows:

+
local kap = import "lib/kapitan.libjsonnet"; #(1)!
+local inv = kap.inventory(); #(2)!
+local p = inv.parameters; #(3)!
+
+{
+    "data_java_opts": p.elasticsearch.roles.data.java_opts, #(4)!
+}
+
+
    +
  1. Import the Kapitan inventory library.
  2. Assign the content of the full inventory for this specific target to the inv variable.
  3. Assign the content of inventory.parameters to a variable p for convenience.
  4. Use the p variable to access a specific inventory value.
+

Note: The dictionary keys of the jsonnet object are used as filenames for the generated output files. +If your jsonnet is not a dictionary, but is a valid json(net) object, then the output filename will be the same as the input filename. E.g. if 'my_string' is inside templates/input_file.jsonnet, the generated output file will be named input_file.json and will contain "my_string".

+
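For instance, a hedged jsonnet sketch where each top-level key produces its own output file; the resource contents are illustrative:
+
local kap = import "lib/kapitan.libjsonnet";
+local inv = kap.inventory();
+
+{
+  // compiles to namespace.yaml (or .json, depending on output_type)
+  namespace: { apiVersion: "v1", kind: "Namespace", metadata: { name: inv.parameters.target_name } },
+  // compiles to limits.yaml
+  limits: { apiVersion: "v1", kind: "LimitRange", metadata: { name: "defaults" } },
+}
+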

Jinja2 templating

+

Kapitan allows you to compile a Jinja template from within Jsonnet:

+
local kap = import "lib/kapitan.libjsonnet";
+
+{
+    "jon_snow": kap.jinja2_template("templates/got.j2", { is_dead: false }),
+}
+
+

Callback functions

+

In addition, importing kapitan.libjsonnet makes available the following native_callback functions gluing reclass to jsonnet (amongst others):

+
+
+
+
+

returns a dictionary with the inventory for target

+
+
+
+
+

renders the jinja2 file with context specified

+
+
+
+
+
+
+
+

returns a json string of the specified yaml file

+
+
+
+
+

returns a list of json strings of the specified yaml file

+
+
+
+
+

returns a string yaml from a json string

+
+
+
+
+

returns a string yaml stream from a json string

+
+
+
+
+
+
+
+
+
+
+

reads the file specified

+
+
+
+
+

returns informative object if a file exists

+
+
+
+
+

returns a list of file in a dir

+
+
+
+
+

returns an object with keys - file_name and values - file contents

+
+
+
+
+
+
+
+

returns sha256 of string

+
+
+
+
+

returns base64 encoded gzip of obj

+
+
+
+
+

validates obj with schema, returns object with 'valid' and 'reason' keys

+
+
+
+
+

Jsonschema validation

+

Given the following example inventory:

+
mysql:
+  storage: 10G
+  storage_class: standard
+  image: mysql:latest
+
+

The yaml inventory structure can be validated with the jsonschema() function:

+
local schema = {
+    type: "object",
+    properties: {
+        storage: { type: "string", pattern: "^[0-9]+[MGT]{1}$"},
+        image: { type: "string" },
+    }
+};
+// run jsonschema validation
+local validation = kap.jsonschema(inv.parameters.mysql, schema);
+// assert valid, otherwise error with validation.reason
+assert validation.valid: validation.reason;
+
+

If validation.valid is not true, it will then fail compilation and display validation.reason.

+
+

Fails validation because storage has an invalid pattern (10Z)

+
Jsonnet error: failed to compile /code/components/mysql/main.jsonnet:
+RUNTIME ERROR: '10Z' does not match '^[0-9]+[MGT]{1}$'
+
+Failed validating 'pattern' in schema['properties']['storage']:
+    {'pattern': '^[0-9]+[MGT]{1}$', 'type': 'string'}
+
+On instance['storage']:
+    '10Z'
+
+/code/mysql/main.jsonnet:(19:1)-(43:2)
+
+Compile error: failed to compile target: minikube-mysql
+
+
+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/pages/input_types/kadet/index.html b/dev/pages/input_types/kadet/index.html new file mode 100644 index 000000000..81ff061c5 --- /dev/null +++ b/dev/pages/input_types/kadet/index.html @@ -0,0 +1,2067 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Kadet - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Input Type | Kadet

+

Kadet is an extensible input type for Kapitan that enables you to generate templates using Python.

+

The key benefit is the ability to utilize familiar programming principles while having access to Kapitan's powerful inventory system.

+

A library that defines resources as classes using the BaseObj class is required. These can then be utilized within components to render output.

+

The following functions are provided by the class BaseObj().

+

Method definitions:

+
    +
  • new(): Provides parameter checking capabilities
  • +
  • body(): Enables in-depth parameter configuration
  • +
+

Method functions:

+
    +
  • root(): Defines values that will be compiled into the output
  • +
  • need(): Ability to check & define input parameters
  • +
  • update_root(): Updates the template file associated with the class
  • +
+

A class can be a resource such as a Kubernetes Deployment as shown here:

+
class Deployment(BaseObj): # (1)!
+    def new(self): # (2)!
+        self.need("name", "name string needed")
+        self.need("labels", "labels dict needed")
+        self.need("containers", "containers dict needed")
+        self.update_root("lib/kubelib/deployment.yml")
+
+    def body(self): # (3)!
+        self.root.metadata.name = self.kwargs.name # (4)!
+        self.root.metadata.namespace = inv.parameters.target_name
+        self.root.spec.template.metadata.labels = self.kwargs.labels
+        self.root.spec.template.spec.containers = self.kwargs.containers
+
+
    +
  1. The deployment is a BaseObj() which has two main functions.
  2. new(self) is used to perform parameter validation & template compilation
  3. body(self) is utilized to set those parameters to be rendered.
  4. self.root.metadata.name is a direct reference to a key in the corresponding yaml.
+

Kadet supports importing libraries as you would normally do with Python. These libraries can then be used by the components to generate the required output.

+
...
+kubelib = kadet.load_from_search_paths("kubelib") #(1)!
+...
+name = "nginx"
+labels = kadet.BaseObj.from_dict({"app": name})
+nginx_container = kubelib.Container( #(2)!
+    name=name, image=inv.parameters.nginx.image, ports=[{"containerPort": 80}]
+)
+...
+def main():
+    output = kadet.BaseObj() #(3)!
+    output.root.nginx_deployment = kubelib.Deployment(name=name, labels=labels, containers=[nginx_container]) #(4)!
+    output.root.nginx_service = kubelib.Service( #(5)!
+        name=name, labels=labels, ports=[svc_port], selector=svc_selector
+    )
+    return output #(6)!
+
+
    +
  1. We import a library called kubelib using load_from_search_paths()
  2. We use kubelib to create a Container
  3. We create an output of type BaseObj and we will be updating the root element of this output.
  4. We use kubelib to create a Deployment kind. The Deployment makes use of the Container created.
  5. We use kubelib to create a Service kind.
  6. We return the object. Kapitan will render everything under output.root
+

Kadet uses a library called addict to organise the parameters in line with the yaml templates. +As shown above, we create a BaseObj() named output. We update the root of this output with the data structure returned from kubelib. This output is what is then returned to kapitan to be compiled into the desired output type.

+
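A tiny sketch of this addict-style access (the keys below are made up, and the import path assumes the component runs inside Kapitan):
+
from kapitan.inputs import kadet
+
+output = kadet.BaseObj()
+# nested keys are created on the fly, addict-style
+output.root.nginx_config.metadata.name = "nginx"
+output.root.nginx_config.metadata.labels = {"app": "nginx"}
+# output.root now holds the equivalent of:
+# nginx_config:
+#   metadata:
+#     name: nginx
+#     labels:
+#       app: nginx
+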

For a deeper understanding please refer to github.com/kapicorp/kadet

+

Supported output types:

+
    +
  • yaml (default)
  • +
  • json
  • +
+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/pages/input_types/remove/index.html b/dev/pages/input_types/remove/index.html new file mode 100644 index 000000000..457f490bd --- /dev/null +++ b/dev/pages/input_types/remove/index.html @@ -0,0 +1,2035 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Remove - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Remove

+

This input type simply removes files or directories. This can be helpful if you can't control particular files +generated during other compile inputs.

+

For example, to remove a file named copy_target, add an entry to input_paths: compiled/${kapitan:vars:target}/copy_target.

+
parameters:
+  target_name: removal
+  kapitan:
+    vars:
+      target: ${target_name}
+    compile:
+      - input_type: copy
+        input_paths:
+          - copy_target
+        output_path: .
+      # test removal of a file
+      - input_type: remove
+        input_paths:
+          - compiled/${kapitan:vars:target}/copy_target
+        output_path: .
+
+

As a reminder, each input block within the compile array is run sequentially for a target in Kapitan. If we reversed the order of the inputs above like so:

+
parameters:
+  target_name: removal
+  kapitan:
+    vars:
+      target: ${target_name}
+    compile:
+      - input_type: remove
+        input_paths:
+          - compiled/${kapitan:vars:target}/copy_target
+        output_path: .
+      - input_type: copy
+        input_paths:
+          - copy_target
+        output_path: .
+
+

The first input block would throw an error because the copy input command hasn't run yet to produce the file being removed by the remove input block.

+

Supported output types: N/A (no need to specify output_type)

+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/pages/inventory/advanced/index.html b/dev/pages/inventory/advanced/index.html new file mode 100644 index 000000000..06a0716e6 --- /dev/null +++ b/dev/pages/inventory/advanced/index.html @@ -0,0 +1,2070 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Advanced - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Advanced Inventory Features

+

Target labels

+

Kapitan allows you to define labels in your inventory, which can then be used to group together targets with similar labels.

+

For instance you could define the following:

+
+

Defines a class to add the customer label to selected targets

+
+

inventory/classes/type/customer_project.yml

+
parameters:
+  customer_name: ${target_name} # Defaults to the target_name
+  kapitan:
+    labels:
+      customer: ${customer_name}
+
+
+

Apply the class to the target for customer acme

+
+

inventory/targets/customers/acme.yml

+
classes:
+...
+- type.customer_project
+
+parameters:
+...
+
+
+

You can now selectively compile targets for customer acme using the following (see Labels for more details)

+
+
kapitan compile -l customer=acme
+Compiled acme (0.06s)
+Compiled acme-documentation (0.09s)
+
+
+
+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/pages/inventory/classes/index.html b/dev/pages/inventory/classes/index.html new file mode 100644 index 000000000..8ca99a55f --- /dev/null +++ b/dev/pages/inventory/classes/index.html @@ -0,0 +1,2096 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Classes - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Classes

+

Usage

+

The next thing you want to learn about the inventory is classes. A class is a yaml file containing a fragment of yaml that we want to import and merge into the inventory.

+

Classes are fragments of yaml: feature sets, commonalities between targets. Classes let you compose your Inventory from smaller bits, eliminating duplication and exposing all important parameters from a single, logically organised place. As the Inventory lets you reference other parameters in the hierarchy, classes become places where you can define something that will then get referenced from another section of the inventory, allowing for composition.

+

Classes are organised under the inventory/classes directory substructure. +They are organised hierarchically in subfolders, and the way they can be imported into a target or other classes depends on their location relative to the inventory/classes directory.

+

Importing classes

+

To import a class from within another file of the Inventory, you can follow these instructions:

+
    +
  • take the file path relative to the inventory/classes/ directory
  • +
  • remove the .yml file extension
  • +
  • replace / with .
  • +
+

For example, this will import the class inventory/classes/applications/sock-shop.yml

+
classes:
+- applications.sock-shop
+
+

Definition

+

Let's take a look at the common class which appears in the example above:

+

As explained, because common.yml is directly under the inventory/classes subdirectory, it can be imported directly into a target with:

+
classes:
+- common
+
+

If we open the file, we find another familiar yaml fragment.

+
+

inventory/classes/common.yml

+
classes:
+- kapitan.common
+
+parameters:
+  namespace: ${target_name}
+  target_name: ${_reclass_:name:short}
+
+
+

Notice that this class includes an import definition for another class, kapitan.common. We've already learned this means that kapitan will import a file on disk called inventory/classes/kapitan/common.yml

+

You can also see that in the parameters section we now encounter a new syntax which unlocks another powerful inventory feature: parameters interpolation!

+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/pages/inventory/introduction/index.html b/dev/pages/inventory/introduction/index.html new file mode 100644 index 000000000..c181b46c5 --- /dev/null +++ b/dev/pages/inventory/introduction/index.html @@ -0,0 +1,2068 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Introduction - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Overview

+ + +

The Inventory is a core component of Kapitan: this section aims to explain how it works and how to best take advantage of it.

+

The Inventory is a hierarchical YAML based structure which you use to capture anything that you want to make available to Kapitan, so that it can be passed on to its templating engines.

+

The first concept to learn about the Inventory is the target. A target is a file, found under the inventory/targets substructure, that tells Kapitan what you want to compile. It will usually map to something you want to do with Kapitan.

+

For instance, you might want to define a target for each environment that you want to deploy using Kapitan.

+

The Inventory also lets you define and reuse common configurations through YAML files that are referred to as classes: by listing classes in a target, their content gets merged together, allowing you to compose complex configurations without repetition.

+

By combining target and classes, the Inventory becomes the SSOT for your whole configuration, and learning how to use it will unleash the real power of Kapitan.

+
+

Info

+

The Kapitan Inventory is based on an open source project called reclass and you can find the full documentation on our Github clone. However, we discourage you from looking directly at the reclass documentation before you learn more about Kapitan, because Kapitan uses a fork of reclass and greatly simplifies the reclass experience.

+
+
+

Info

+

Kapitan allows users to switch the inventory backend to reclass-rs. You can switch the backend to reclass-rs by passing --inventory-backend=reclass-rs on the command line. Alternatively, you can define the backend in the .kapitan config file.

+

See the reclass-rs inventory backend documentation for more details.

+
+
+

Note

+

Kapitan enforces very little structure for the Inventory, so that you can adapt it to your specific needs. This might be overwhelming at the beginning: don't worry, we will explain best practices and give guidelines soon.

+
+

By default, Kapitan will search for its Inventory under inventory/classes and inventory/targets.

+
inventory/
+├── classes
+│   ├── applications
+│   ├── components
+│   ├── features
+│   ├── kapitan
+│   ├── projects
+│   └── terraform
+└── targets
+    ├── examples
+    ├── kapicorp
+    └── terraform
+
+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/pages/inventory/parameters_interpolation/index.html b/dev/pages/inventory/parameters_interpolation/index.html new file mode 100644 index 000000000..65ef8e6cd --- /dev/null +++ b/dev/pages/inventory/parameters_interpolation/index.html @@ -0,0 +1,2047 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Parameters Interpolation - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Parameters Interpolation

+
+

Note

+

as a shorthand, when we encounter deep yaml structures like the following:

+
parameters:
+  components:
+    nginx:
+      image: nginx:latest
+
+

When we want to talk about the image subkey, we normally use either of the following:

+
    +
  • parameters.components.nginx.image
  • +
  • components.nginx.image
  • +
+

However, when used in parameter expansion, remember to:

+
    +
  • replace the . with :
  • +
  • omit the parameters initial key which is implied
  • +
  • wrap it into the ${} variable interpolation syntax
  • +
+

The correct way to reference parameters.components.nginx.image then becomes ${components:nginx:image}.

+
+

The Inventory allows you to refer to other values defined elsewhere in the structure, using parameter interpolation.

+

Given the example:

+
parameters:
+  cluster:
+    location: europe
+
+  application:
+    location: ${cluster:location}
+
+  namespace: ${target_name}
+  target_name: dev
+
+

Here we tell Kapitan that:

+
    +
  • namespace should take the same value defined in target_name
  • +
  • target_name should take the literal string dev
  • +
  • application.location should take the same value as defined in cluster.location
  • +
+

It is important to notice that the inventory can refer to values defined in other classes, as long as they are imported by the target. So, for instance, with the following example:

+
classes:
+  - project.production
+
+parameters:
+  application:
+    location: ${cluster:location}
+
+

Here application.location refers to a value, cluster.location, which has been defined elsewhere, perhaps (but not necessarily) in the project.production class.

+

Also notice that the class name (project.production) does not in any way influence the name or the structure of the yaml it imports into the file.

+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/pages/inventory/reclass-rs/index.html b/dev/pages/inventory/reclass-rs/index.html new file mode 100644 index 000000000..f7d52dc16 --- /dev/null +++ b/dev/pages/inventory/reclass-rs/index.html @@ -0,0 +1,2137 @@ + + + + + + + + + + + + + + + + + + + + + + + + + reclass-rs Backend - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

The reclass-rs inventory backend

+

Overview

+

Reclass-rs is a reimplementation of Kapitan's Reclass fork in Rust. +Please note that the Rust implementation doesn't support all the features of Kapitan's Reclass fork yet.

+

However, reclass-rs improves rendering time for the inventory significantly, especially if you're making heavy use of parameter references in class includes. +If some of the Reclass features or options that you're using are missing in reclass-rs, don't hesitate to open an issue in the reclass-rs project.

+

Installation

+

The reclass-rs Python package is an optional dependency of Kapitan. +You can install it as follows:

+
pip install kapitan[reclass-rs]
+
+

Usage

+

To use the reclass-rs inventory backend, you need to pass --inventory-backend=reclass-rs on the command line. +If you want to permanently switch to the reclass-rs inventory backend, you can select the inventory backend in the .kapitan config file:

+
global:
+  inventory-backend: reclass-rs
+
+

Performance comparison

+

For the performance comparison, a real Kapitan inventory which makes heavy use of parameter interpolation in class includes was rendered with both Reclass and reclass-rs. +The example inventory that was used for the performance comparison contains 325 classes and 56 targets. +The example inventory renders to a total of 25MB of YAML.

+

Reclass

+
$ time kapitan inventory -v --inventory-backend=reclass > inv.yml
+[ ... some output omitted ... ]
+kapitan.resources DEBUG    Using reclass as inventory backend
+kapitan.inventory.inv_reclass DEBUG    Inventory reclass: No config file found. Using reclass inventory config defaults
+kapitan.inventory.inv_reclass DEBUG    Inventory rendering with reclass took 0:01:06.037057
+
+real    1m23.840s
+user    1m23.520s
+sys     0m0.287s
+
+

Reclass takes 1 minute and 6 seconds to render the example inventory. +The rest of the runtime (roughly 18 seconds) is spent in writing the resulting 25MB of YAML to the output file.

+

reclass-rs

+
$ time kapitan inventory -v --inventory-backend=reclass-rs > inv-rs.yml
+[ ... some output omitted ... ]
+kapitan.resources DEBUG    Using reclass-rs as inventory backend
+kapitan.inventory.inv_reclass DEBUG    Inventory reclass: No config file found. Using reclass inventory config defaults
+reclass-config.yml entry 'storage_type=yaml_fs' not implemented yet, ignoring...
+reclass-config.yml entry 'inventory_base_uri=./inventory' not implemented yet, ignoring...
+reclass-config.yml entry 'allow_none_override=true' not implemented yet, ignoring...
+kapitan.inventory.inv_reclass_rs DEBUG    Inventory rendering with reclass-rs took 0:00:01.717107
+
+real    0m19.921s
+user    0m35.586s
+sys     0m1.066s
+
+

reclass-rs takes 1.7 seconds to render the example inventory. +The rest of the runtime (roughly 18 seconds) is spent in writing the resulting 25MB of YAML to the output file.

+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/pages/inventory/targets/index.html b/dev/pages/inventory/targets/index.html new file mode 100644 index 000000000..749e11dd7 --- /dev/null +++ b/dev/pages/inventory/targets/index.html @@ -0,0 +1,2138 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Targets - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Targets

+

Usage

+

A target is a file that lives under the inventory/targets subdirectory, and that tells Kapitan what you want it to do for you.

+

Kapitan will recognise all YAML files in the inventory/targets subtree as targets.

+
+

Note

+

Only use .yml as extension for Inventory files. .yaml will not be recognised as a valid Inventory file.

+
+

What you do with a target is largely up to you and your setup. Common examples:

+
    +
  • clusters: Map each target to a cluster, capturing all configurations needed for a given cluster. For instance: targets/clusters/production-cluster1.yml
  • +
  • applications: When using Kapitan to manage Kubernetes applications, you might define a target for everything that you would normally deploy in a single namespace, including all its resources, scripts, secrets and documentation. For instance: targets/mysql.yml
  • +
  • environments: You might want to define a different target for each environment you have, like dev.yml, test.yml and prod.yml
  • +
  • cloud projects: When working with Terraform, it may be convenient to group targets by cloud project. For instance: targets/gcp/projects/engineering-prod.yml.
  • +
  • single tenancy: When deploying a single-tenancy application, you might combine the approaches above, and have a target acme.yml that is used to define both Terraform and Kubernetes resources for a given tenant, perhaps also with some ArgoCD or Spinnaker pipelines to go with it.
  • +
+
+

Example

+

If you have configured your kapitan repository as described in the Quick Start instructions, you can run the commands we give during the course of this documentation.

+
+

kapitan compile

+
+
Compiled gke-pvm-killer (0.09s)
+Compiled vault (0.18s)
+Compiled pritunl (0.17s)
+Compiled mysql (0.07s)
+Compiled examples (0.25s)
+Compiled postgres-proxy (0.06s)
+Compiled echo-server (0.08s)
+Compiled global (0.05s)
+Compiled tutorial (0.09s)
+Compiled guestbook-argocd (0.08s)
+Compiled sock-shop (0.30s)
+Compiled kapicorp-demo-march (0.04s)
+Compiled kapicorp-project-123 (0.03s)
+Compiled kapicorp-terraform-admin (0.08s)
+Compiled tesoro (0.09s)
+Compiled prod-sockshop (0.34s)
+Compiled dev-sockshop (0.41s)
+Compiled argocd (2.53s)
+
+

When you run kapitan compile, you instruct Kapitan to generate for each given target a directory under compiled with the same name. Under this directory you will find all the files that have been generated by Kapitan for that target.

+
+

tree compiled/mysql/

+
+
compiled/mysql/
+├── argocd
+├── docs
+│   ├── mysql-readme.md
+│   └── README.md
+├── manifests
+│   ├── mysql-bundle.yml
+│   ├── mysql-config.yml
+│   ├── mysql-namespace.yml
+│   └── mysql-secret.yml
+├── pre-deploy
+├── rabbitmq
+├── scripts
+└── terraform
+
+7 directories, 6 files
+
+
+

Definition

+

A typical target might look like this:

+
+

inventory/targets/acme/dev.yml

+
classes:
+  - common
+  - components.acme.frontend
+  - components.acme.backend
+
+parameters:
+  target_name: dev
+
+
+

Note that it is made of 2 sections:

+
    +
  • classes is a list of class files you will want to import.
  • +
  • parameters allows for local override of what is unique to this target.
  • +
+
+

Info

+

the kapitan key under the root parameters is reserved for kapitan usage. Some examples:

+
parameters:
+  kapitan:
+    compile:      # input types configuration section
+    dependencies: # dependencies configuration section to download resources
+    secrets:      # secret encryption/decryption configuration section
+    validate:     # items which indicate which compiled output to validate
+    vars:         # which are also passed down to input types as context
+
+
+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/pages/kapitan_overview/index.html b/dev/pages/kapitan_overview/index.html new file mode 100644 index 000000000..c0b51dbf4 --- /dev/null +++ b/dev/pages/kapitan_overview/index.html @@ -0,0 +1,2195 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Core Concepts - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Kapitan Overview

+

Kapitan at a glance

+

Kapitan is a powerful configuration management tool designed to help engineers manage complex systems through code. It centralizes and simplifies the management of configurations with a structured approach that revolves around a few core concepts.

+
+Kapitan diagram +
%%{ init: { securityLevel: 'loose'} }%%
+graph LR
+    classDef pink fill:#f9f,stroke:#333,stroke-width:4px,color:#000,font-weight: bold;
+    classDef blue fill:#00FFFF,stroke:#333,stroke-width:4px,color:#000,font-weight: bold;
+    TARGET1 --> KAPITAN
+    TARGET2 --> KAPITAN
+    TARGETN --> KAPITAN
+    KAPITAN --> EXTERNAL
+    KAPITAN --> GENERATORS
+    KAPITAN --> HELM
+    KAPITAN --> JINJA
+    KAPITAN --> JSONNET
+    KAPITAN --> KADET
+    EXTERNAL --> OUTPUT
+    GENERATORS --> OUTPUT
+    JINJA --> OUTPUT
+    JSONNET --> OUTPUT
+    KADET --> OUTPUT
+    HELM --> OUTPUT
+    GKMS --> REFERENCES
+    AWSKMS --> REFERENCES
+    VAULT --> REFERENCES
+    OTHER --> REFERENCES
+    PLAIN --> REFERENCES
+    OUTPUT --> TARGETN_OUTPUT
+    OUTPUT --> TARGET1_OUTPUT 
+    OUTPUT --> TARGET2_OUTPUT 
+    REFERENCES --> KAPITAN
+    TARGET1_OUTPUT --> DOCUMENTATION 
+    TARGET1_OUTPUT --> KUBERNETES
+    TARGET1_OUTPUT --> SCRIPTS 
+    TARGET1_OUTPUT --> TERRAFORM
+    CLASSES --> TARGET1
+    CLASSES --> TARGET2
+    CLASSES --> TARGETN
+
+    subgraph "Inventory"
+        CLASSES[classes]
+        TARGET1(["target 1"]):::pink
+        TARGET2(["target 2"])
+        TARGETN(["target N"])
+    end
+
+    subgraph "references"
+        direction TB
+        GKMS["GCP KMS"]
+        AWSKMS["AWS KMS"]
+        VAULT["Hashicorp Vault"]
+        OTHER["others"]
+        PLAIN["plain"]
+        REFERENCES["references"]
+    end
+
+    KAPITAN(("<img src='/images/kapitan_logo.png'; width='80'/>")):::blue
+    click EXTERNAL "/compile#external"
+
+    subgraph "Input Types" 
+        EXTERNAL["external"]
+        GENERATORS["generators"]
+        HELM["helm"]
+        JINJA["jinja"]
+        JSONNET["jsonnet"]
+        KADET["kadet"]
+    end
+
+    OUTPUT{{"compiled output"}}:::blue
+
+
+
+    subgraph " "
+        TARGET1_OUTPUT([target1]):::pink
+        DOCUMENTATION["docs"]
+        KUBERNETES["manifests"]
+        SCRIPTS["scripts"]
+        TERRAFORM["terraform"]
+    end
+
+    TARGET2_OUTPUT(["target 2"])
+    TARGETN_OUTPUT(["target N"])
+
+
+

Let's explore these concepts in a way that's accessible to new users:

+

Inventory

+

At the core of Kapitan lies the Inventory, a structured database of variables meticulously organized in YAML files. +This hierarchical setup serves as the single source of truth (SSOT) for your system's configurations, making it easier to manage and reference the essential components of your infrastructure. Whether you're dealing with Kubernetes configurations, Terraform resources, or even business logic, the Inventory allows you to define and store these elements efficiently. This central repository then feeds into Kapitan's templating engines, enabling seamless reuse across various applications and services.

+ + +
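As a rough sketch of how this fits together (the paths, class name and values below are purely illustrative), a class captures shared settings:

```yaml
# inventory/classes/components/acme/frontend.yml (illustrative)
parameters:
  acme:
    frontend:
      image: acme/frontend:1.0.0   # hypothetical image
      replicas: 2
```

and a target imports it, overriding only what is unique to that environment:

```yaml
# inventory/targets/acme/dev.yaml (illustrative)
classes:
  - components.acme.frontend
parameters:
  target_name: dev
  acme:
    frontend:
      replicas: 1   # target-specific override
```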

Input Types

+

Kapitan takes the information stored in the Inventory and brings it to life through its templating engines upon compilation. This process transforms static data into dynamic configurations, capable of generating a wide array of outputs like Kubernetes manifests, Terraform plans, documentation, and scripts. It's about making your configurations work for you, tailored to the specific needs of your projects.

+
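As a hedged sketch of how a target wires an input type to its templates (the exact schema is covered on the Input Types page; the component path here is illustrative):

```yaml
parameters:
  kapitan:
    compile:
      - output_path: manifests              # lands under compiled/<target>/manifests
        input_type: jsonnet
        input_paths:
          - components/mysql/main.jsonnet   # illustrative jsonnet component
```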

See Input Types for more

+

Generators

+

Generators offer a straightforward entry point into using Kapitan, requiring minimal to no coding experience. These are essentially pre-made templates that allow you to generate common configuration files, such as Kubernetes manifests, directly from your Inventory data. Kapitan provides a wealth of resources, including the Kapitan Reference GitHub repository and various blog posts, to help users get up and running with generators.

+
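For a flavour of what this looks like, the mysql example later on this page drives a Kubernetes manifest generator purely from inventory data (the generator classes themselves come from the kapicorp reference repositories):

```yaml
parameters:
  components:
    mysql:
      image: mysql   # the generator expands this into a full set of Kubernetes manifests
```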

Kadet

+

For those looking to leverage the full power of Kapitan, Kadet introduces a method to define and reuse complex configurations through Python. This internal library facilitates the creation of JSON and YAML manifests programmatically, offering a higher degree of customization and reuse. Kadet empowers users to craft intricate configurations with the simplicity and flexibility of Python.

+

References

+

Kapitan References provide a secure way to store passwords, settings, and other essential data within your project. Think of them as special code placeholders.

+
    +
  • Flexibility: Update a password once, and Kapitan updates it everywhere automatically.
  • +
  • Organization: References tidy up your project, especially when you're juggling multiple settings or environments (dev, staging, production).
  • +
  • Security: Protect sensitive information like passwords with encryption.
  • +
+
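A minimal sketch of what a reference looks like in the inventory (the backend and path here are illustrative; the full syntax is covered on the References page):

```yaml
parameters:
  mysql:
    # ?{<backend>:<path>} - gkms is just one of the supported backends
    root_password: ?{gkms:targets/${target_name}/mysql/root_password}
```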
+

Tip

+

Use Tesoro, our Kubernetes Admission Controller, to complete your integration with Kubernetes for secure secret decryption on-the-fly.

+
+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/pages/remote_repositories/index.html b/dev/pages/remote_repositories/index.html new file mode 100644 index 000000000..bfc1ea2ee --- /dev/null +++ b/dev/pages/remote_repositories/index.html @@ -0,0 +1,2216 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Remote repositories - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Remote Inventories

+

Kapitan is capable of recursively fetching inventory items stored in remote locations and copying them to the specified output path. This feature can be used by specifying those inventory items in classes or targets under parameters.kapitan.inventory. Supported types are:

+ +

Class items can be specified before they are locally available, as long as they are fetched in the same run. An example of this is given below.

+

Git type

+

Git types can fetch external inventories available via HTTP/HTTPS or SSH URLs. This is useful for fetching repositories or their sub-directories, as well as accessing them at specific commits and branches (refs).

+

Note: git types require the git binary on your system.

+

Definition

+
parameters:
+  kapitan:
+    inventory:
+    - type: git
+      output_path: path/to/dir
+      source: git_url
+      subdir: relative/path/from/repo/root (optional)
+      ref: tag, commit, branch etc. (optional)
+
+

Example

+

Let's say we want to fetch a class from our kapitan repository, specifically kapicorp/kapitan/tree/master/examples/docker/inventory/classes/dockerfiles.yml.

+

Let's create a simple target file docker.yml

+
+
+

Note

+

external dependencies are used to fetch dependency items in this example.

+
+
+

targets/docker.yml

+
classes:
+  - dockerfiles
+parameters:
+  kapitan:
+    vars:
+      target: docker
+    inventory:
+      - type: git
+        source: https://github.com/kapicorp/kapitan
+        subdir: examples/docker/inventory/classes/
+        output_path: classes/
+    dependencies:
+      - type: git
+        source: https://github.com/kapicorp/kapitan
+        subdir: examples/docker/components
+        output_path: components/
+      - type: git
+        source: https://github.com/kapicorp/kapitan
+        subdir: examples/docker/templates
+        output_path: templates/
+  dockerfiles:
+  - name: web
+    image: amazoncorretto:11
+  - name: worker
+    image: amazoncorretto:8
+
+
+
+
kapitan compile --fetch
+
+
+
+click to expand output +
[WARNING] Reclass class not found: 'dockerfiles'. Skipped!
+[WARNING] Reclass class not found: 'dockerfiles'. Skipped!
+Inventory https://github.com/kapicorp/kapitan: fetching now
+Inventory https://github.com/kapicorp/kapitan: successfully fetched
+Inventory https://github.com/kapicorp/kapitan: saved to inventory/classes
+Dependency https://github.com/kapicorp/kapitan: saved to components
+Dependency https://github.com/kapicorp/kapitan: saved to templates
+Compiled docker (0.11s)
+
+
+
+

http type

+

http[s] types can fetch external inventories available at an http:// or https:// URL.

+

Definition

+
parameters:
+  kapitan:
+    inventory:
+    - type: http | https
+      output_path: full/path/to/file.yml
+      source: http[s]://<url>
+      unpack: True | False # False by default
+
+

Example

+
+
+

targets/mysql-generator-fetch.yml

+
classes:
+  - common
+  - kapitan.generators.kubernetes
+parameters:
+  kapitan:
+    inventory:
+      - type: https
+        source: https://raw.githubusercontent.com/kapicorp/kapitan-reference/master/inventory/classes/kapitan/generators/kubernetes.yml
+        output_path: classes/kapitan/generators/kubernetes.yml
+  components:
+    mysql:
+      image: mysql
+
+
+
+
kapitan compile --fetch
+
+
+
+click to expand output +
./kapitan compile -t mysql-generator-fetch --fetch
+Inventory https://raw.githubusercontent.com/kapicorp/kapitan-reference/master/inventory/classes/kapitan/generators/kubernetes.yml: fetching now
+Inventory https://raw.githubusercontent.com/kapicorp/kapitan-reference/master/inventory/classes/kapitan/generators/kubernetes.yml: successfully fetched
+Inventory https://raw.githubusercontent.com/kapicorp/kapitan-reference/master/inventory/classes/kapitan/generators/kubernetes.yml: saved to inventory/classes/kapitan/generators/kubernetes.yml
+
+...
+cut
+...
+
+Compiled mysql-generator-fetch (0.06s)
+
+
+
+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/proposals/index.html b/dev/proposals/index.html new file mode 100644 index 000000000..51a4c4186 --- /dev/null +++ b/dev/proposals/index.html @@ -0,0 +1,2058 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Proposals - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Kapitan proposals

+

Introduction

+

Proposals can be submitted for review by performing a pull request against this repository. If approved the proposal will +be published here for further review by the Kapitan community. Proposals tend to be improvements or design consideration +for new features.

+

Existing proposals

+

Kadet input type

+

External dependency management

+

Helm charts input type

+

Kubernetes scheme validation

+

Portable standalone Kapitan executable

+

Ref types redesign

+

Hashicorp vault secrets

+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/references/index.html b/dev/references/index.html new file mode 100644 index 000000000..b17f88c79 --- /dev/null +++ b/dev/references/index.html @@ -0,0 +1,3007 @@ + + + + + + + + + + + + + + + + + + + + + + + + + References - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + +

Kapitan References (formerly Secrets)

+

One of the motivations behind Kapitan's design is that we believe everything about your setup should be tracked, and Kapitan takes this to the extreme. Sometimes, however, we have to manage values that we do not think belong in the Inventory: perhaps they are too variable (for instance, a Git commit sha that changes with every build) or too sensitive, like a password or a generic secret, in which case they should always be encrypted.

+

Kapitan has built-in support for References, which you can use to manage both of these use cases.

+

Kapitan References supports the following backends:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
BackendDescriptionEncrypted
plainPlain text, (e.g. commit sha)
base64Base64, non confidential but with base64 encoding
gpgSupport for https://gnupg.org/
gkmsGCP KMS
awskmsAWS KMS
azkmsAzure Key Vault
envEnvironment
vaultkvHashicorp Vault (RO)
vaulttransitHashicorp Vault (encrypt, decrypt, update_key, rotate_key)
+

Setup

+

Some reference backends require configuration, both in the Inventory and to configure the actual backend.

+
+

Get started

+

If you want to get started with references but don't want to deal with the initial setup, you can use the plain and base64 reference types. These are great for demos, but we will see they are extremely helpful even in Production environments.

+
+

Danger

+

Neither plain nor base64 references support encryption: they are intended for development or demo purposes only. DO NOT use plain or base64 references to store sensitive information!

+
+
+
+

Backend configuration

+

Configuration for each backend varies, and it is performed by configuring the inventory under parameters.kapitan.secrets.

+
+
+
+
+

No configuration needed

+
+
+
+
+

No configuration needed

+
+
+
+
parameters:
+  kapitan:
+    secrets:
+      gpg:
+        recipients:
+          - name: example@kapitan.dev
+            fingerprint: D9234C61F58BEB3ED8552A57E28DC07A3CBFAE7C
+
+
+
+
parameters:
+  kapitan:
+    secrets:
+      gkms:
+        key: 'projects/<project>/locations/<location>/keyRings/<keyRing>/cryptoKeys/<key>'
+
+
+
+
parameters:
+  kapitan:
+    secrets:
+      awskms:
+        key: 'alias/nameOfKey'
+
+
+
+
parameters:
+  kapitan:
+    secrets:
+      azkms:
+        key: 'https://<keyvault-name>.vault.azure.net/keys/<object-name>/<object-version>'
+
+
+
+
parameters:
+  ...
+  mysql:
+    root_password: ?{env:targets/${target_name}/mysql/root_password}
+  ...
+
+
+
+
parameters:
+  kapitan:
+    secrets:
+      vaultkv:
+        VAULT_ADDR: http://127.0.0.1:8200
+        auth: token
+        mount: secret
+
+
+
+
parameters:
+  kapitan:
+    secrets:
+      vaulttransit:
+        VAULT_ADDR: https://vault.example.com
+        VAULT_TOKEN: s.mqWkI0uB6So0aHH0v0jyDs97
+        VAULT_SKIP_VERIFY: "False"  # Recommended
+        auth: token
+        mount: mytransit
+        key: 2022-02-13-test
+
+
+
+
+
+
+

Organize your configuration in classes

+

Just like any other inventory parameters, these configurations can be inherited from a common class or defined per target.

+
+

inventory/classes/common.yml

+
classes:
+- security.backend
+...
+
+
+
+

inventory/classes/security/backend.yml

+
parameters:
+  kapitan:
+    secrets:
+      <backend>: <configuration>
+
+
+
+
+ADVANCED: Mix-and-Match backends +

Remember that you can use multiple backends at the same time, and also use variable interpolation for even greater flexibility.

+

In a multi-cloud setup, for instance, you could configure both GKMS and AWS KMS:

+
+

GCP configuration

+
+

inventory/classes/cloud/gcp.yml

+
classes:
+- security.backends.gkms
+...
+
+
+
+

inventory/classes/security/backends/gkms.yml

+
# Configuration for GCP targets
+parameters:
+  backend: gkms
+  kapitan:
+    secrets:
+      gkms: <configuration>
+
+
+
+
+

AWS configuration

+
+

inventory/classes/security/backends/awskms.yml

+
# Configuration for AWS targets
+parameters:
+  backend: awskms
+  kapitan:
+    secrets:
+      awskms: <configuration>
+
+
+
+

inventory/classes/cloud/aws.yml

+
classes:
+- security.backends.awskms
+...
+
+
+
+

Now because they both set the parameters.backend variable, you can define a reference whose backend changes based on what class is assigned to the target

+
+

inventory/targets/cloud/gcp/acme.yml

+
classes:
+- cloud.aws
+
+parameters:
+  ...
+  mysql:
+    # the secret backend will change based on the cloud assigned to this target
+    root_password: ?{${backend}:targets/${target_name}/mysql/root_password}
+  ...
+
+
+
+

Define references

+
+

References can be defined in the inventory following this syntax (spaces added for clarity):

+

?{ <backend_id> : <reference_path> }

+
+expand for advanced features +

The syntax also supports process functions and create functions, which we will discuss later; this brings the full syntax to

+
+

?{ <backend_id> : <reference_path> } |<process_function> ||<create_function>

+
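For instance, a create function can be combined with a process function; this sketch mirrors the patterns shown further down this page (the key name and backend are illustrative):

```yaml
parameters:
  mysql:
    # created with a random 32-character string, then base64-encoded on reveal
    root_password_b64: ?{gkms:targets/${target_name}/mysql/root_password||random:str:32|base64}
```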
+
+
+
+
+
parameters:
+  ...
+  mysql:
+    root_password: ?{plain:targets/${target_name}/mysql/root_password}
+  ...
+
+
+

not encrypted

+

This reference type does not support encryption: it is intended for non-sensitive data only. DO NOT use plain for storing sensitive information!

+
+
+
+
parameters:
+  ...
+  mysql:
+    root_password: ?{base64:targets/${target_name}/mysql/root_password}
+  ...
+
+
+

not encrypted

+

This reference type does not support encryption: it is intended for non-sensitive data only. DO NOT use base64 for storing sensitive information!

+
+
+
+
parameters:
+  ...
+  mysql:
+    root_password: ?{gpg:targets/${target_name}/mysql/root_password}
+  ...
+
+
+
+
parameters:
+  ...
+  mysql:
+    root_password: ?{gkms:targets/${target_name}/mysql/root_password}
+  ...
+
+
+
+
parameters:
+  ...
+  mysql:
+    root_password: ?{awskms:targets/${target_name}/mysql/root_password}
+  ...
+
+
+
+
parameters:
+  ...
+  mysql:
+    root_password: ?{azkms:targets/${target_name}/mysql/root_password}
+  ...
+
+
+
+
parameters:
+  ...
+  mysql:
+    root_password: ?{env:targets/${target_name}/mysql/root_password}
+  ...
+
+
+
+

read-only +

parameters:
+  ...
+  mysql:
+    root_password: ?{vaultkv:targets/${target_name}/mysql/root_password}
+  ...
+
+read-write: +
parameters:
+  ...
+  mysql:
+    root_password: ?{vaultkv:targets/${target_name}/mysql/root_password:mount:path/in/vault:mykey}
+  ...
+

+
+
+
parameters:
+  ...
+  mysql:
+    root_password: ?{vaulttransit:targets/${target_name}/mysql/root_password}
+  ...
+
+
+
+
+
+

Assign a value

+

Manually

+

You can assign values to your reference using the command line. Both reading from a file and pipes are supported.

+
+

Please Note

+

Kapitan will fail compilation if a reference is not found. Please see how to assign a value automatically in the next section

+
+
+
+
+
+
kapitan refs --write plain:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f <input file>
+
+

which also works with pipes

+
cat input_file | kapitan refs --write plain:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f -
+
+
+
+
kapitan refs --write base64:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f <input file>
+
+

which also works with pipes

+
cat input_file | kapitan refs --write base64:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f -
+
+
+
+
kapitan refs --write gpg:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f <input file>
+
+

which also works with pipes

+
cat input_file | kapitan refs --write gpg:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f -
+
+
+
+
kapitan refs --write gkms:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f <input file>
+
+

which also works with pipes

+
cat input_file | kapitan refs --write gkms:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f -
+
+
+
+
kapitan refs --write vaulttransit:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f <input file>
+
+

which also works with pipes

+
cat input_file | kapitan refs --write vaulttransit:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f -
+
+
+
+
kapitan refs --write azkms:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f <input file>
+
+

which also works with pipes

+
cat input_file | kapitan refs --write azkms:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f -
+
+
+
+
+

Setting default value only

+

The env backend works in a slightly different way, as it allows you to reference environment variables at runtime.

+

For example, for a reference called ?{env:targets/envs_defaults/mysql_port_${target_name}}, Kapitan would look for an environment variable called KAPITAN_ENV_mysql_port_${TARGET_NAME}.

+

If that variable cannot be found in the Kapitan environment, the default will be taken from the refs/targets/envs_defaults/mysql_port_${TARGET_NAME} file instead.
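As a hedged sketch (the target name dev and the port value are made up for illustration):

```shell
# With ?{env:targets/envs_defaults/mysql_port_dev} used by the "dev" target,
# Kapitan looks for this variable when the reference is revealed:
export KAPITAN_ENV_mysql_port_dev=3306
# If it is unset, the value previously written to
# refs/targets/envs_defaults/mysql_port_dev is used as the default instead.
```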

+
+
kapitan refs --write env:refs/targets/envs_defaults/mysql_port_${TARGET_NAME} -t ${TARGET_NAME} -f <input file>
+
+

which also works with pipes

+
cat input_file | kapitan refs --write env:refs/targets/envs_defaults/mysql_port_${TARGET_NAME} -t ${TARGET_NAME} -f -
+
+
+
+
kapitan refs --write vaultkv:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f <input file>
+
+

which also works with pipes

+
cat input_file | kapitan refs --write vaultkv:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f -
+
+
+
+

This backend expects the value to be stored as a key:value pair.

+
echo "a_key:a_value" | kapitan refs --write vaulttransit:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f -
+
+

When reading from disk, the input file should be formatted accordingly.

+
+
+
+
+

Automatically

+

Kapitan has built-in capabilities to initialise its references on creation, using an elegant combination of primary and secondary functions. This is extremely powerful because it allows you to make sure they are always initialised with sensible values.

+

primary functions

+

To automate the creation of the reference, you can add one of the following primary functions to the reference tag by using the syntax ||primary_function:param1:param2

+

For instance, to automatically initialise a reference with a random string with a length of 32 characters, you can use the random primary function

+
```yaml
+parameters:
+  ...
+  mysql:
+    root_password: ?{${backend}:targets/${target_name}/mysql/root_password||random:str:32}
+  ...
+```
+
+
+

Initialise non existent references

+

The first operator here, ||, behaves like a logical OR.

+
    +
  • If the reference file does not exist, Kapitan will use the function to initialise it
  • +
  • If the reference file exists, no functions will run.
  • +
+
+

Automate secret rotation with ease

+

You can take advantage of it to implement easy rotation of secrets. Simply delete the reference files, and run kapitan compile: let Kapitan do the rest.

+
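A minimal sketch of that rotation flow (the target name and reference path are illustrative):

```shell
# Drop the existing reference file...
rm refs/targets/dev/mysql/root_password
# ...and recompile: the ||random create function generates a fresh value.
kapitan compile -t dev
```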
+
+
+
+
+
+
+
+
+

Generator function for alphanumeric characters; the result will be URL-token-safe.

+
+
?{${backend}:targets/${target_name}/mysql/root_password||random:str}
+
+
+
+
+

generator function for digits (0-9)

+
+
?{${backend}:targets/${target_name}/mysql/root_password||random:int}
+
+
+
+
+

generator function for lowercase letters (a-z)

+
+
?{${backend}:targets/${target_name}/mysql/root_password||random:loweralpha}
+
+
+
+
+

generator function for uppercase letters (A-Z)

+
+
?{${backend}:targets/${target_name}/mysql/root_password||random:upperalpha}
+
+
+
+
+

generator function for lowercase letters and numbers (a-z and 0-9)

+
+
?{${backend}:targets/${target_name}/mysql/root_password||random:loweralphanum}
+
+
+
+
+

generator function for uppercase letters and numbers (A-Z and 0-9)

+
+
?{${backend}:targets/${target_name}/mysql/root_password||random:upperalphanum}
+
+
+
+
+

generator function for alphanumeric characters and given special characters

+
+
?{${backend}:targets/${target_name}/mysql/root_password||random:special}
+
+
+
+
+
+
+
+
+
+
+

Generates an RSA 4096 private key (PKCS#8). You can optionally pass the key size

+
+
?{${backend}:targets/${target_name}/private_key||rsa}
+
+
+
+
+

Generates a ed25519 private key (PKCS#8)

+
+
?{${backend}:targets/${target_name}/private_key||ed25519}
+
+
+
+
+

Derives the public key from a revealed private key

+
+
?{${backend}:targets/${target_name}/private_key||rsa}
+?{${backend}:targets/${target_name}/public_key||reveal:targets/${target_name}/private_key|publickey}
+
+
+
+
+

DEPRECATED: use ||publickey

+
+
+
+
+
+
+
+

Generates a base64 encoded pair of username:password

+
+
?{${backend}:targets/${target_name}/apache_basicauth||basicauth:username:password}
+
+
+
+
+

Reveals the content of another reference. This is useful when deriving public keys, or when another reference needs the same value with a different encoding.

+
+
?{${backend}:targets/${target_name}/secret||random:str}
+?{${backend}:targets/${target_name}/base64_secret||reveal:targets/${target_name}/secret|base64}
+
+
+

attention when rotating secrets used with reveal

+

If you use reveal to initialise a reference, like my_reference||reveal:source_reference, then my_reference will not be automatically updated if source_reference changes. Please make sure you also re-initialise my_reference correctly.

+
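A sketch of one way to re-initialise correctly (paths and target name are illustrative): delete both reference files so that both create functions run again on the next compile.

```shell
rm refs/targets/dev/secret refs/targets/dev/base64_secret   # illustrative paths
kapitan compile -t dev   # re-creates the source and the derived reference
```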
+
+
+
+

secondary functions

+
+
+
+
+

base64 encodes your reference

+
+
?{${backend}:targets/${target_name}/secret||random:str|base64}
+
+
+
+
+

sha256 hashes your reference (param1: salt)

+
+
?{${backend}:targets/${target_name}/secret||random:str|sha256}
+
+
+
+
+

Reveal references

+

You can reveal the secrets referenced in the outputs of kapitan compile via:

+
```shell
+kapitan refs --reveal -f path/to/rendered/template
+```
+
+

For example, compiled/minikube-mysql/manifests/mysql_secret.yml with the following content:

+
```yaml
+apiVersion: v1
+data:
+  MYSQL_ROOT_PASSWORD: ?{gpg:targets/minikube-mysql/mysql/password:ec3d54de}
+  MYSQL_ROOT_PASSWORD_SHA256: ?{gpg:targets/minikube-mysql/mysql/password_sha256:122d2732}
+kind: Secret
+metadata:
+  annotations: {}
+  labels:
+    name: example-mysql
+  name: example-mysql
+  namespace: minikube-mysql
+type: Opaque
+```
+
+

can be revealed as follows:

+
```shell
+kapitan refs --reveal -f compiled/minikube-mysql/manifests/mysql_secret.yml
+```
+
+

This will substitute the referenced secrets with the actual decrypted secrets stored at the referenced paths and display the file content.

+

You can also use:

+
```shell
+kapitan refs --reveal --ref-file refs/targets/all-glob/mysql/password
+```
+
+

or

+
```shell
+kapitan refs --reveal --tag "?{base64:targets/all-glob/mysql/password}"
+# or
+kapitan refs --reveal --tag "?{base64:targets/all-glob/mysql/password:3192c15c}"
+```
+
+

for more convenience.

+

Embedded refs

+

Please refer to the CLI reference

+

YAML SubVars References

+

Kapitan is also able to access specific keys in YAML content by using subvars.

+

For instance given a reference plain:larder with content:

+
```yaml
+food:
+  apples: 1
+```
+
+

I could now have an inventory variable like:

+
```yaml
+parameters:
+  number_of_apples: ?{plain:larder@food.apples}
+```
+
+

Using subvars to ingest yaml from command line tools

+

Subvars can have a very practical use for storing YAML outputs coming straight from other tools. For instance, I could use the GCP gcloud command to get all the information about a cluster, and write it into a reference

+
```shell
+gcloud container clusters describe \
+  --project ${TARGET_NAME}-project \
+  gke-cluster --zone europe-west1 --format yaml \
+    | kapitan refs --write plain:clusters/${TARGET_NAME}/cluster -t ${TARGET_NAME} -f -
+```
+
+

knowing that the output of gcloud produces YAML containing the following values:

+
```yaml
+...
+name: gke-cluster
+releaseChannel:
+  channel: REGULAR
+selfLink: https://container.googleapis.com/v1/projects/kapicorp/locations/europe-west1/clusters/gke-cluster
+...
+```
+
+

I can now reference the link to the cluster in the inventory using:

+
```yaml
+parameters:
+  cluster:
+    name: ?{plain:clusters/${target_name}/cluster@name} 
+    release_channel: ?{plain:clusters/${target_name}/cluster@releaseChannel.channel}
+    link: ?{plain:clusters/${target_name}/cluster@selfLink}
+```
+
+

Combined with a Jinja template, I could automatically write documentation containing the details of the clusters I use.

+
```text
+{% set p = inventory.parameters %}
+# Documentation for {{p.target_name}}
+
+Cluster [{{p.cluster.name}}]({{p.cluster.link}}) has release channel {{p.cluster.release_channel}}
+```
+
+

Hashicorp Vault

+

vaultkv

+

Considering a key-value pair like my_key:my_secret at the path secret/foo/bar in a kv-v2 (KV version 2) secret engine on the Vault server, to use this as a secret use:

+
```shell
+echo "foo/bar:my_key"  | kapitan refs --write vaultkv:path/to/secret_inside_kapitan -t <target_name> -f -
+```
+
+

To write a secret to Vault with Kapitan, use a ref tag with the following structure:

+
```yaml
+parameters:
+  ...
+  secret:
+    my_secret: ?{vaultkv:targets/${target_name}/mypath:mount:path/in/vault:mykey||<functions>}
+  ...
+```
+
+

Leave mount empty to use the mount specified in the Vault parameters from the inventory (see below). The same applies to path/in/vault, where the ref path in Kapitan is taken as the default value.

+

Parameters in the secret file are collected from the inventory of the target given on the CLI with -t <target_name>. If a target isn't provided, Kapitan will identify the variables from the environment when revealing the secret.

+

Environment variables that can be defined in the Kapitan inventory are VAULT_ADDR, VAULT_NAMESPACE, VAULT_SKIP_VERIFY, VAULT_CLIENT_CERT, VAULT_CLIENT_KEY, VAULT_CAPATH and VAULT_CACERT. Extra parameters that can be defined in the inventory are:

+
    +
  • auth: specify which authentication method to use, such as token, userpass, ldap, github or approle
  • +
  • mount: specify the mount point of the key's path, e.g. if path=alpha-secret/foo/bar then mount: alpha-secret (default: secret)
  • +
  • +

    engine: secret engine used, either kv-v2 or kv (default: kv-v2)

    Environment variables that cannot be defined in the inventory are VAULT_TOKEN, VAULT_USERNAME, VAULT_PASSWORD, VAULT_ROLE_ID and VAULT_SECRET_ID.

    +
    parameters:
    +  kapitan:
    +    secrets:
    +      vaultkv:
    +        auth: userpass
    +        engine: kv-v2
    +        mount: team-alpha-secret
    +        VAULT_ADDR: http://127.0.0.1:8200
    +        VAULT_NAMESPACE: CICD-alpha
    +        VAULT_SKIP_VERIFY: false
    +        VAULT_CLIENT_KEY: /path/to/key
    +        VAULT_CLIENT_CERT: /path/to/cert
    +
    +
  • +
+

vaulttransit

+

Considering a key-value pair like my_key:my_secret at the path secret/foo/bar with a transit secret engine on the Vault server, to use this as a secret use:

+
```shell
+echo "any.value:whatever-you_may*like"  | kapitan refs --write vaulttransit:my_target/to/secret_inside_kapitan -t <target_name> -f -
+```
+
+

Parameters in the secret file are collected from the inventory of the target given on the CLI with -t <target_name>. If a target isn't provided, Kapitan will identify the variables from the environment when revealing the secret.

+

Environment variables that can be defined in the Kapitan inventory are VAULT_ADDR, VAULT_NAMESPACE, VAULT_SKIP_VERIFY, VAULT_CLIENT_CERT, VAULT_CLIENT_KEY, VAULT_CAPATH and VAULT_CACERT. Extra parameters that can be defined in the inventory are:

+
    +
  • auth: specify which authentication method to use, such as token, userpass, ldap, github or approle
  • +
  • mount: specify the mount point of the key's path, e.g. my_mount (default: transit)
  • +
  • crypto_key: Name of the encryption key defined in vault
  • +
  • +

    always_latest: always rewrap ciphertext to the latest rotated crypto_key version

    Environment variables that cannot be defined in the inventory are VAULT_TOKEN, VAULT_USERNAME, VAULT_PASSWORD, VAULT_ROLE_ID and VAULT_SECRET_ID.

    +
    parameters:
    +  kapitan:
    +    vars:
    +      target: my_target
    +      namespace: my_namespace
    +    secrets:
    +      vaulttransit:
    +        VAULT_ADDR: http://vault.example.com:8200
    +        VAULT_TOKEN: s.i53a1DL83REM61UxlJKLdQDY
    +        VAULT_SKIP_VERIFY: "True"
    +        auth: token
    +        mount: transit
    +        crypto_key: new_key
    +        always_latest: False
    +parameters:
    +  target_name: secrets
    +  kapitan:
    +    secrets:
    +      vaulttransit:
    +        VAULT_ADDR: http://127.0.0.1:8200
    +        VAULT_TOKEN: s.i53a1DL83REM61UxlJKLdQDY
    +        VAULT_SKIP_VERIFY: "True"
    +        auth: token
    +        mount: transit
    +        crypto_key: key
    +        always_latest: False
    +
    +
  • +
+

Azure KMS Secret Backend

+

To encrypt secrets using keys stored in Azure's Key Vault, a key_id is required to identify an Azure key object uniquely. +It should be of the form https://{keyvault-name}.vault.azure.net/{object-type}/{object-name}/{object-version}.

+

Defining the KMS key

+

This is done in the inventory under parameters.kapitan.secrets.

+
```yaml
+parameters:
+  kapitan:
+    vars:
+      target: ${target_name}
+      namespace: ${target_name}
+    secrets:
+      azkms:
+        key: 'https://<keyvault-name>.vault.azure.net/keys/<object-name>/<object-version>'
+```
+
+

The key can also be specified using the --key flag

+

Creating a secret

+

Secrets can be created using any of the methods described in the "creating your secret" section.

+

For example, if the key is defined in the prod target file

+
```shell
+echo "my_encrypted_secret" | kapitan refs --write azkms:path/to/secret_inside_kapitan -t prod -f -
+```
+
+

Using the --key flag and a key_id

+
```shell
+echo "my_encrypted_secret" | kapitan refs --write azkms:path/to/secret_inside_kapitan --key=<key_id> -f -
+```
+
+

Referencing and revealing a secret

+

Secrets can be referenced and revealed in any of the ways described above.

+

For example, to reveal the secret stored at path/to/secret_inside_kapitan

+
```shell
+kapitan refs --reveal --tag "?{azkms:path/to/secret_inside_kapitan}"
+```
+
+

Note: the cryptographic algorithm used for encryption is rsa-oaep-256.

+ +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/related/index.html b/dev/related/index.html new file mode 100644 index 000000000..49db09349 --- /dev/null +++ b/dev/related/index.html @@ -0,0 +1,2003 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Related Projects - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+ +
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/search/search_index.json b/dev/search/search_index.json new file mode 100644 index 000000000..96a68416f --- /dev/null +++ b/dev/search/search_index.json @@ -0,0 +1 @@ +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Kapitan: Keep your ship together","text":"

Kapitan aims to be your one-stop configuration management solution to help you manage the ever growing complexity of your configurations by enabling Platform Engineering and GitOps workflows.

It streamlines complex deployments across heterogeneous environments while providing a secure and adaptable framework for managing infrastructure configurations. Kapitan's inventory-driven model, powerful templating capabilities, and native secret management tools offer granular control, fostering consistency, reducing errors, and safeguarding sensitive data.

Empower your team to make changes to your infrastructure whilst maintaining full control, with a GitOps approach and full transparency.

  • Join the community #kapitan
  • Help us grow: give us a star or even better sponsor our project
"},{"location":"#why-do-i-need-kapitan","title":"Why do I need Kapitan?","text":""},{"location":"#video-tutorials-to-get-started","title":"Video Tutorials to get started","text":"

Kapitan Youtube Channel

InventoryReferencesHelm and Generators integrationRawkode: Introduction to Kapitan

"},{"location":"ADOPTERS/","title":"Who uses Kapitan","text":"

If you're using Kapitan in your organization, please let us know by adding to this list on the docs/ADOPTERS.md file.

"},{"location":"FAQ/","title":"FAQ","text":""},{"location":"FAQ/#why-do-i-need-kapitan","title":"Why do I need Kapitan?","text":"

See Why do I need Kapitan?

"},{"location":"FAQ/#ask-your-question","title":"Ask your question","text":"

Please use the comments facility below to ask your question

"},{"location":"getting_started/","title":"Kapitan Overview","text":""},{"location":"getting_started/#setup-your-installation","title":"Setup your installation","text":"

Using our reference repositories you can easily get started with Kapitan

"},{"location":"getting_started/#examples-repository","title":"Examples repository","text":"

kapicorp/kapitan-reference repository is meant show you many working examples of things you can do with Kapitan. You can use this to get familiar with Kapitan

$ git clone git@github.com:kapicorp/kapitan-reference.git kapitan-templates\n$ cd kapitan-templates\n\n$ ./kapitan compile\nCompiled postgres-proxy (1.51s)\nCompiled tesoro (1.70s)\nCompiled echo-server (1.64s)\nCompiled mysql (1.67s)\nCompiled gke-pvm-killer (1.17s)\nCompiled prod-sockshop (4.74s)\nCompiled dev-sockshop (4.74s)\nCompiled tutorial (1.68s)\nCompiled global (0.76s)\nCompiled examples (2.60s)\nCompiled pritunl (2.03s)\nCompiled sock-shop (4.36s)\n
"},{"location":"getting_started/#minimal-repository","title":"Minimal repository","text":"

Using cruft based cookiecutter

pip3 install cruft\n
cruft create http://github.com/kapicorp/kapitan-reference --checkout cookiecutter --no-input\nDependency https://github.com/kapicorp/generators.git: saved to system/lib\nDependency https://github.com/kapicorp/generators.git: saved to system/generators/kubernetes\nDependency https://github.com/kapicorp/generators.git: saved to system/generators/terraform\nRendered inventory (1.74s)\nCompiled echo-server (0.14s)\n
"},{"location":"getting_started/#running-kapitan","title":"running Kapitan","text":"

recommended

"},{"location":"getting_started/#kapitan-wrapper-script","title":"kapitan wrapper script","text":"

If you use the provided repository, we already package a kapitan shell script that wraps the docker command to run Kapitan

$ ./kapitan compile\nCompiled postgres-proxy (1.51s)\nCompiled tesoro (1.70s)\nCompiled echo-server (1.64s)\nCompiled mysql (1.67s)\nCompiled gke-pvm-killer (1.17s)\nCompiled prod-sockshop (4.74s)\nCompiled dev-sockshop (4.74s)\nCompiled tutorial (1.68s)\nCompiled global (0.76s)\nCompiled examples (2.60s)\nCompiled pritunl (2.03s)\nCompiled sock-shop (4.36s)\n
"},{"location":"getting_started/#other-installation-methods","title":"Other installation methods","text":""},{"location":"getting_started/#docker","title":"Docker","text":"

recommended

"},{"location":"getting_started/#docker_1","title":"Docker","text":"LinuxMac
alias kapitan=\"docker run -t --rm -u $(id -u) -v $(pwd):/src:delegated kapicorp/kapitan\"\nkapitan -h\n
alias kapitan=\"docker run -t --rm -v $(pwd):/src:delegated kapicorp/kapitan\"\nkapitan -h\n
"},{"location":"getting_started/#pip","title":"Pip","text":""},{"location":"getting_started/#install-python","title":"Install Python","text":"LinuxMac
sudo apt-get update && sudo apt-get install -y python3.8-dev python3-pip python3-yaml\n
brew install python3 libyaml libmagic\n
"},{"location":"getting_started/#install-kapitan-using-pip","title":"Install Kapitan using pip","text":""},{"location":"getting_started/#user","title":"User","text":"LinuxMac

kapitan will be installed in $HOME/.local/lib/python3.7/bin

pip3 install --user --upgrade kapitan\n

kapitan will be installed in $HOME/Library/Python/3.7/bin

pip3 install --user --upgrade kapitan\n
"},{"location":"getting_started/#system-wide","title":"System-wide","text":"

not recommended

sudo pip3 install --upgrade kapitan\n
"},{"location":"proposals/","title":"Kapitan proposals","text":"","tags":["community"]},{"location":"proposals/#introduction","title":"Introduction","text":"

Proposals can be submitted for review by performing a pull request against this repository. If approved the proposal will be published here for further review by the Kapitan community. Proposals tend to be improvements or design consideration for new features.

","tags":["community"]},{"location":"proposals/#existing-proposals","title":"Existing proposals","text":"

Kadet input type

External dependency management

Helm charts input type

Kubernetes scheme validation

Portable standalone Kapitan executable

Ref types redesign

Hashicorp vault secrets

","tags":["community"]},{"location":"references/","title":"Kapitan References (formally Secrets)","text":"

One of the motivations behind Kapitan's design is that we believe everything about your setup should be tracked, and Kapitan takes this to the extreme. Sometimes, however, we have to manage values that we do not think belong in the Inventory: perhaps they are too variable (for instance, a Git commit sha that changes with every build) or too sensitive, like a password or a generic secret, in which case they should always be encrypted.

Kapitan has a built in support for References, which you can use to manage both these use cases.

Kapitan References supports the following backends:

Backend Description Encrypted plain Plain text, (e.g. commit sha) base64 Base64, non confidential but with base64 encoding gpg Support for https://gnupg.org/ gkms GCP KMS awskms AWS KMS azkms Azure Key Vault env Environment vaultkv Hashicorp Vault (RO) vaulttransit Hashicorp Vault (encrypt, decrypt, update_key, rotate_key)"},{"location":"references/#setup","title":"Setup","text":"

Some reference backends require configuration, both in the Inventory and to configure the actual backend.

Get started

If you want to get started with references but don't want to deal with the initial setup, you can use the plain and base64 reference types. These are great for demos, but we will see they are extremely helpful even in Production environments.

Danger

Both plain and base64 references do not support encryption: they are intended for development or demo purposes only. DO NOT use plain or base64 for storing sensitive information!

Backend configuration

Configuration for each backend varies, and it is performed by configuring the inventory under parameters.kapitan.secrets.

plainbase64gpggkmsawskmsazkmsenvvaultkvvaulttransit

No configuration needed

No configuration needed

parameters:\n  kapitan:\n    secrets:\n      gpg:\n        recipients:\n          - name: example@kapitan.dev\n            fingerprint: D9234C61F58BEB3ED8552A57E28DC07A3CBFAE7C\n
parameters:\n  kapitan:\n    secrets:\n      gkms:\n        key: 'projects/<project>/locations/<location>/keyRings/<keyRing>/cryptoKeys/<key>'\n
parameters:\n  kapitan:\n    secrets:\n      awskms:\n        key: 'alias/nameOfKey'\n
parameters:\n  kapitan:\n    secrets:\n      azkms:\n        key: 'https://<keyvault-name>.vault.azure.net/keys/<object-name>/<object-version>'\n
parameters:\n  ...\n  mysql:\n    root_password: ?{env:targets/${target_name}/mysql/root_password}\n  ...\n
parameters:\n  kapitan:\n    secrets:\n      vaultkv:\n        VAULT_ADDR: http://127.0.0.1:8200\n        auth: token\n        mount: secret\n
parameters:\n  kapitan:\n    secrets:\n      vaulttransit:\n        VAULT_ADDR: https://vault.example.com\n        VAULT_TOKEN: s.mqWkI0uB6So0aHH0v0jyDs97\n        VAULT_SKIP_VERIFY: \"False\"  # Recommended\n        auth: token\n        mount: mytransit\n        key: 2022-02-13-test\n

Organize your configuration in classes

Just like any other inventory parameters, these configurations can be inherited from a common class or defined per target.

inventory/classes/common.yml

classes:\n- security.backend\n...\n

inventory/classes/security/backend.yml

parameters:\n  kapitan:\n    secrets:\n      <backend>: <configuration>\n
ADVANCED: Mix-and-Match backends

Remember that you can use multiple backends at the same time, and also use variable interpolation for an even greater flexibility.

In a multi-cloud setup, for instance, you could configure both GKMS

GCP configuration

inventory/classes/cloud/gcp.yml

classes:\n- security.backends.gkms\n...\n

inventory/classes/security/backends/gkms.yml

# Configuration for GCP targets\nparameters:\n  backend: gkms\n  kapitan:\n    secrets:\n      gkms: <configuration>\n

AWS configuration

inventory/classes/security/backends/awskms.yml

# Configuration for AWS targets\nparameters:\n  backend: awskms\n  kapitan:\n    secrets:\n      awskms: <configuration>\n

inventory/classes/cloud/aws.yml

classes:\n- security.backends.awskms\n...\n

Now because they both set the parameters.backend variable, you can define a reference whose backend changes based on what class is assigned to the target

inventory/targets/cloud/gcp/acme.yml

classes:\n- cloud.aws\n\nparameters:\n  ...\n  mysql:\n    # the secret backend will change based on the cloud assigned to this target\n    root_password: ?{${backend}:targets/${target_name}/mysql/root_password}\n  ...\n
"},{"location":"references/#define-references","title":"Define references","text":"

References can be defined in the inventory following the syntax spaces added for clarity:

?{ <backend_id> : <reference_path> }

expand for advanced features

The syntax also supports for process functions and create_functions which we will discuss later, which brings the full syntax to

?{ <backend_id> : <reference_path> } |<process_function> ||<create_function>

plainbase64gpggkmsawskmsazkmsenvvaultkvvaulttransit
parameters:\n  ...\n  mysql:\n    root_password: ?{plain:targets/${target_name}/mysql/root_password}\n  ...\n

not encrypted

This reference type does not support encryption: it is intended for non sensitive data only. DO NOT use plain for storing sensitive information!

parameters:\n  ...\n  mysql:\n    root_password: ?{base64:targets/${target_name}/mysql/root_password}\n  ...\n

not encrypted

This reference type does not support encryption: it is intended for non sensitive data only. DO NOT use base64 for storing sensitive information!

parameters:\n  ...\n  mysql:\n    root_password: ?{gpg:targets/${target_name}/mysql/root_password}\n  ...\n
parameters:\n  ...\n  mysql:\n    root_password: ?{gkms:targets/${target_name}/mysql/root_password}\n  ...\n
parameters:\n  ...\n  mysql:\n    root_password: ?{awskms:targets/${target_name}/mysql/root_password}\n  ...\n
parameters:\n  ...\n  mysql:\n    root_password: ?{azkms:targets/${target_name}/mysql/root_password}\n  ...\n
parameters:\n  ...\n  mysql:\n    root_password: ?{env:targets/${target_name}/mysql/root_password}\n  ...\n

read-only

parameters:\n  ...\n  mysql:\n    root_password: ?{vaultkv:targets/${target_name}/mysql/root_password}\n  ...\n
read-write:
parameters:\n  ...\n  mysql:\n    root_password: ?{vaultkv:targets/${target_name}/mysql/root_password:mount:path/in/vault:mykey}\n  ...\n

parameters:\n  ...\n  mysql:\n    root_password: ?{vaulttransit:targets/${target_name}/mysql/root_password}\n  ...\n
"},{"location":"references/#assign-a-value","title":"Assign a value","text":""},{"location":"references/#manually","title":"Manually","text":"

You can assign values to your reference using the command line. Both reading from a file and pipes are supported.

Please Note

Kapitan will fail compilation if a reference is not found. Please see how to assign a value automatically in the next section

plainbase64gpggkmsawskmsazkmsenvvaultkvvaulttransit
kapitan refs --write plain:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f <input file>\n

which also works with pipes

cat input_file | kapitan refs --write plain:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f -\n
kapitan refs --write base64:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f <input file>\n

which also works with pipes

cat input_file | kapitan refs --write base64:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f -\n
kapitan refs --write gpg:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f <input file>\n

which also works with pipes

cat input_file | kapitan refs --write gpg:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f -\n
kapitan refs --write gkms:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f <input file>\n

which also works with pipes

cat input_file | kapitan refs --write gkms:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f -\n
kapitan refs --write vaulttransit:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f <input file>\n

which also works with pipes

cat input_file | kapitan refs --write vaulttransit:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f -\n
kapitan refs --write azkms:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f <input file>\n

which also works with pipes

cat input_file | kapitan refs --write azkms:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f -\n

Setting default value only

The env backend works in a slightly different ways, as it allows you to reference environment variables at runtime.

For example, for a reference called {?env:targets/envs_defaults/mysql_port_${target_name}}, Kapitan would look for an environment variable called KAPITAN_ENV_mysql_port_${TARGET_NAME}.

If that variable cannot be found in the Kapitan environment, the default will be taken from the refs/targets/envs_defaults/mysql_port_${TARGET_NAME} file instead.

kapitan refs --write env:refs/targets/envs_defaults/mysql_port_${TARGET_NAME} -t ${TARGET_NAME} -f <input file>\n

which also works with pipes

cat input_file | kapitan refs --write env:refs/targets/envs_defaults/mysql_port_${TARGET_NAME} -t ${TARGET_NAME} -f -\n
kapitan refs --write vaultkv:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f <input file>\n

which also works with pipes

cat input_file | kapitan refs --write vaultkv:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f -\n

This backend expects the value to be stored as a key:value pair.

echo \"a_key:a_value\" | kapitan refs --write vaulttransit:refs/targets/${TARGET_NAME}/mysql/root_password -t ${TARGET_NAME} -f -\n

When reading from disk, the input file should be formatted accordingly.

"},{"location":"references/#automatically","title":"Automatically","text":"

Kapitan has built in capabilities to initialise its references on creation, using an elegant combination of primary and secondary functions. This is extremely powerful because it allows for you to make sure they are always initialised with sensible values.

"},{"location":"references/#primary-functions","title":"primary functions","text":"

To automate the creation of the reference, you can add one of the following primary functions to the reference tag by using the syntax ||primary_function:param1:param2

For instance, to automatically initialise a reference with a random string with a lenght of 32 characters, you can use the random primary function

```yaml\nparameters:\n  ...\n  mysql:\n    root_password: ?{${backend}:targets/${target_name}/mysql/root_password||random:str:32}\n  ...\n```\n

Initialise non existent references

The first operator here || is more similar to a logical OR.

  • If the reference file does not exist, Kapitan will use the function to initialise it
  • If the reference file exists, no functions will run.

Automate secret rotation with ease

You can take advantage of it to implement easy rotation of secrets. Simply delete the reference files, and run kapitan compile: let Kapitan do the rest.

randomprivate keysbasicauthreveal strintloweralphaupperalphaloweralphanumupperalphanumspecial

Generator function for alphanumeric characters, will be url-token-safe

?{${backend}:targets/${target_name}/mysql/root_password||random:str}\n

generator function for digits (0-9)

?{${backend}:targets/${target_name}/mysql/root_password||random:int}\n

generator function for lowercase letters (a-z)

?{${backend}:targets/${target_name}/mysql/root_password||random:loweralpha}\n

generator function for uppercase letters (A-Z)

?{${backend}:targets/${target_name}/mysql/root_password||random:upperalpha}\n

generator function for lowercase letters and numbers (a-z and 0-9)

?{${backend}:targets/${target_name}/mysql/root_password||random:loweralphanum}\n

generator function for uppercase letters and numbers (A-Z and 0-9)

?{${backend}:targets/${target_name}/mysql/root_password||random:upperalphanum}\n

generator function for alphanumeric characters and given special characters

?{${backend}:targets/${target_name}/mysql/root_password||random:special}\n
rsaed25519publickeyrsapublic

Generates an RSA 4096 private key (PKCS#8). You can optionally pass the key size

?{${backend}:targets/${target_name}/private_key||rsa}\n

Generates a ed25519 private key (PKCS#8)

?{${backend}:targets/${target_name}/private_key||ed25519}\n

Derives the public key from a revealed private key

?{${backend}:targets/${target_name}/private_key||rsa}\n?{${backend}:targets/${target_name}/public_key||reveal:targets/${target_name}/private_key|publickey}\n

DEPRECATED: use ||publickey

Generates a base64 encoded pair of username:password

?{${backend}:targets/${target_name}/apache_basicauth||basicauth:username:password}\n

Reveals the content of another reference, useful when deriving public keys or a reference requires a different encoding or the same value.

?{${backend}:targets/${target_name}/secret||random:str}\n?{${backend}:targets/${target_name}/base64_secret||reveal:targets/${target_name}/secret|base64}\n

attention when rotating secrets used with reveal

If you use reveal to initialise a reference, like my_reference||reveal:source_reference the my_reference will not be automatically updated if source_reference changes. Please make sure you also re-initialise my_reference correctly

"},{"location":"references/#secondary-functions","title":"secondary functions","text":"base64sha256

base64 encodes your reference

?{${backend}:targets/${target_name}/secret||random:str|base64}\n

sha256 hashes your reference param1: salt

?{${backend}:targets/${target_name}/secret||random:str|sha256}\n
"},{"location":"references/#reveal-references","title":"Reveal references","text":"

You can reveal the secrets referenced in the outputs of kapitan compile via:

```shell\nkapitan refs --reveal -f path/to/rendered/template\n```\n

For example, compiled/minikube-mysql/manifests/mysql_secret.yml with the following content:

```yaml\napiVersion: v1\ndata:\n  MYSQL_ROOT_PASSWORD: ?{gpg:targets/minikube-mysql/mysql/password:ec3d54de}\n  MYSQL_ROOT_PASSWORD_SHA256: ?{gpg:targets/minikube-mysql/mysql/password_sha256:122d2732}\nkind: Secret\nmetadata:\n  annotations: {}\n  labels:\n    name: example-mysql\n  name: example-mysql\n  namespace: minikube-mysql\ntype: Opaque\n```\n

can be revealed as follows:

```shell\nkapitan refs --reveal -f compiled/minikube-mysql/manifests/mysql_secret.yml\n```\n

This will substitute the referenced secrets with the actual decrypted secrets stored at the referenced paths and display the file content.

You can also use:

```shell\nkapitan refs --reveal --ref-file refs/targets/all-glob/mysql/password\n```\n

or

```shell\nkapitan refs --reveal --tag \"?{base64:targets/all-glob/mysql/password}\"\n# or\nkapitan refs --reveal --tag \"?{base64:targets/all-glob/mysql/password:3192c15c}\"\n```\n

for more convenience.

"},{"location":"references/#embedded-refs","title":"Embedded refs","text":"

Please refer to the CLI reference

"},{"location":"references/#yaml-subvars-references","title":"YAML SubVars References","text":"

Kapitan is also able to access specific keys in YAML content by using subvars.

For instance given a reference plain:larder with content:

```yaml\nfood:\n  apples: 1\n```\n

I could now have an inventory variable like:

```yaml\nparameters:\n  number_of_apples: ?{plain:larder@food.apple}\n```\n
"},{"location":"references/#using-subvars-to-ingest-yaml-from-command-line-tools","title":"Using subvars to ingest yaml from command line tools","text":"

Subvars can have a very practical use for storing YAML outputs coming straight from other tools. For instance, I could use the GCP gcloud command to get all the information about a cluster, and write it into a reference

```shell\ngcloud container clusters describe \\\n  --project ${TARGET_NAME}-project \\\n  gke-cluster --zone europe-west1 --format yaml \\\n    | kapitan refs --write plain:clusters/${TARGET_NAME}/cluster -t ${TARGET_NAME} -f -\n```\n

knowing the output of gcloud to produce yaml that contain the following values:

```yaml\n...\nname: gke-cluster\nreleaseChannel:\n  channel: REGULAR\nselfLink: https://container.googleapis.com/v1/projects/kapicorp/locations/europe-west1/clusters/gke-cluster\n...\n```\n

I can now reference the link to the cluster in the inventory using:

```yaml\nparameters:\n  cluster:\n    name: ?{plain:clusters/${target_name}/cluster@name} \n    release_channel: ?{plain:clusters/${target_name}/cluster@releaseChannel.channel}\n    link: ?{plain:clusters/${target_name}/cluster@selfLink}\n```\n

Combined with a Jinja template, I could write automatically documentation containing the details of the clusters I use.

```text\n{% set p = inventory.parameters %}\n# Documentation for {{p.target_name}}\n\nCluster [{{p.cluster.name}}]({{p.cluster.link}}) has release channel {{p.cluster.release_channel}}\n```\n
"},{"location":"references/#hashicorp-vault","title":"Hashicorp Vault","text":""},{"location":"references/#vaultkv","title":"vaultkv","text":"

Considering a key-value pair like my_key:my_secret in the path secret/foo/bar in a kv-v2(KV version 2) secret engine on the vault server, to use this as a secret use:

```shell\necho \"foo/bar:my_key\"  | kapitan refs --write vaultkv:path/to/secret_inside_kapitan -t <target_name> -f -\n```\n

To write a secret to the vault with kapitan, use a ref tag with the following structure:

```yaml\nparameters:\n  ...\n  secret:\n    my_secret: ?{vaultkv:targets/${target_name}/mypath:mount:path/in/vault:mykey||<functions>}\n  ...\n```\n

Leave mount empty to use the mount specified in the vault params from the inventory (see below). The same applies to path/in/vault, where the ref path in kapitan is taken as the default value.

Parameters in the secret file are collected from the inventory of the target given on the CLI with -t <target_name>. If a target isn't provided, kapitan will read the variables from the environment when revealing the secret.

Environment variables that can be defined in kapitan inventory are VAULT_ADDR, VAULT_NAMESPACE, VAULT_SKIP_VERIFY, VAULT_CLIENT_CERT, VAULT_CLIENT_KEY, VAULT_CAPATH & VAULT_CACERT. Extra parameters that can be defined in inventory are:

  • auth: specify which authentication method to use like token,userpass,ldap,github & approle
  • mount: specify the mount point of the key's path, e.g. if path=alpha-secret/foo/bar then mount: alpha-secret (default secret)
  • engine: secret engine used, either kv-v2 or kv (default kv-v2). Environment variables that cannot be defined in the inventory are VAULT_TOKEN, VAULT_USERNAME, VAULT_PASSWORD, VAULT_ROLE_ID, VAULT_SECRET_ID.

    parameters:\n  kapitan:\n    secrets:\n      vaultkv:\n        auth: userpass\n        engine: kv-v2\n        mount: team-alpha-secret\n        VAULT_ADDR: http://127.0.0.1:8200\n        VAULT_NAMESPACE: CICD-alpha\n        VAULT_SKIP_VERIFY: false\n        VAULT_CLIENT_KEY: /path/to/key\n        VAULT_CLIENT_CERT: /path/to/cert\n
"},{"location":"references/#vaulttransit","title":"vaulttransit","text":"

Considering a string value like any.value:whatever-you_may*like to be encrypted with a transit secret engine on the vault server, to use this as a secret run:

```shell\necho \"any.value:whatever-you_may*like\"  | kapitan refs --write vaulttransit:my_target/to/secret_inside_kapitan -t <target_name> -f -\n```\n

Parameters in the secret file are collected from the inventory of the target given on the CLI with -t <target_name>. If a target isn't provided, kapitan will read the variables from the environment when revealing the secret.

Environment variables that can be defined in kapitan inventory are VAULT_ADDR, VAULT_NAMESPACE, VAULT_SKIP_VERIFY, VAULT_CLIENT_CERT, VAULT_CLIENT_KEY, VAULT_CAPATH & VAULT_CACERT. Extra parameters that can be defined in inventory are:

  • auth: specify which authentication method to use like token,userpass,ldap,github & approle
  • mount: specify the mount point of the key's path, e.g. mount: my_mount (default transit)
  • crypto_key: Name of the encryption key defined in vault
  • always_latest: Always rewrap ciphertext to the latest rotated crypto_key version. Environment variables that cannot be defined in the inventory are VAULT_TOKEN, VAULT_USERNAME, VAULT_PASSWORD, VAULT_ROLE_ID, VAULT_SECRET_ID.

    parameters:\n  kapitan:\n    vars:\n      target: my_target\n      namespace: my_namespace\n    secrets:\n      vaulttransit:\n        VAULT_ADDR: http://vault.example.com:8200\n        VAULT_TOKEN: s.i53a1DL83REM61UxlJKLdQDY\n        VAULT_SKIP_VERIFY: \"True\"\n        auth: token\n        mount: transit\n        crypto_key: new_key\n        always_latest: False\nparameters:\n  target_name: secrets\n  kapitan:\n    secrets:\n      vaulttransit:\n        VAULT_ADDR: http://127.0.0.1:8200\n        VAULT_TOKEN: s.i53a1DL83REM61UxlJKLdQDY\n        VAULT_SKIP_VERIFY: \"True\"\n        auth: token\n        mount: transit\n        crypto_key: key\n        always_latest: False\n
"},{"location":"references/#azure-kms-secret-backend","title":"Azure KMS Secret Backend","text":"

To encrypt secrets using keys stored in Azure's Key Vault, a key_id is required to identify an Azure key object uniquely. It should be of the form https://{keyvault-name}.vault.azure.net/{object-type}/{object-name}/{object-version}.

"},{"location":"references/#defining-the-kms-key","title":"Defining the KMS key","text":"

This is done in the inventory under parameters.kapitan.secrets.

```yaml\nparameters:\n  kapitan:\n    vars:\n      target: ${target_name}\n      namespace: ${target_name}\n    secrets:\n      azkms:\n        key: 'https://<keyvault-name>.vault.azure.net/keys/<object-name>/<object-version>'\n```\n

The key can also be specified using the --key flag

"},{"location":"references/#creating-a-secret","title":"Creating a secret","text":"

Secrets can be created using any of the methods described in the \"creating your secret\" section.

For example, if the key is defined in the prod target file

```shell\necho \"my_encrypted_secret\" | kapitan refs --write azkms:path/to/secret_inside_kapitan -t prod -f -\n```\n

Using the --key flag and a key_id

```shell\necho \"my_encrypted_secret\" | kapitan refs --write azkms:path/to/secret_inside_kapitan --key=<key_id> -f -\n```\n
"},{"location":"references/#referencing-and-revealing-a-secret","title":"Referencing and revealing a secret","text":"

Secrets can be referenced and revealed in any of the ways described above.

For example, to reveal the secret stored at path/to/secret_inside_kapitan

```shell\nkapitan refs --reveal --tag \"?{azkms:path/to/secret_inside_kapitan}\"\n```\n

Note: The cryptographic algorithm used for encryption is rsa-oaep-256.

"},{"location":"related/","title":"Related projects","text":"
  • Tesoro - Kubernetes Admission Controller for Kapitan Secrets
  • Kapitan Reference - Reference repository to get started
  • sublime-jsonnet-syntax - Jsonnet syntax highlighting for Sublime Text
  • language-jsonnet - Jsonnet syntax highlighting for Atom
  • vim-jsonnet - Jsonnet plugin for Vim (requires a vim plugin manager)
"},{"location":"support/","title":"Get support with Kapitan","text":""},{"location":"support/#community","title":"Community","text":"
  • Join us on kubernetes.slack.com #kapitan (Get invited)
  • Follow us on Twitter @kapitandev.
  • Website https://kapitan.dev
  • Mailing List kapitan-discuss@googlegroups.com (Subscribe)
"},{"location":"support/#resources","title":"Resources","text":"
  • Main Blog, articles and tutorials: Kapitan Blog
  • Generators and reference kapitan repository: Kapitan Reference
  • Kapitan Reference: our reference repository to get started with Kapitan.
"},{"location":"tags/","title":"Tags","text":""},{"location":"tags/#community","title":"community","text":"
  • Proposals
  • Kapitan Code
  • Documentation
  • Sponsor Us
"},{"location":"tags/#kadet","title":"kadet","text":"
  • Kadet
"},{"location":"tags/#kubernetes","title":"kubernetes","text":"
  • Kadet
"},{"location":"kap_proposals/kap_0_kadet/","title":"Kadet","text":"

This introduces a new experimental input type called Kadet.

Kadet is essentially a Python module offering a set of classes and functions to define objects which will compile to JSON or YAML. A complete example is available in examples/kubernetes/components/nginx.

Author: @ramaro

","tags":["kubernetes","kadet"]},{"location":"kap_proposals/kap_0_kadet/#overview","title":"Overview","text":"","tags":["kubernetes","kadet"]},{"location":"kap_proposals/kap_0_kadet/#baseobj","title":"BaseObj","text":"

BaseObj implements the basic object that compiles into JSON or YAML. Setting keys in self.root means they will be in the compiled output. Keys can be set as a hierarchy of attributes (courtesy of addict). The self.body() method is reserved for setting self.root on instantiation:

The example below:

class MyApp(BaseObj):\n def body(self):\n   self.root.name = \"myapp\"\n   self.root.inner.foo = \"bar\"\n   self.root.list = [1, 2, 3]\n

compiles into:

---\nname: myapp\ninner:\n  foo: bar\nlist:\n  - 1\n  - 2\n  - 3\n

The self.new() method can be used to define a basic constructor. self.need() checks if a key is set and errors if it isn't (with an optional custom error message). kwargs that are passed to a new instance of BaseObj are always accessible via self.kwargs. In this example, MyApp needs name and foo to be passed as kwargs.

class MyApp(BaseObj):\n def new(self):\n   self.need(\"name\")\n   self.need(\"foo\", msg=\"please provide a value for foo\")\n\n def body(self):\n   self.root.name = self.kwargs.name\n   self.root.inner.foo = self.kwargs.foo\n   self.root.list = [1, 2, 3]\n\nobj = MyApp(name=\"myapp\", foo=\"bar\")\n
","tags":["kubernetes","kadet"]},{"location":"kap_proposals/kap_0_kadet/#setting-a-skeleton","title":"Setting a skeleton","text":"

Defining a large body with Python can be quite hard and repetitive to read and write. The self.update_root() method allows importing a YAML/JSON file to set the skeleton of self.root.

MyApp's skeleton can be set instead like this:

#skel.yml\n---\nname: myapp\ninner:\n  foo: bar\nlist:\n  - 1\n  - 2\n  - 3\n
class MyApp(BaseObj):\n def new(self):\n   self.need(\"name\")\n   self.need(\"foo\", msg=\"please provide a value for foo\")\n   self.update_root(\"path/to/skel.yml\")\n

Extending a skeleton'd MyApp is possible just by implementing self.body():

class MyApp(BaseObj):\n def new(self):\n   self.need(\"name\")\n   self.need(\"foo\", msg=\"please provide a value for foo\")\n   self.update_root(\"path/to/skel.yml\")\n\n def body(self):\n   self.set_replicas()\n   self.root.metadata.labels = {\"app\": \"mylabel\"}\n\n def set_replicas(self):\n   self.root.spec.replicas = 5\n
","tags":["kubernetes","kadet"]},{"location":"kap_proposals/kap_0_kadet/#inheritance","title":"Inheritance","text":"

Python inheritance will work as expected:

class MyOtherApp(MyApp):\n  def new(self):\n    super().new()  # MyApp's new()\n    self.need(\"size\")\n\n  def body(self):\n    super().body()  # we want to extend MyApp's body\n    self.root.size = self.kwargs.size\n    del self.root.list  # get rid of \"list\"\n\nobj = MyOtherApp(name=\"otherapp1\", foo=\"bar2\", size=3)\n

compiles to:

---\nname: otherapp1\ninner:\n  foo: bar2\nreplicas: 5\nsize: 3\n
","tags":["kubernetes","kadet"]},{"location":"kap_proposals/kap_0_kadet/#components","title":"Components","text":"

A component in Kadet is a python module that must implement a main() function returning an instance of BaseObj. The inventory is also available via the inventory() function.

For example, a tinyapp component:

# components/tinyapp/__init__.py\nfrom kapitan.inputs.kadet import BaseObj, inventory\ninv = inventory() # returns inventory for target being compiled\n\nclass TinyApp(BaseObj):\n  def body(self):\n    self.root.foo = \"bar\"\n    self.root.replicas = inv.parameters.tinyapp.replicas\n\ndef main():\n  obj = BaseObj()\n  obj.root.deployment = TinyApp() # will compile into deployment.yml\n  return obj\n

An inventory class must be created for tinyapp:

# inventory/classes/components/tinyapp.yml\n\nparameters:\n  tinyapp:\n    replicas: 1\n  kapitan:\n    compile:\n    - output_path: manifests\n      input_type: kadet\n      output_type: yaml\n      input_paths:\n        - components/tinyapp\n
","tags":["kubernetes","kadet"]},{"location":"kap_proposals/kap_0_kadet/#common-components","title":"Common components","text":"

A library in --search-paths (which now defaults to . and lib/) can also be a module that kadet components import. It is loaded using load_from_search_paths():

kubelib = load_from_search_paths(\"kubelib\") # lib/kubelib/__init__.py\n\ndef main():\n  obj = BaseObj()\n  obj.root.example_app_deployment = kubelib.Deployment(name=\"example-app\")\n  return obj\n
","tags":["kubernetes","kadet"]},{"location":"kap_proposals/kap_10_azure_key_vault/","title":"Support for Azure Key Management","text":"

This feature will enable users to encrypt secrets using keys stored in Azure's Key Vault. The azkms keyword will be used to access the azure key management backend.

"},{"location":"kap_proposals/kap_10_azure_key_vault/#specification","title":"Specification","text":"

key_id uniquely identifies an Azure key object and its version stored in Key Vault. It is of the form https://{keyvault-name}.vault.azure.net/{object-type}/{object-name}/{object-version}. It needs to be made accessible to kapitan in one of the following ways:

  • As a part of target
parameters:\n  kapitan:\n    secrets:\n      azkms:\n        key: key_id #eg https://kapitanbackend.vault.azure.net/keys/myKey/deadbeef\n
  • As a flag
kapitan refs --key=<key_id> --write azkms:/path/to/secret -f file_with_secret_data.txt\n
"},{"location":"kap_proposals/kap_10_azure_key_vault/#using-a-key-to-encrypt-a-secret","title":"Using a key to encrypt a secret","text":"

The following command will be used to encrypt a secret (using the specified key from Key Vault) and save it in the refs-path along with its metadata

echo \"my_treasured_secret\"  | kapitan refs --write azkms:path/to/secret_inside_kapitan -t <target_name> -f -\n

The -t <target_name> is used to get the information about key_id.

Once the secret is Base64 encoded and encrypted using the key, it will be stored in path/to/secret_inside_kapitan as

data: bXlfdHJlYXN1cmVkX3NlY3JldAo=\nencoding: original\nkey: https://kapitanbackend.vault.azure.net/keys/myKey/deadbeef\ntype: azkms\n

note The cryptographic algorithm used for encryption would be rsa-oaep-256. Optimal Asymmetric Encryption Padding (OAEP) is a padding scheme often used together with RSA encryption.

"},{"location":"kap_proposals/kap_10_azure_key_vault/#referencing-a-secret","title":"referencing a secret","text":"

Secrets can be referred to using ?{azkms:path/to/secret_id} e.g.

parameter:\n    mysql:\n        storage: 10G\n        storage_class: standard\n        image: mysql:latest\n        users:\n            root:\n                password: ?{azkms:path/to/secret}\n
"},{"location":"kap_proposals/kap_10_azure_key_vault/#revealing-a-secret","title":"Revealing a secret","text":"

After compilation, the secret reference will be postfixed with 8 characters from the sha256 hash of the retrieved password/secret

apiVersion: v1\ndata:\n  MYSQL_ROOT_PASSWORD: ?{azkms:path/to/secret:deadbeef}\nkind: Secret\nmetadata:\n  labels:\n    name: example-mysql\n  name: example-mysql\n  namespace: minikube-mysql\ntype: Opaque\n

To reveal the secret, the following command will be used: $ kapitan refs --reveal -f compiled/file/containing/secret

"},{"location":"kap_proposals/kap_10_azure_key_vault/#dependencies","title":"Dependencies","text":"
  • azure-keyvault-keys
  • azure-identity

note Kapitan will not be responsible for authentication or access management to Azure

"},{"location":"kap_proposals/kap_11_hashicorp_vault_transit/","title":"Hashicorp Vault Transit","text":"

This feature allows the user to fetch secrets from Hashicorp Vault, with the new secret backend keyword 'vaulttransit'.

Author: @xqp @Moep90

"},{"location":"kap_proposals/kap_11_hashicorp_vault_transit/#specification","title":"Specification","text":"

The following variables need to be exported to the environment (depending on the authentication used) where you will run kapitan refs --reveal in order to authenticate to your HashiCorp Vault instance (a usage sketch follows this list):

  • VAULT_ADDR: URL for vault
  • VAULT_SKIP_VERIFY=true: if set, do not verify presented TLS certificate before communicating with Vault server. Setting this variable is not recommended except during testing
  • VAULT_TOKEN: token for vault or file (~/.vault-tokens)
  • VAULT_ROLE_ID: required by approle
  • VAULT_SECRET_ID: required by approle
  • VAULT_USERNAME: username to login to vault
  • VAULT_PASSWORD: password to login to vault
  • VAULT_CLIENT_KEY: the path to an unencrypted PEM-encoded private key matching the client certificate
  • VAULT_CLIENT_CERT: the path to a PEM-encoded client certificate for TLS authentication to the Vault server
  • VAULT_CACERT: the path to a PEM-encoded CA cert file to use to verify the Vault server TLS certificate
  • VAULT_CAPATH: the path to a directory of PEM-encoded CA cert files to verify the Vault server TLS certificate
  • VAULT_NAMESPACE: specify the Vault Namespace, if you have one
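For instance, a minimal sketch of the export step, assuming token authentication (the address, token and compiled path below are placeholders, not real values):

export VAULT_ADDR=https://vault.example.com:8200\nexport VAULT_TOKEN=s.xxxxxxxxxxxxxxxx\nkapitan refs --reveal -f compiled/my_target/manifests/app-deployment.yml\n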

Considering any string data like any.value:whatever-you_may*like (in our case let's encrypt any.value:whatever-you_may*like with vault transit) using the key 2022-02-13-test in a transit secret engine with mount mytransit on the vault server, to use this as a secret either follow:

echo \"any.value:whatever-you_may*like\" > somefile.txt\nkapitan refs --write vaulttransit:<target_name>/to/secret_inside_kapitan --file somefile.txt --target <target_name>\n

or in a single line

echo \"any.value:whatever-you_may*like\"  | kapitan refs --write vaulttransit:<target_name>/to/secret_inside_kapitan -t <target_name> -f -\n

The entire string \"any.value:whatever-you_may*like\" will be encrypted by vault and looks like this in return: vault:v2:Jhn3UzthKcJ2s+sEiO60EUiDmuzqUC4mMBWp2Vjg/DGl+GDFEDIPmAQpc5BdIefkplb6yrJZq63xQ9s=. This then gets base64 encoded and stored in the secret_inside_kapitan. Now secret_inside_kapitan contains the following

data: dmF1bHQ6djI6SmhuM1V6dGhLY0oycytzRWlPNjBFVWlEbXV6cVVDNG1NQldwMlZqZy9ER2wrR0RGRURJUG1BUXBjNUJkSWVma3BsYjZ5ckpacTYzeFE5cz0=\nencoding: original\ntype: vaulttransit\nvault_params:\n  VAULT_ADDR: http://127.0.0.1:8200\n  VAULT_SKIP_VERIFY: 'True'\n  VAULT_TOKEN: s.i53a1DL83REM61UxlJKLdQDY\n  auth: token\n  crypto_key: key\n  mount: transit\n  always_latest: false\n
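The data field is simply the base64 encoding of that ciphertext; a quick sketch to check it, using the illustrative values above:

echo \"dmF1bHQ6djI6SmhuM1V6dGhLY0oycytzRWlPNjBFVWlEbXV6cVVDNG1NQldwMlZqZy9ER2wrR0RGRURJUG1BUXBjNUJkSWVma3BsYjZ5ckpacTYzeFE5cz0=\" | base64 -d\n# vault:v2:Jhn3UzthKcJ2s+sEiO60EUiDmuzqUC4mMBWp2Vjg/DGl+GDFEDIPmAQpc5BdIefkplb6yrJZq63xQ9s=\n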

Encoding tells the type of data given to kapitan: if it is original, then after decoding the base64 we get the original secret; if it is base64, then after decoding once we still have a base64-encoded secret and have to decode again. Parameters in the secret file are collected from the inventory of the target given on the CLI with --target my_target. If a target isn't provided, kapitan will read the variables from the environment, but providing auth is necessary as a key inside the target parameters, like the one shown:

parameters:\n  kapitan:\n    vars:\n      target: my_target\n      namespace: my_namespace\n    secrets:\n      vaulttransit:\n        VAULT_ADDR: http://vault.example.com:8200\n        VAULT_TOKEN: s.i53a1DL83REM61UxlJKLdQDY\n        VAULT_SKIP_VERIFY: \"True\"\n        auth: token\n        mount: transit\n        crypto_key: new_key   \n        always_latest: False\n

Environment variables that can be defined in kapitan inventory are VAULT_ADDR, VAULT_NAMESPACE, VAULT_SKIP_VERIFY, VAULT_CLIENT_CERT, VAULT_CLIENT_KEY, VAULT_CAPATH & VAULT_CACERT. Extra parameters that can be defined in inventory are:

  • auth: specify which authentication method to use like token,userpass,ldap,github & approle
  • mount: specify the mount point of the key's path, e.g. if path=alpha-secret/foo/bar then mount: alpha-secret (default secret)
  • crypto_key: Name of the encryption key defined in vault
  • always_latest: Always rewrap ciphertext to the latest rotated crypto_key version. Environment variables that should NOT be defined in the inventory are VAULT_TOKEN, VAULT_USERNAME, VAULT_PASSWORD, VAULT_ROLE_ID, VAULT_SECRET_ID. This makes the secret_inside_kapitan file accessible throughout the inventory, where we can use the secret whenever necessary like ?{vaulttransit:${target_name}/secret_inside_kapitan}

The following is an example file containing a secret that points to the vault: ?{vaulttransit:${target_name}/secret_inside_kapitan}

parameters:\n  releases:\n    app_version: latest\n  app:\n    image: app:app-tag\n    release: ${releases:app_version}\n    replicas: ${replicas}\n    args:\n      - --verbose=${verbose}\n      - --password=?{vaulttransit:${target_name}/secret_inside_kapitan||random:str}\n

When ?{vaulttransit:${target_name}/secret_inside_kapitan} is compiled, it will look the same, with the first 8 characters of the sha256 hash appended at the end, like:

kind: Deployment\nmetadata:\n  name: app\n  namespace: my_namespace\nspec:\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: app\n    spec:\n      containers:\n        - args:\n            - --verbose=True\n            - --password=?{vaulttransit:${target_name}/secret_inside_kapitan||random:str}\n          image: app:app-tag\n          name: app\n

Only the user with the required tokens/permissions can reveal the secrets. Please note that the roles and permissions will be handled at the Vault level. We need not worry about it within Kapitan. Use the following command to reveal the secrets:

kapitan refs --reveal -f compile/file/containing/secret\n

The following is the result of the app-deployment.md file after the Kapitan reveal.

kind: Deployment\nmetadata:\n  name: app\n  namespace: my_namespace\nspec:\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: app\n    spec:\n      containers:\n        - args:\n            - --verbose=True\n            - --password=\"any.value:whatever-you_may*like\"\n          image: app:app-tag\n          name: app\n
"},{"location":"kap_proposals/kap_11_hashicorp_vault_transit/#vault-policies","title":"Vault policies","text":"
path \"mytransit/encrypt/2022-02-13-test\" {\n    capabilities = [ \"create\", \"update\" ]\n}\n\npath \"mytransit/decrypt/2022-02-13-test\" {\n    capabilities = [ \"create\", \"update\" ]\n}\n
"},{"location":"kap_proposals/kap_11_hashicorp_vault_transit/#dependencies","title":"Dependencies","text":"
  • hvac is a python client for Hashicorp Vault
"},{"location":"kap_proposals/kap_1_external_dependencies/","title":"External dependencies","text":"

This feature allows kapitan to fetch files from online repositories/sources during compile and store them in a particular target directory.

Author: @yoshi-1224

"},{"location":"kap_proposals/kap_1_external_dependencies/#specification","title":"Specification","text":"

Specify the files to be fetched as follows:

parameters:\n kapitan:\n  dependencies:\n   - type: git | http[s]\n     output_path: <output_path>\n     source: <git/http[s]_url>    \n

The output path is the path to save the dependency into. For example, it could be /components/external/manifest.jsonnet. Then, the user can specify the fetched file as a kapitan.compile item along with the locally-created files.

The git type may also include ref and subdir parameters as illustrated below:

- type: git\n  output_path: <output_path>\n  source: <git_url>\n  subdir: relative/path/in/repository\n  ref: <commit_hash/branch/tag>\n  force_fetch: <bool>\n

If the file already exists at output_path, the fetch will be skipped. For a fresh fetch of the dependencies, users may add the --fetch option as follows:

kapitan compile --fetch\n

Users can also add the force_fetch: true option to kapitan.dependencies in the inventory in order to force fetching the dependencies of the target every time.

"},{"location":"kap_proposals/kap_1_external_dependencies/#implementation-details","title":"Implementation details","text":""},{"location":"kap_proposals/kap_1_external_dependencies/#dependencies","title":"Dependencies","text":"
  • GitPython module (and git executable) for git type
  • requests module for http[s]
  • (optional) tqdm for reporting download progress
"},{"location":"kap_proposals/kap_2_helm_charts_input_type/","title":"Helm Charts Input Type","text":"

This will allow kapitan, during compilation, to overwrite the values in user-specified helm charts using its inventory by calling the Go & Sprig template libraries. The helm charts can be specified via local path, and users may download the helm chart via external-dependency feature (of http[s] type).

Author: @yoshi-1224

"},{"location":"kap_proposals/kap_2_helm_charts_input_type/#specification","title":"Specification","text":"

This feature basically follows the helm template command. It will run after the fetching of the external dependencies takes place, such that users can simultaneously specify the fetch as well as the import of a helm chart dependency.

"},{"location":"kap_proposals/kap_2_helm_charts_input_type/#semantics","title":"Semantics","text":"
kapitan:\n  compile:\n    - input_type: helm\n      input_path: <path_to_chart_dir> \n      output_path: <output_path>\n      set-file:\n        - <optional_file_path>\n        - ...\n      values_file: <optional_values_file>\n      namespace: <optional_namespace>\n

This mostly maps to the options available to the helm template command (refer to here).

"},{"location":"kap_proposals/kap_2_helm_charts_input_type/#implementation-details","title":"Implementation details","text":"

A C-binding between Helm (Go) and Kapitan (Python) will be created. Helm makes use of two template libraries, namely, text/template and Sprig. The code for the helm template command will be converted into a shared object (.so) using CGo, which exposes a C interface that kapitan (i.e. CPython) could use. The source code for the helm template command is found here. This file will be modified to

  1. remove redundant options
  2. expose C-interface for Kapitan
"},{"location":"kap_proposals/kap_2_helm_charts_input_type/#dependencies","title":"Dependencies","text":"
  • (possibly) pybindgen
"},{"location":"kap_proposals/kap_3_schema_validation/","title":"Schema Validation (for k8s)","text":"

If a yaml/json output is to be used as a k8s manifest, users may specify its kind and have kapitan validate its structure during kapitan compile. The plan is to have this validation feature extendable to other outputs as well, such as terraform.

Author: @yoshi-1224

"},{"location":"kap_proposals/kap_3_schema_validation/#specification","title":"Specification","text":"

The following inventory will validate the structure of the Kubernetes Service manifest file at the specified output_path.

parameters:\n  kapitan:\n    validate:\n       - output_type: kubernetes.service \n         version: 1.6.6\n         output_path: relative/path/in/target\n

The version parameter is optional: if omitted, the version will be set to the stable release of kubernetes (tbc).

"},{"location":"kap_proposals/kap_3_schema_validation/#implementation","title":"Implementation","text":"
  • The schemas will be downloaded by requests from this repository.
  • Caching of schema will also be implemented.
"},{"location":"kap_proposals/kap_3_schema_validation/#dependencies","title":"Dependencies","text":"
  • jsonschema to validate the output yaml/json against the correct schema
"},{"location":"kap_proposals/kap_4_standalone_executable/","title":"Standalone Kapitan Executable (Discontinued)","text":"

Create a portable (i.e. static) kapitan binary for users. This executable will be made available for each release on Github. The target/tested platform is Debian 9 (possibly Windows to be supported in the future).

Criteria:

  • speed of the resulting binary
  • size of the resulting binary
  • portability of the binary (single-file executable or has an accompanying folder)
  • cross-platform
  • actively maintained
  • supports Python 3.6, 3.7

Author: @yoshi-1224

"},{"location":"kap_proposals/kap_4_standalone_executable/#tools-to-be-explored","title":"Tools to be explored","text":"
  • (tentative first-choice) Pyinstaller
  • (Alternative) nuitka (also part of GSoC 2019. It might soon support single-file executable output).
"},{"location":"kap_proposals/kap_5_ref_types_redesign/","title":"Ref Types Redesign","text":"

Redesign Kapitan Secrets and rename them as References or Ref.

Breaking changes:

  • $ kapitan secrets is replaced with $ kapitan refs
  • the default secrets directory ./secrets/ changes to ./refs/
  • the --secrets-path flag changes to --refs-path
  • the ref ref type is renamed to base64, e.g. ?{ref:some/ref} becomes ?{base64:some/ref} (see the sketch after this list)
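A minimal sketch of the before/after command line implied by these changes (file name illustrative):

# before the redesign\nkapitan secrets --write ref:some/ref -f file.txt\n# after the redesign\nkapitan refs --write base64:some/ref -f file.txt\n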

Status: In progress

Author: @ramaro

"},{"location":"kap_proposals/kap_5_ref_types_redesign/#proposal","title":"Proposal","text":"

Rename Secrets into Ref (or References) to improve consistency and meaning of the backend types by removing the ref backend and introducing new backends:

| Type | Description | Encrypted? | Compiles To |
| --- | --- | --- | --- |
| gpg | GnuPG | Yes | hashed tag |
| gkms | Google KMS | Yes | hashed tag |
| awskms | Amazon KMS | Yes | hashed tag |
| base64 | base64 | No | hashed tag |
| plain | plain text | No | plain text |

The type value will now need to be representative of the way a reference is stored via its backend.

A new plain backend type is introduced and will compile into a revealed state instead of a hashed tag.

A new base64 backend type will store a base64 encoded value, as the name suggests (replacing the old badly named ref backend).

The command line for secrets will be instead:

kapitan refs --write gpg:my/secret1 ...\nkapitan refs --write base64:my/file ...\nkapitan refs --write plain:my/info ...\n
"},{"location":"kap_proposals/kap_5_ref_types_redesign/#plain-backend","title":"plain backend","text":"

The plain backend type will allow referring to external state by updating refs programmatically (e.g. in your pipeline)

For example, one can update the value of an environment variable and use ?{plain:my/user} as a reference in a template:

echo $USER | kapitan refs --write plain:my/user -f -\n

Or update a docker image value as ref ?{plain:images/dev/envoy}:

echo 'envoyproxy/envoy:v1.10.0' | kapitan refs --write plain:images/dev/envoy -f -\n

These references will be compiled into their values instead of hashed tags.

"},{"location":"kap_proposals/kap_5_ref_types_redesign/#base64-backend","title":"base64 backend","text":"

The base64 backend type will function as the original ref type, except that this time the name is representative of what is actually happening :)

"},{"location":"kap_proposals/kap_5_ref_types_redesign/#refs-path","title":"Refs path","text":"

Refs will be stored by default in the ./refs path, set by the --refs-path flag, which replaces the --secrets-path flag.
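A sketch of overriding the default location with the new flag (directory and ref names illustrative):

echo \"some info\" | kapitan refs --refs-path ./my-refs --write plain:my/info -f -\n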

"},{"location":"kap_proposals/kap_5_ref_types_redesign/#background","title":"Background","text":""},{"location":"kap_proposals/kap_5_ref_types_redesign/#kapitan-secrets","title":"Kapitan Secrets","text":"

Kapitan Secrets allow referring to restricted information (passwords, private keys, etc...) in templates while also securely storing them.

On compile, secret tags are updated into hashed tags which validate and instruct Kapitan how to reveal tags into decrypted or encoded information.

"},{"location":"kap_proposals/kap_5_ref_types_redesign/#kapitan-secrets-example","title":"Kapitan Secrets example","text":"

The following command creates a GPG encrypted secret with the contents of file.txt for recipient ramaro@google.com to read:

kapitan secrets --write gpg:my/secret1 -f file.txt --recipients ramaro@google.com\n

This secret can be referred to in a jsonnet component:

{\n    \"type\": \"app\",\n    \"name\": \"test_app\",\n    \"username\": \"user_one\",\n    \"password\": \"?{gpg:my/secret1}\"\n}\n

When this component is compiled, it looks like (note the hashed tag):

type: app\nname: test_app\nusername: user_one\npassword: ?{gpg:my/secret1:deadbeef}\n

A user with the required permissions can reveal the compiled component:

$ kapitan secrets --reveal -f compiled/mytarget/manifests/component.yml\n\ntype: app\nname: test_app\nusername: user_one\npassword: secret_content_of_file.txt\n
"},{"location":"kap_proposals/kap_5_ref_types_redesign/#secret-backend-comparison","title":"Secret Backend Comparison","text":"

Kapitan today offers multiple secret backends:

| Type | Description | Encrypted? | Compiles To |
| --- | --- | --- | --- |
| gpg | GnuPG | Yes | hashed tag |
| gkms | Google KMS | Yes | hashed tag |
| awskms | Amazon KMS | Yes | hashed tag |
| ref | base64 | No | hashed tag |

However, not all backends are encrypted - this is not consistent!

The ref type is not encrypted, as its purpose is to allow getting started with the Kapitan Secrets workflow without the need to set up the encryption backend tooling (gpg, gcloud, boto, etc...)

"},{"location":"kap_proposals/kap_6_hashicorp_vault/","title":"Hashicorp Vault","text":"

This feature allows the user to fetch secrets from Hashicorp Vault, with the new secret backend keyword 'vaultkv'.

Author: @vaibahvk @daminisatya

"},{"location":"kap_proposals/kap_6_hashicorp_vault/#specification","title":"Specification","text":"

The following variables need to be exported to the environment (depending on the authentication used) where you will run kapitan refs --reveal in order to authenticate to your HashiCorp Vault instance (a usage sketch follows this list):

  • VAULT_ADDR: URL for vault
  • VAULT_SKIP_VERIFY=true: if set, do not verify presented TLS certificate before communicating with Vault server. Setting this variable is not recommended except during testing
  • VAULT_TOKEN: token for vault or file (~/.vault-tokens)
  • VAULT_ROLE_ID: required by approle
  • VAULT_SECRET_ID: required by approle
  • VAULT_USERNAME: username to login to vault
  • VAULT_PASSWORD: password to login to vault
  • VAULT_CLIENT_KEY: the path to an unencrypted PEM-encoded private key matching the client certificate
  • VAULT_CLIENT_CERT: the path to a PEM-encoded client certificate for TLS authentication to the Vault server
  • VAULT_CACERT: the path to a PEM-encoded CA cert file to use to verify the Vault server TLS certificate
  • VAULT_CAPATH: the path to a directory of PEM-encoded CA cert files to verify the Vault server TLS certificate
  • VAULT_NAMESPACE: specify the Vault Namespace, if you have one
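For instance, a minimal sketch of the export step, assuming userpass authentication (the address, credentials and compiled path below are placeholders, not real values):

export VAULT_ADDR=https://vault.example.com:8200\nexport VAULT_USERNAME=kapitan-user\nexport VAULT_PASSWORD=change-me\nkapitan refs --reveal -f compiled/dev-sea/manifests/cod-deployment.yml\n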

Considering a key-value pair like my_key:my_secret (in our case let's store hello:batman inside the vault) in the path secret/foo in a kv-v2 (KV version 2) secret engine on the vault server, to use this as a secret either follow:

echo \"foo:hello\" > somefile.txt\nkapitan refs --write vaultkv:path/to/secret_inside_kapitan --file somefile.txt --target dev-sea\n

or in a single line

echo \"foo:hello\"  | kapitan refs --write vaultkv:path/to/secret_inside_kapitan -t dev-sea -f -\n

The entire string \"foo:hello\" is base64 encoded and stored in the secret_inside_kapitan. Now secret_inside_kapitan contains the following

data: Zm9vOmhlbGxvCg==\nencoding: original\ntype: vaultkv\nvault_params:\n  auth: token\n

Encoding tells the type of data given to kapitan: if it is original, then after decoding the base64 we get the original secret; if it is base64, then after decoding once we still have a base64-encoded secret and have to decode again. Parameters in the secret file are collected from the inventory of the target given on the CLI with --target dev-sea. If a target isn't provided, kapitan will read the variables from the environment, but providing auth is necessary as a key inside the target parameters, like the one shown:

parameters:\n  kapitan:\n    secrets:\n      vaultkv:\n        auth: userpass\n        engine: kv-v2\n        mount: team-alpha-secret\n        VAULT_ADDR: http://127.0.0.1:8200\n        VAULT_NAMESPACE: CICD-alpha\n        VAULT_SKIP_VERIFY: false\n        VAULT_CLIENT_KEY: /path/to/key\n        VAULT_CLIENT_CERT: /path/to/cert\n

Environment variables that can be defined in kapitan inventory are VAULT_ADDR, VAULT_NAMESPACE, VAULT_SKIP_VERIFY, VAULT_CLIENT_CERT, VAULT_CLIENT_KEY, VAULT_CAPATH & VAULT_CACERT. Extra parameters that can be defined in inventory are:

  • auth: specify which authentication method to use like token,userpass,ldap,github & approle
  • mount: specify the mount point of the key's path, e.g. if path=alpha-secret/foo/bar then mount: alpha-secret (default secret)
  • engine: secret engine used, either kv-v2 or kv (default kv-v2). Environment variables that cannot be defined in the inventory are VAULT_TOKEN, VAULT_USERNAME, VAULT_PASSWORD, VAULT_ROLE_ID, VAULT_SECRET_ID. This makes the secret_inside_kapitan file accessible throughout the inventory, where we can use the secret whenever necessary like ?{vaultkv:path/to/secret_inside_kapitan}

The following is an example file containing a secret that points to the vault: ?{vaultkv:path/to/secret_inside_kapitan}

parameters:\n  releases:\n    cod: latest\n  cod:\n    image: alledm/cod:${cod:release}\n    release: ${releases:cod}\n    replicas: ${replicas}\n    args:\n      - --verbose=${verbose}\n      - --password=?{vaultkv:path/to/secret_inside_kapitan}\n

When ?{vaultkv:path/to/secret_inside_kapitan} is compiled, it will look the same, with the first 8 characters of the sha256 hash appended at the end, like:

kind: Deployment\nmetadata:\n  name: cod\n  namespace: dev-sea\nspec:\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: cod\n    spec:\n      containers:\n        - args:\n            - --verbose=True\n            - --password=?{vaultkv:path/to/secret_inside_kapitan:57d6f9b7}\n          image: alledm/cod:v2.0.0\n          name: cod\n

Only the user with the required tokens/permissions can reveal the secrets. Please note that the roles and permissions will be handled at the Vault level. We need not worry about it within Kapitan. Use the following command to reveal the secrets:

kapitan refs --reveal -f compile/file/containing/secret\n

The following is the result of the cod-deployment.md file after the Kapitan reveal.

kind: Deployment\nmetadata:\n  name: cod\n  namespace: dev-sea\nspec:\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: cod\n    spec:\n      containers:\n        - args:\n            - --verbose=True\n            - --password=batman\n          image: alledm/cod:v2.0.0\n          name: cod\n
"},{"location":"kap_proposals/kap_6_hashicorp_vault/#dependencies","title":"Dependencies","text":"
  • hvac is a python client for Hashicorp Vault
"},{"location":"kap_proposals/kap_7_remote_inventory/","title":"Remote Inventory Federation","text":"

This feature would add the ability for Kapitan to fetch parts of the inventory from remote locations (https/git). This would allow users to combine different inventories from different sources and build modular infrastructure reusable across various repos.

Author: @alpharoy14

"},{"location":"kap_proposals/kap_7_remote_inventory/#specification","title":"Specification","text":"

The configuration and declaration of remote inventories would be done in the inventory files.

The file specifications are as follows:

parameters:\n kapitan:\n  inventory:\n   - type: <inventory_type> #git\\https\n     source: <source_of_inventory>\n     output_path: <relative_output_path>\n

On executing the $ kapitan compile --fetch command, the remote inventories will be fetched first, followed by the external dependencies, and finally the inventory is merged and compiled.

"},{"location":"kap_proposals/kap_7_remote_inventory/#copying-inventory-files-to-the-output-location","title":"Copying inventory files to the output location","text":"

The output path is the path to save the inventory items into. The path is relative to the inventory/ directory. For example, it could be /classes/. The contents of the fetched inventory will be recursively copied.

The fetched inventory files will be cached in the .dependency_cache directory if --cache is set, e.g. $ kapitan compile --fetch --cache

"},{"location":"kap_proposals/kap_7_remote_inventory/#force-fetching","title":"Force fetching","text":"

While fetching, the output path will be recursively checked to see if it contains any file with the same name. If so, kapitan will skip fetching it.

To overwrite the files with the newly downloaded inventory items, we can add the --force-fetch flag to the compile command, as shown below.

$ kapitan compile --force-fetch

"},{"location":"kap_proposals/kap_7_remote_inventory/#url-type","title":"URL type","text":"

The URL type can be either git or http(s). Depending on the URL type, the configuration file may have additional arguments.

E.g. the git type may also include an additional ref parameter as illustrated below:

inventory:\n - type: git #git\\https\n   source: <source_of_inventory>\n   output_path: <output_path>\n   ref: <commit_hash/branch/tag>\n
"},{"location":"kap_proposals/kap_7_remote_inventory/#implementation-details","title":"Implementation details","text":"

TODO

"},{"location":"kap_proposals/kap_7_remote_inventory/#dependencies","title":"Dependencies","text":"
  • GitPython module (and git executable) for git type
  • requests module for http[s]
"},{"location":"kap_proposals/kap_8_google_secret_management/","title":"Support for Google Secret Manager","text":"

This feature will enable users to retrieve secrets from the Google Secret Manager API using the gsm keyword.

"},{"location":"kap_proposals/kap_8_google_secret_management/#specification","title":"Specification","text":"

project_id uniquely identifies GCP projects, and it needs to be made accessible to kapitan in one of the following ways:

  • As a part of target
parameters:\n  kapitan:\n    secrets:\n      gsm:\n        project_id: Project_Id\n
  • As a flag
kapitan refs --google-project-id=<Project_Id> --write gsm:/path/to/secret_id -f secret_id_file.txt\n
  • As an environment variable
export PROJECT_ID=<Project_Id>\n
"},{"location":"kap_proposals/kap_8_google_secret_management/#using-a-secret","title":"Using a secret","text":"

In GCP, a secret contains one or more secret versions, along with its metadata. The actual contents of a secret are stored in a secret version. Each secret is identified by a name. We call that variable secret_id, e.g. my_treasured_secret. The URI of the secret becomes projects/<Project_Id>/secrets/my_treasured_secret

The following command will be used to add a secret_id to kapitan.

echo \"my_treasured_secret\"  | kapitan refs --write gsm:path/to/secret_inside_kapitan -t <target_name> -f -\n

The -t <target_name> is used to get the information about Project_ID.

The secret_id is Base64 encoded and stored in path/to/secret_inside_kapitan as

data: bXlfdHJlYXN1cmVkX3NlY3JldAo=\nencoding: original\ntype: gsm\ngsm_params:\n  project_id: Project_ID\n
"},{"location":"kap_proposals/kap_8_google_secret_management/#referencing-a-secret","title":"referencing a secret","text":"

Secrets can be referred to using ?{gsm:path/to/secret_id:version_id} e.g.

parameter:\n    mysql:\n        storage: 10G\n        storage_class: standard\n        image: mysql:latest\n        users:\n            root:\n                password: ?{gsm:path/to/secret_id:version_id}\n

Here, version_id will be an optional argument. By default it will point to latest.
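As a sketch of the proposed syntax, both forms would be revealed the same way (the version number below is hypothetical):

kapitan refs --reveal --tag \"?{gsm:path/to/secret_id:2}\"  # explicit version_id\nkapitan refs --reveal --tag \"?{gsm:path/to/secret_id}\"    # defaults to latest\n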

"},{"location":"kap_proposals/kap_8_google_secret_management/#revealing-a-secret","title":"Revealing a secret","text":"

After compilation, the secret reference will be postfixed with 8 characters from the sha256 hash of the retrieved password

apiVersion: v1\ndata:\n  MYSQL_ROOT_PASSWORD: ?{gsm:path/to/secret_id:version_id:deadbeef}\nkind: Secret\nmetadata:\n  labels:\n    name: example-mysql\n  name: example-mysql\n  namespace: minikube-mysql\ntype: Opaque\n

To reveal the secret, the following command will be used: $ kapitan refs --reveal -f compiled/file/containing/secret

"},{"location":"kap_proposals/kap_8_google_secret_management/#dependencies","title":"Dependencies","text":"
  • google-cloud-secret-manager

note Kapitan will not be responsible for authentication or access management to GCP

"},{"location":"kap_proposals/kap_8_modularize_kapitan/","title":"Modularize Kapitan","text":"

Kapitan is packaged on PyPI and as a binary along with all its dependencies. Adding an extra key/security backend means that we need to ship another dependency with that PyPI package, making deploying changes more complicated. This project would modularize kapitan into core dependencies and extra modules.

"},{"location":"kap_proposals/kap_8_modularize_kapitan/#usage","title":"Usage","text":"
pip3 install --user kapitan # to install only core dependencies\npip3 install --user kapitan[gkms] # gkms is the module\n
"},{"location":"kap_proposals/kap_8_modularize_kapitan/#implementation","title":"Implementation","text":"
  • The main module includes the essential kapitan dependencies and reclass dependencies, which will be included in the requirements.txt file.
  • The extra modules (PyPI extras) will be defined in the setup.py file.
  • The extra dependencies are for secret backends (AWS KMS backend, Google KMS backend, Vault backend, etc.) and Helm support.
"},{"location":"kap_proposals/kap_9_bring_your_own_helm/","title":"Bring Your Own Helm Proposal","text":""},{"location":"kap_proposals/kap_9_bring_your_own_helm/#the-problem","title":"The Problem","text":"

Currently the helm binding can't be run on Mac OSX. Attempts to fix this have been made on several occasions:

  • https://github.com/kapicorp/kapitan/pull/414
  • https://github.com/kapicorp/kapitan/pull/547
  • https://github.com/kapicorp/kapitan/pull/568

There are some issues with the current bindings besides the lack of Mac OSX support. The golang runtime (1.14) selected will affect older versions of helm templates: https://github.com/helm/helm/issues/7711. Users can't select the version of helm they'd like to use for templating.

"},{"location":"kap_proposals/kap_9_bring_your_own_helm/#solution","title":"Solution","text":"

Users supply their own helm binary. This allows them to control the version of the golang runtime and the version of helm they'd like to use.

In Kapitan we could rewrite the interface to use subprocess and perform commands. The CLI of helm 2 vs helm 3 is slightly different but shouldn't be difficult to codify.
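A rough sketch of the two invocations such a wrapper would have to codify (flags abbreviated, release name and chart path illustrative):

# helm 2\nhelm template --name myrelease --namespace myns ./charts/mychart\n# helm 3\nhelm template myrelease ./charts/mychart --namespace myns\n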

This would also let us get rid of cffi and golang, which will reduce the complexity and build time of the project.

Depending on how this goes, this could pave the way for a \"bring your own binary\" input type.

"},{"location":"pages/external_dependencies/","title":"External dependencies","text":"

Kapitan has the functionality to fetch external dependencies from remote locations.

Supported dependencies types are:

  • git
  • http
  • helm
"},{"location":"pages/external_dependencies/#usage","title":"Usage","text":"

Kapitan by default will not attempt to download any dependency, and will rely on what is already available.

"},{"location":"pages/external_dependencies/#basic-fetching","title":"Basic fetching","text":"

You can use the fetch option to explicitly fetch the dependencies:

Using the cli:
kapitan compile --fetch\n

Using the .kapitan dotfile (to make it the default, so you can then simply run kapitan compile):

...\ncompile:\n  fetch: true \n

This will download the dependencies and store them at their respective output_path.

"},{"location":"pages/external_dependencies/#overwrite-local-changes","title":"Overwrite local changes","text":"

When fetching a dependency, Kapitan will refuse to overwrite existing files to preserve your local modifications.

Use the force-fetch option to force overwrite your local files in the output_path.

Using the cli:
kapitan compile --force-fetch\n

Using the .kapitan dotfile (to make it the default, so you can then simply run kapitan compile):

...\ncompile:\n  force-fetch: true \n
"},{"location":"pages/external_dependencies/#caching","title":"Caching","text":"

Kapitan also supports caching. Use the --cache flag to cache the fetched items in the .dependency_cache directory in the root project directory.

```shell\nkapitan compile --cache --fetch\n```\n
"},{"location":"pages/external_dependencies/#defining-dependencies","title":"Defining dependencies","text":"githttphelm"},{"location":"pages/external_dependencies/#syntax","title":"Syntax","text":"
parameters:\n  kapitan:\n    dependencies:\n    - type: git\n      output_path: path/to/dir\n      source: git_url # mkdocs (1)!\n      subdir: relative/path/from/repo/root (optional) # mkdocs (2)!\n      ref: tag, commit, branch etc. (optional) # mkdocs (3)!\n      submodules: true/false (optional) # mkdocs (4)!\n
  1. Git types can fetch external git repositories through either HTTP/HTTPS or SSH URLs.
  2. Optional support for cloning just a sub-directory
  3. Optional support for accessing them in specific commits and branches (refs).
  4. Optional support to disable fetching the submodules of a repo.

Note

This type depends on the git binary installed on your system and available to Kapitan.

"},{"location":"pages/external_dependencies/#example","title":"Example","text":"

Say we want to fetch the source code from our kapitan repository, specifically, kapicorp/kapitan/kapitan/version.py. Let's create a very simple target file inventory/targets/kapitan-example.yml.

parameters:\n  kapitan:\n    vars:\n      target: kapitan-example\n    dependencies:\n    - type: git\n      output_path: source/kapitan\n      source: git@github.com:kapicorp/kapitan.git\n      subdir: kapitan\n      ref: master\n      submodules: true\n    compile:\n    - input_paths:\n      - source/kapitan/version.py\n      input_type: jinja2 # just to copy the file over to target\n      output_path: .\n
"},{"location":"pages/external_dependencies/#syntax_1","title":"Syntax","text":"
parameters:\n  kapitan:\n    dependencies:\n    - type: http | https # mkdocs (2)!\n      output_path: path/to/file # mkdocs (1)!\n      source: http[s]://<url> # mkdocs (2)!\n      unpack: True | False # mkdocs (3)! \n
  1. output_path must fully specify the file name.
  2. http[s] types can fetch external dependencies available at http:// or https:// URL.
  3. archive mode: download and unpack
"},{"location":"pages/external_dependencies/#example_1","title":"Example","text":"Single fileArchive

Say we want to download the kapitan README.md file. Since it's on Github, we can access it as https://raw.githubusercontent.com/kapicorp/kapitan/master/README.md. Using the following inventory, we can copy this to our target folder:

parameters:\n  kapitan:\n    vars:\n      target: kapitan-example\n    dependencies:\n    - type: https\n      output_path: README.md\n      source: https://raw.githubusercontent.com/kapicorp/kapitan/master/README.md\n    compile:\n    - input_paths:\n      - README.md\n      input_type: jinja2\n      output_path: .\n
"},{"location":"pages/external_dependencies/#syntax_2","title":"Syntax","text":"
parameters:\n  kapitan:\n    dependencies:\n    - type: helm\n      output_path: path/to/chart\n      source: http[s]|oci://<helm_chart_repository_url>\n      version: <specific chart version>\n      chart_name: <name of chart>\n      helm_path: <helm binary>\n

Fetches helm charts and any specific subcharts in the requirements.yaml file.

helm_path can be used to specify the helm binary name or path. It defaults to the value of the KAPITAN_HELM_PATH environment variable, or simply to helm if that is not set. You should specify it only if you don't want the default behavior.
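For example, a sketch of pointing Kapitan at a specific helm binary via the environment (the path below is illustrative):

```shell\nexport KAPITAN_HELM_PATH=/usr/local/bin/helm3\nkapitan compile --fetch\n```\n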

source can be either the URL to a chart repository, or the URL to a chart on an OCI registry (supported since Helm 3.8.0).

"},{"location":"pages/external_dependencies/#example_2","title":"Example","text":"

If we want to download the prometheus helm chart, we simply add the dependency to the monitoring target. We want a specific version, 11.3.0, so we put that in.

parameters:\n  kapitan:\n    vars:\n      target: monitoring\n    dependencies:\n      - type: helm\n        output_path: charts/prometheus\n        source: https://kubernetes-charts.storage.googleapis.com\n        version: 11.3.0\n        chart_name: prometheus\n    compile:\n      - input_type: helm\n        output_path: .\n        input_paths:\n          - charts/prometheus\n        helm_values:\n          alertmanager:\n            enabled: false\n        helm_params:\n          namespace: monitoring\n          name: prometheus\n
"},{"location":"pages/kapitan_overview/","title":"Kapitan Overview","text":""},{"location":"pages/kapitan_overview/#kapitan-at-a-glance","title":"Kapitan at a glance","text":"

Kapitan is a powerful configuration management tool designed to help engineers manage complex systems through code. It centralizes and simplifies the management of configurations with a structured approach that revolves around a few core concepts.

Kapitan diagram
%%{ init: { securityLevel: 'loose'} }%%\ngraph LR\n    classDef pink fill:#f9f,stroke:#333,stroke-width:4px,color:#000,font-weight: bold;\n    classDef blue fill:#00FFFF,stroke:#333,stroke-width:4px,color:#000,font-weight: bold;\n    TARGET1 --> KAPITAN\n    TARGET2 --> KAPITAN\n    TARGETN --> KAPITAN\n    KAPITAN --> EXTERNAL\n    KAPITAN --> GENERATORS\n    KAPITAN --> HELM\n    KAPITAN --> JINJA\n    KAPITAN --> JSONNET\n    KAPITAN --> KADET\n    EXTERNAL --> OUTPUT\n    GENERATORS --> OUTPUT\n    JINJA --> OUTPUT\n    JSONNET --> OUTPUT\n    KADET --> OUTPUT\n    HELM --> OUTPUT\n    GKMS --> REFERENCES\n    AWSKMS --> REFERENCES\n    VAULT --> REFERENCES\n    OTHER --> REFERENCES\n    PLAIN --> REFERENCES\n    OUTPUT --> TARGETN_OUTPUT\n    OUTPUT --> TARGET1_OUTPUT \n    OUTPUT --> TARGET2_OUTPUT \n    REFERENCES --> KAPITAN\n    TARGET1_OUTPUT --> DOCUMENTATION \n    TARGET1_OUTPUT --> KUBERNETES\n    TARGET1_OUTPUT --> SCRIPTS \n    TARGET1_OUTPUT --> TERRAFORM\n    CLASSES --> TARGET1\n    CLASSES --> TARGET2\n    CLASSES --> TARGETN\n\n    subgraph \"Inventory\"\n        CLASSES[classes]\n        TARGET1([\"target 1\"]):::pink\n        TARGET2([\"target 2\"])\n        TARGETN([\"target N\"])\n    end\n\n    subgraph \"references\"\n        direction TB\n        GKMS[\"GCP KMS\"]\n        AWSKMS[\"AWS KMS\"]\n        VAULT[\"Hashicorp Vault\"]\n        OTHER[\"others\"]\n        PLAIN[\"plain\"]\n        REFERENCES[\"references\"]\n    end\n\n    KAPITAN((\"<img src='/images/kapitan_logo.png'; width='80'/>\")):::blue\n    click EXTERNAL \"/compile#external\"\n\n    subgraph \"Input Types\" \n        EXTERNAL[\"external\"]\n        GENERATORS[\"generators\"]\n        HELM[\"helm\"]\n        JINJA[\"jinja\"]\n        JSONNET[\"jsonnet\"]\n        KADET[\"kadet\"]\n    end\n\n    OUTPUT{{\"compiled output\"}}:::blue\n\n\n\n    subgraph \" \"\n        TARGET1_OUTPUT([target1]):::pink\n        DOCUMENTATION[\"docs\"]\n        KUBERNETES[\"manifests\"]\n        SCRIPTS[\"scripts\"]\n        TERRAFORM[\"terraform\"]\n    end\n\n    TARGET2_OUTPUT([\"target 2\"])\n    TARGETN_OUTPUT([\"target N\"])\n

Let's explore these concepts in a way that's accessible to new users:

"},{"location":"pages/kapitan_overview/#inventory","title":"Inventory","text":"

At the core of Kapitan lies the Inventory, a structured database of variables meticulously organized in YAML files. This hierarchical setup serves as the single source of truth (SSOT) for your system's configurations, making it easier to manage and reference the essential components of your infrastructure. Whether you're dealing with Kubernetes configurations, Terraform resources, or even business logic, the Inventory allows you to define and store these elements efficiently. This central repository then feeds into Kapitan's templating engines, enabling seamless reuse across various applications and services.

"},{"location":"pages/kapitan_overview/#input-types","title":"Input Types","text":"

Kapitan takes the information stored in the Inventory and brings it to life through its templating engines upon compilation. This process transforms static data into dynamic configurations, capable of generating a wide array of outputs like Kubernetes manifests, Terraform plans, documentation, and scripts. It's about making your configurations work for you, tailored to the specific needs of your projects.

See Input Types for more

"},{"location":"pages/kapitan_overview/#generators","title":"Generators","text":"

Generators offer a straightforward entry point into using Kapitan, requiring minimal to no coding experience. These are essentially pre-made templates that allow you to generate common configuration files, such as Kubernetes manifests, directly from your Inventory data. Kapitan provides a wealth of resources, including the Kapitan Reference GitHub repository and various blog posts, to help users get up and running with generators.

"},{"location":"pages/kapitan_overview/#kadet","title":"Kadet","text":"

For those looking to leverage the full power of Kapitan, Kadet introduces a method to define and reuse complex configurations through Python. This internal library facilitates the creation of JSON and YAML manifests programmatically, offering a higher degree of customization and reuse. Kadet empowers users to craft intricate configurations with the simplicity and flexibility of Python.

"},{"location":"pages/kapitan_overview/#references","title":"References","text":"

Kapitan References provide a secure way to store passwords, settings, and other essential data within your project. Think of them as special code placeholders.

  • Flexibility: Update a password once, and Kapitan updates it everywhere automatically.
  • Organization: References tidy up your project, especially when you're juggling multiple settings or environments (dev, staging, production).
  • Security: Protect sensitive information like passwords with encryption.

Tip

Use Tesoro, our Kubernetes Admission Controller, to complete your integration with Kubernetes for secure secret decryption on-the-fly.

"},{"location":"pages/remote_repositories/","title":"Remote Inventories","text":"

Kapitan is capable of recursively fetching inventory items stored in remote locations and copying them to the specified output path. This feature can be used by specifying those inventory items in classes or targets under parameters.kapitan.inventory. Supported types are:

  • git type
  • http type

Class items can be specified before they are locally available, as long as they are fetched in the same run. An example of this is given below.

"},{"location":"pages/remote_repositories/#git-type","title":"Git type","text":"

Git types can fetch external inventories available via HTTP/HTTPS or SSH URLs. This is useful for fetching repositories or their sub-directories, as well as accessing them in specific commits and branches (refs).

Note: git types require the git binary on your system.

"},{"location":"pages/remote_repositories/#definition","title":"Definition","text":"
parameters:\n  kapitan:\n    inventory:\n    - type: git\n      output_path: path/to/dir\n      source: git_url\n      subdir: relative/path/from/repo/root (optional)\n      ref: tag, commit, branch etc. (optional)\n
"},{"location":"pages/remote_repositories/#example","title":"Example","text":"

Let's say we want to fetch a class from our kapitan repository, specifically kapicorp/kapitan/tree/master/examples/docker/inventory/classes/dockerfiles.yml.

Let's create a simple target file docker.yml

Note

external dependencies are used to fetch dependency items in this example.

targets/docker.yml

classes:\n  - dockerfiles\nparameters:\n  kapitan:\n    vars:\n      target: docker\n    inventory:\n      - type: git\n        source: https://github.com/kapicorp/kapitan\n        subdir: examples/docker/inventory/classes/\n        output_path: classes/\n    dependencies:\n      - type: git\n        source: https://github.com/kapicorp/kapitan\n        subdir: examples/docker/components\n        output_path: components/\n      - type: git\n        source: https://github.com/kapicorp/kapitan\n        subdir: examples/docker/templates\n        output_path: templates/\n  dockerfiles:\n  - name: web\n    image: amazoncorretto:11\n  - name: worker\n    image: amazoncorretto:8\n
kapitan compile --fetch\n
click to expand output
[WARNING] Reclass class not found: 'dockerfiles'. Skipped!\n[WARNING] Reclass class not found: 'dockerfiles'. Skipped!\nInventory https://github.com/kapicorp/kapitan: fetching now\nInventory https://github.com/kapicorp/kapitan: successfully fetched\nInventory https://github.com/kapicorp/kapitan: saved to inventory/classes\nDependency https://github.com/kapicorp/kapitan: saved to components\nDependency https://github.com/kapicorp/kapitan: saved to templates\nCompiled docker (0.11s)\n
"},{"location":"pages/remote_repositories/#http-type","title":"http type","text":"

http[s] types can fetch external inventories available at an http:// or https:// URL.

"},{"location":"pages/remote_repositories/#definition_1","title":"Definition","text":"
parameters:\n  kapitan:\n    inventory:\n    - type: http | https\n      output_path: full/path/to/file.yml\n      source: http[s]://<url>\n      unpack: True | False # False by default\n
"},{"location":"pages/remote_repositories/#example_1","title":"Example","text":"

targets/mysql-generator-fetch.yml

classes:\n  - common\n  - kapitan.generators.kubernetes\nparameters:\n  kapitan:\n    inventory:\n      - type: https\n        source: https://raw.githubusercontent.com/kapicorp/kapitan-reference/master/inventory/classes/kapitan/generators/kubernetes.yml\n        output_path: classes/kapitan/generators/kubernetes.yml\n  components:\n    mysql:\n      image: mysql\n
kapitan compile --fetch\n
click to expand output
./kapitan compile -t mysql-generator-fetch --fetch\nInventory https://raw.githubusercontent.com/kapicorp/kapitan-reference/master/inventory/classes/kapitan/generators/kubernetes.yml: fetching now\nInventory https://raw.githubusercontent.com/kapicorp/kapitan-reference/master/inventory/classes/kapitan/generators/kubernetes.yml: successfully fetched\nInventory https://raw.githubusercontent.com/kapicorp/kapitan-reference/master/inventory/classes/kapitan/generators/kubernetes.yml: saved to inventory/classes/kapitan/generators/kubernetes.yml\n\n...\ncut\n...\n\nCompiled mysql-generator-fetch (0.06s)\n
"},{"location":"pages/blog/","title":"Blog","text":""},{"location":"pages/blog/04/12/2022/kapitan-logo-5-years-of-kapitan/","title":"5 Years of Kapitan","text":"

Last October we quietly celebrated 5 years of Kapitan.

In 5 years, we've witnessed a steady and relentless growth of Kapitan, which has nevertheless never caught the full attention of the wider community.

The main issue has always been an embarrassing lack of documentation, and we've worked hard to improve on that, with more updates due soon.

Let this first blog post from a revamped website be a promise to our community of a better effort in explaining what sets Kapitan apart, and makes it the only tool of its kind.

And let's start with a simple question: Why do you even need Kapitan?

Credits

In reality Kapitan's heartbeat started about 9 months earlier at DeepMind Health, created by [**Ricardo Amaro**](https://github.com/ramaro) with the help of some of my amazing team: in no particular order [Adrian Chifor](https://github.com/adrianchifor), [Paul S](https://github.com/uberspot) and [Luis Buriola](https://github.com/gburiola). It was then kindly released to the community by Google/DeepMind and has since been improved thanks to more than [50 contributors](https://github.com/kapicorp/kapitan/graphs/contributors).\n
"},{"location":"pages/blog/04/12/2022/kapitan-logo-5-years-of-kapitan/#why-do-i-need-kapitan","title":"Why do I need Kapitan?","text":"

Kapitan is a hard sell, but a rewarding one. For these main reasons:

  1. Kapitan solves problems that some don't even know (or think) they have.
  2. Some people by now have probably accepted the status quo and think that some suffering is part of their job description.
  3. Objectively, learning a new tool like Kapitan requires an investment of effort, and this adds friction.

All I can say is that it is very rewarding once you get to use it, so stick with me while I try to explain the problems that Kapitan solves.

"},{"location":"pages/blog/04/12/2022/kapitan-logo-5-years-of-kapitan/#the-problems","title":"The problems","text":"

It would be reductive to list the problems that Kapitan solves, because sometimes we ourselves are stunned by what Kapitan is being used for. So I will start with some common, relatable ones, and perhaps that will give you the right framing to understand how to use it with your setup.

In its most basic explanation, Kapitan solves the problem of duplicated configuration data: it consolidates it in one place (the Inventory) and makes it accessible to all the tools and languages it integrates with (see Input Types).

This configuration data is then used by Kapitan (templates) to configure and operate a number of completely distinct and unaware tools which would normally not be able to share their configurations.

"},{"location":"pages/blog/04/12/2022/kapitan-logo-5-years-of-kapitan/#without-kapitan","title":"Without Kapitan","text":"

Let's consider the case where you want to define a new bucket, with a given bucket_name. Without Kapitan you would probably need to:

  • Write a PR on your Terraform repository to create the new bucket.
  • Which name should I use? Make sure to write it down! CTRL-C
  • Write a PR for your values.yaml file to configure your Helm chart: <CTRL-V>
  • Write some documentation somewhere to note down the bucket name and why it exists. Another <CTRL-V>
  • Another PR to change some **kustomize** configuration for another service to tell it to use the new bucket <CTRL-V>
  • Days after, time to upload something to that bucket: gsutil cp my_file wait_what_was_the_bucket_name_again.. Better check the documentation: CTRL-C + <CTRL-V>
"},{"location":"pages/blog/04/12/2022/kapitan-logo-5-years-of-kapitan/#with-kapitan","title":"With Kapitan","text":"

When using Kapitan, your changes are likely to be contained within one PR, from which you can have a full view of everything that is happening. What happens is explained in this flow

\n%%{ init: { securityLevel: 'loose'} }%%\ngraph LR\n    classDef pink fill:#f9f,stroke:#333,stroke-width:4px,color:#000,font-weight: bold;\n    classDef blue fill:#00FFFF,stroke:#333,stroke-width:4px,color:#000,font-weight: bold;\n    classDef bold color:#000,font-weight: bold;\n\n    DATA --> KAPITAN\n    BUCKET --> DATA\n    KAPITAN --> KUBERNETES\n    KAPITAN --> TERRAFORM\n    KAPITAN --> DOCUMENTATION\n    KAPITAN --> SCRIPT\n    KAPITAN --> HELM\n    KUBERNETES -->  BUCKET_K8S\n    TERRAFORM -->  BUCKET_TF\n    DOCUMENTATION  --> BUCKET_DOC\n    SCRIPT --> BUCKET_SCRIPT\n    HELM --> BUCKET_HELM\n\n\n    DATA[(\"All your data\")]\n    BUCKET(\"bucket_name\")\n    KAPITAN((\"<img src='/images/kapitan_logo.png'; width='150'/>\")):::blue\n\n\n    subgraph \" \"\n      KUBERNETES([\"Kubernetes\"]):::pink\n      BUCKET_K8S(\".. a ConfigMap uses bucket_name\"):::bold\n    end\n    subgraph \" \"\n    TERRAFORM([\"Terraform\"]):::pink\n    BUCKET_TF(\"..creates the bucket bucket_name\"):::bold\n    end\n    subgraph \" \"\n    DOCUMENTATION([\"Documentation\"]):::pink\n    BUCKET_DOC(\"..references a link to bucket_name\"):::bold\n    end\n    subgraph \" \"\n    SCRIPT([\"Canned Script\"]):::pink\n    BUCKET_SCRIPT(\"..knows how to upload files to bucket_name\"):::bold\n    end\n    subgraph \" \"\n    HELM([\"Helm\"]):::pink\n    BUCKET_HELM(\"..configures a chart to use the bucket_name\"):::bold\n    end

Thanks to its flexibility, you can use Kapitan to generate all sorts of configurations: Kubernetes and Terraform resources, ArgoCD pipelines, Docker Compose files, random configs, scripts, documentation and anything else you find relevant. The trick is obviously in how to drive these changes, but it is not as complicated as it sounds. We'll get there soon enough!
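
As a rough sketch of the idea (the file name and keys are hypothetical), the bucket name lives once in the Inventory and every template, script or document interpolates it from there:

# inventory/classes/components/my_bucket.yml (hypothetical class)\nparameters:\n  my_bucket:\n    name: acme-prod-uploads   # single source of truth\n  # Terraform, Helm values, docs and scripts all reference ${my_bucket:name}\n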

Now let's look at another example of practices so established in the way we do things that their problems become almost impossible to see. To highlight the potential issues, let's ask some questions about your current setup. We'll pick on Kubernetes this time.

"},{"location":"pages/blog/04/12/2022/kapitan-logo-5-years-of-kapitan/#kubernetes","title":"Kubernetes","text":"

I'll start with Kubernetes, such a popular and brilliant solution to problems most people should not be concerned with (jokes aside, I adore Kubernetes). To most, Kubernetes is the type of solution that quickly turns into a problem in its own right.

So.. how do you deploy to Kubernetes right now?

Helm comes to mind first, right?

Kapitan + Helm: BFF

In spite of Kapitan being initially considered (even by ourselves) as an alternative to Helm, we've actually enjoyed the benefits of integrating with this amazing tool and the ecosystem it gives us access to. So yes, good news: you can use Helm right from within Kapitan!

Well, let\u2019s put that to a test. How do you manage your Helm charts? I\u2019ll attempt to break these questions down into categories.

Code Organization | DRY | Maintenance | Operations | Documentation | Secrets management | Everything else
  • Where do you keep your Helm charts?
    • In a single repository?
    • How many repositories?
    • Alongside the code you develop?
  • What about the official ones that you didn't create yourself?
  • How many values.yaml files do you have?
  • How much consistency is there between them? Any snowflakes?
  • If you change something, like with the bucket_name example above:
    • how many places do you need to go and update?
    • And how many times do you get it wrong?
  • Don't you feel all your charts look the same?
    • Yet how many times do you need to deviate from the one you thought captured everything?
    • What if you need to make a change to all your charts at once: how do you deal with it?
  • What about configuration files, how do you deal with templating those?
  • How do you deal with \u201cofficial\u201d charts, do they always cover what you want to do?
  • How do you deal with modifications that you need to apply to your own version of an official chart?
  • What if you need to make a change that affects ALL your charts?
  • Or if the change is for all the charts for a set of microservices?
  • How many times do you find yourself setting parameters on the command line of Helm and other tools?
  • How many times did you connect to the wrong context in Kubernetes?
  • How many of your colleagues have the same clean context setup as you have?
  • How many things are there that you wish you were tracking?
  • How do I connect to the production database? Which user is it again?
  • How easy is it for you to create a new environment from scratch?
    • Are you sure?
    • When was the last time you tried?
  • How easy is it to keep your configuration up to date?
  • Does your documentation need to be \u201cunderstood\u201d, or can it just be executed on?
    • How many conditionals like this do you have in your documentation?

      NOTE: Cluster X in project Y has an older version of Q and requires you to do Z instead N because of A, B and C!

  • Would you be able to follow those instructions at 3am on a Sunday morning?
  • How do you handle secrets in your repository?
  • Do you know how to create your secrets from scratch?
  • Do you remember that token you created 4 months ago? How did you do that?
  • How long would it take you?
  • Is the process of creating them \u201csecure\u201d?
    • Or does it leave you with random certificates and tokens unencrypted on your \u201cDownloads\u201d folder?
  • The above concerns: do they also apply to other things you manage?
  • Terraform?
  • Pipelines?
  • Random other systems you interact with?

I\u2019ll stop here because I do not want to lose you, and neither do I want to discourage you.

But if you look around, it's true: you do have a very complicated setup. And Kapitan can help you streamline it. In fact, Kapitan can leave you with a consistent and uniform way to manage all these concerns at once.

My job here is done: you have awakened, and you won't look at your setup in the same way. Stay tuned and learn how Kapitan can change the way you do things.

"},{"location":"pages/blog/04/12/2022/kapitan-logo-new-kapitan-release--v0310/","title":"New Kapitan release v0.31.0","text":"

The Kapicorp team is happy to announce a new release of Kapitan.

This release is yet another great bundle of features and improvements over the past year, the majority of which have been contributions from our community!

Head over to our release page on GitHub for a full list of features and contributors.

If you missed it, have a look at our latest blog post here 5 years of Kapitan

Please help us by visiting our Sponsor Kapitan page.

"},{"location":"pages/blog/04/12/2022/kapitan-logo-new-kapitan-release--v0320/","title":"New Kapitan release v0.32.0","text":"

The Kapicorp team is happy to announce a new release of Kapitan.

This release contains loads of improvements from the past 6 months, the majority of which have been contributions from our community!

Head over to our release page on GitHub for a full list of features and contributors.

Please help us by visiting our Sponsor Kapitan page.

"},{"location":"pages/blog/27/08/2023/kapitan-logo-deploying-keda-with-kapitan/","title":"Deploying Keda with Kapitan","text":"

We have worked hard to bring out a brand new way of experiencing Kapitan, through something that we call generators.

Although the concept is something we introduced in 2020 with our blog post Keep your ship together with Kapitan, the sheer amount of new capabilities (and frankly, the embarrassing lack of documentation and examples) forces me to show them off using a practical example: deploying Keda.

"},{"location":"pages/blog/27/08/2023/kapitan-logo-deploying-keda-with-kapitan/#objective-of-this-tutorial","title":"Objective of this tutorial","text":"

We are going to deploy Keda using the helm chart approach. While Kapitan supports a native way to deploy helm charts using the helm input type, we are instead going to use a generator-based approach with the \"charts\" generator.

This tutorial will show you how to configure kapitan to:

  • download a helm chart
  • compile a helm chart
  • modify a helm chart using mutations

The content of this tutorial is already available on the kapitan-reference repository.

"},{"location":"pages/blog/27/08/2023/kapitan-logo-deploying-keda-with-kapitan/#deploying-keda","title":"Deploying KEDA","text":""},{"location":"pages/blog/27/08/2023/kapitan-logo-deploying-keda-with-kapitan/#define-parameters","title":"Define parameters","text":"
## inventory/classes/components/keda.yml\nparameters:\n  keda:\n    params:\n      # Variables to reference from other places\n      application_version: 2.11.2\n      service_account_name: keda-operator\n      chart_name: keda\n      chart_version: 2.11.2\n      chart_dir: system/sources/charts/${keda:params:chart_name}/${keda:params:chart_name}/${keda:params:chart_version}/${keda:params:application_version}\n      namespace: keda\n      helm_values: {}\n...\n

Override Helm Values

As an example, we could pass Helm an override to the default values to make the operator deploy 2 replicas.

  helm_values:\n    operator:\n      replicaCount: 2  \n
"},{"location":"pages/blog/27/08/2023/kapitan-logo-deploying-keda-with-kapitan/#download-the-chart","title":"Download the chart","text":"

Kapitan supports downloading dependencies, including helm charts.

When Kapitan is run with the --fetch flag, it will download the dependency if it is not already present. Use --force-fetch if you want to download it every time. Learn more about External dependencies.

## inventory/classes/components/keda.yml\n...\n  kapitan:\n    dependencies:\n      # Tells kapitan to download the helm chart into the chart_dir directory\n      - type: helm\n        output_path: ${keda:params:chart_dir}\n        source: https://kedacore.github.io/charts\n        version: ${keda:params:chart_version}\n        chart_name: ${keda:params:chart_name}\n...\n

Parameter interpolation

Notice how we are using parameter interpolation from the previously defined keda.params section. This will make it easier in the future to override some aspects of the configuration on a per-target basis.

"},{"location":"pages/blog/27/08/2023/kapitan-logo-deploying-keda-with-kapitan/#generate-the-chart","title":"Generate the chart","text":"
## inventory/classes/components/keda.yml\n...\n  charts:\n     # Configures a helm generator to compile files for the given chart\n    keda:\n      chart_dir: ${keda:params:chart_dir}\n      helm_params:\n        namespace: ${keda:params:namespace}\n        name: ${keda:params:chart_name}\n      helm_values: ${keda:params:helm_values}\n
"},{"location":"pages/blog/27/08/2023/kapitan-logo-deploying-keda-with-kapitan/#compile","title":"Compile","text":"

Before we can see any effect, we need to attach the class to a target. We will create a simple target which looks like this:

# inventory/targets/tutorials/keda.yml\nclasses:\n- common\n- components.keda\n

Now when we run kapitan compile we will see the chart being downloaded and the manifests being produced.

./kapitan compile -t keda --fetch\nDependency keda: saved to system/sources/charts/keda/keda/2.11.2/2.11.2\nRendered inventory (1.87s)\nCompiled keda (2.09s)\n

kapitan compile breakdown

  • --fetch tells kapitan to fetch the chart if it is not found locally
  • -t keda tells kapitan to compile only the previously defined keda.yml target
ls -l compiled/keda/manifests/\ntotal 660\n-rw-r--r-- 1 ademaria root 659081 Aug 29 10:25 keda-bundle.yml\n-rw-r--r-- 1 ademaria root     79 Aug 29 10:25 keda-namespace.yml\n-rw-r--r-- 1 ademaria root   7092 Aug 29 10:25 keda-rbac.yml\n-rw-r--r-- 1 ademaria root   1783 Aug 29 10:25 keda-service.yml\n
"},{"location":"pages/blog/27/08/2023/kapitan-logo-deploying-keda-with-kapitan/#using-mutations","title":"Using mutations","text":"

Now let's do a couple of things that would not be easy to do with helm natively.

You can already notice that the content of the chart is being split into multiple files: this is because the Generator is configured to separate different resource types into different files for convenience and consistency. The mechanism behind it is the \"Mutation\" of type \"bundle\", which tells Kapitan which file to save a resource into.

Here are some example \"bundle\" mutations which separate different kinds into different files:

        mutations:\n          bundle:\n            - conditions:\n                kind: [Ingress]\n              filename: '{content.component_name}-ingress'\n              ...\n            - conditions:\n                kind: [HorizontalPodAutoscaler, PodDisruptionBudget, VerticalPodAutoscaler]\n              filename: '{content.component_name}-scaling'\n            - conditions:\n                kind: ['*']\n              filename: '{content.component_name}-bundle'\n

Catch-all rule

Notice the catch-all rule at the end that puts everything that has not matched into the bundle.yml file.

"},{"location":"pages/blog/27/08/2023/kapitan-logo-deploying-keda-with-kapitan/#bundle-mutation","title":"bundle mutation","text":"

Currently most of the Keda-related resources are bundled into the -bundle.yml file. Instead, we want to separate the CRDs into their own file.

Let's add this configuration:

  charts:\n     # Configures a helm generator to compile files for the given chart\n    keda:\n      chart_dir: ${keda:params:chart_dir}\n      ... \n      mutations:\n        bundle:\n          - conditions:\n              # CRDs need to be setup separately\n              kind: [CustomResourceDefinition]\n            filename: '{content.component_name}-crds'\n

Upon compile, you can now see that the CRDs are being moved to a different file:

ls -l compiled/keda/manifests/\ntotal 664\n-rw-r--r-- 1 ademaria root  11405 Aug 29 10:56 keda-bundle.yml\n-rw-r--r-- 1 ademaria root 647672 Aug 29 10:56 keda-crds.yml\n-rw-r--r-- 1 ademaria root     79 Aug 29 10:56 keda-namespace.yml\n-rw-r--r-- 1 ademaria root   7092 Aug 29 10:56 keda-rbac.yml\n-rw-r--r-- 1 ademaria root   1783 Aug 29 10:56 keda-service.yml\n
"},{"location":"pages/blog/27/08/2023/kapitan-logo-deploying-keda-with-kapitan/#patch-mutation","title":"patch mutation","text":"

As we are using Argo, we want to pass a special argocd.argoproj.io/sync-options annotation to the CRDs only, so that ArgoCD can handle them properly.

For this we are going to use the patch mutation:

...\n      mutations:\n...\n        patch:\n          - conditions:\n              kind: [CustomResourceDefinition]\n            patch:\n              metadata:\n                annotations:\n                  argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true,Replace=true\n

Upon compile, you can now see that the CRDs have been modified as required:

diff --git a/compiled/keda/manifests/keda-crds.yml b/compiled/keda/manifests/keda-crds.yml\nindex 2662bf3..9306c3a 100644\n--- a/compiled/keda/manifests/keda-crds.yml\n+++ b/compiled/keda/manifests/keda-crds.yml\n@@ -2,6 +2,7 @@ apiVersion: apiextensions.k8s.io/v1\n kind: CustomResourceDefinition\n metadata:\n   annotations:\n+    argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true,Replace=true\n     controller-gen.kubebuilder.io/version: v0.12.0\n
"},{"location":"pages/blog/27/08/2023/kapitan-logo-deploying-keda-with-kapitan/#summary","title":"Summary","text":"

With this tutorial we have explored some of Kapitan's capabilities for managing and modifying Helm charts. The next tutorial will show how to make use of Keda and deploy a generator for Keda resources.

"},{"location":"pages/blog/12/02/2024/kapitan-logo-new-kapitan-release--v0331/","title":"New Kapitan release v0.33.1","text":"

The Kapicorp team is happy to announce a new release of Kapitan.

This release contains loads of improvements from the past 8 months, the majority of which have been contributions from our community!

Head over to our release page on GitHub for a full list of features and contributors.

Please help us by visiting our Sponsor Kapitan page.

"},{"location":"pages/commands/kapitan_compile/","title":"CLI Reference | kapitan compile","text":""},{"location":"pages/commands/kapitan_compile/#kapitan-compile","title":"kapitan compile","text":"

Merges inventory and inputs and produces generated files in the output folder (/compiled by default)

"},{"location":"pages/commands/kapitan_compile/#compile-all-targets","title":"Compile all targets","text":"
kapitan compile\n
click to expand output
Compiled mysql-generator-fetch (0.18s)\nCompiled vault (0.25s)\nCompiled pritunl (0.22s)\nCompiled gke-pvm-killer (0.05s)\nCompiled examples (0.30s)\nCompiled mysql (0.08s)\nCompiled postgres-proxy (0.06s)\nCompiled echo-server (0.06s)\nCompiled global (0.03s)\nCompiled guestbook-argocd (0.08s)\nCompiled tutorial (0.13s)\nCompiled kapicorp-project-123 (0.03s)\nCompiled kapicorp-demo-march (0.03s)\nCompiled kapicorp-terraform-admin (0.03s)\nCompiled sock-shop (0.32s)\nCompiled tesoro (0.09s)\nCompiled dev-sockshop (0.32s)\nCompiled prod-sockshop (0.38s)\nCompiled argocd (2.29s)\n
"},{"location":"pages/commands/kapitan_compile/#selective-compilation","title":"Selective compilation","text":""},{"location":"pages/commands/kapitan_compile/#using-target-names","title":"Using target names","text":"

Compiles one or more targets selected by name using --targets or -t

kapitan compile -t mysql tesoro\n
click to expand output
Compiled mysql (0.06s)\nCompiled tesoro (0.09s)\n
"},{"location":"pages/commands/kapitan_compile/#using-labels","title":"Using labels","text":"

Compiles one or more targets selected by matching labels with --labels or -l

Info

This works if you have labelled your targets using the following syntax:

parameters:\n  ...\n  kapitan:\n    ...\n    labels:\n      customer: acme\n

see Labels for more details

$ kapitan compile -l customer=acme\nCompiled acme-project (0.14s)\nCompiled acme-pipelines (0.10s)\n
"},{"location":"pages/commands/kapitan_compile/#fetch-on-compile","title":"Fetch on compile","text":"

Use the --fetch flag to fetch Remote Inventories and the External Dependencies.

kapitan compile --fetch\n

This will download the dependencies according to their configurations. By default, kapitan does not overwrite an existing item with the same name as that of the fetched inventory items.

Use the --force-fetch flag to force fetch (update cache with freshly fetched items) and overwrite inventory items of the same name in the output_path.

kapitan compile --force-fetch\n

Use the --cache flag to cache the fetched items in the .dependency_cache directory in the root project directory.

kapitan compile --cache --fetch\n
"},{"location":"pages/commands/kapitan_compile/#embed-references","title":"Embed references","text":"

By default, Kapitan references are stored encrypted (for backends that support encryption) in the configuration repository under the /refs directory.

For instance, a reference tag ?{gpg:targets/minikube-mysql/mysql/password:ec3d54de} would point to a physical file on disk under /refs like:

refs/targets/minikube-mysql/mysql/password

data: hQEMA8uOJKdm07XTAQgAp5i [[ CUT ]] BwqYc3g7PI09HCJZdU=\nencoding: base64\nrecipients:\n- fingerprint: D9234C61F58BEB3ED8552A57E28DC07A3CBFAE7C\ntype: gpg\n

The --embed-refs flag tells Kapitan to embed these references on compile, alongside the generated output. By doing so, compiled output is self-contained and can be revealed by Tesoro or other tools.

kapitan compile --embed-refs\n

See how the compiled output for this specific target changes to embed the actual encrypted content (marked by ?{gpg: :embedded} to indicate it is a gpg reference) rather than just holding a reference to it (like ?{gpg:targets/minikube-mysql/mysql/password:ec3d54de}, which points to the file shown above).

click to expand output
diff --git a/examples/kubernetes/compiled/minikube-mysql/manifests/mysql_app.yml b/examples/kubernetes/compiled/minikube-mysql/manifests/mysql_app.yml\n[[ CUT ]]\napiVersion: v1\ndata:\n-  MYSQL_ROOT_PASSWORD: ?{gpg:targets/minikube-mysql/mysql/password:ec3d54de}\n-  MYSQL_ROOT_PASSWORD_SHA256: ?{gpg:targets/minikube-mysql/mysql/password_sha256:122d2732}\n+  MYSQL_ROOT_PASSWORD: ?{gpg:eyJkYXRhIjogImhR [[ CUT ]] gInR5cGUiOiAiZ3BnIn0=:embedded}\n+  MYSQL_ROOT_PASSWORD_SHA256: ?{gpg:eyJkYXRhI [[ CUT ]] eXBlIjogImdwZyJ9:embedded}\n
"},{"location":"pages/commands/kapitan_compile/#help","title":"help","text":"
kapitan compile --help\n
click to expand output
usage: kapitan compile [-h] [--inventory-backend {reclass}]\n               [--search-paths JPATH [JPATH ...]]\n               [--jinja2-filters FPATH] [--verbose] [--prune]\n               [--quiet] [--output-path PATH] [--fetch]\n               [--force-fetch] [--force] [--validate]\n               [--parallelism INT] [--indent INT]\n               [--refs-path REFS_PATH] [--reveal] [--embed-refs]\n               [--inventory-path INVENTORY_PATH] [--cache]\n               [--cache-paths PATH [PATH ...]]\n               [--ignore-version-check] [--use-go-jsonnet]\n               [--compose-target-name] [--schemas-path SCHEMAS_PATH]\n               [--yaml-multiline-string-style STYLE]\n               [--yaml-dump-null-as-empty]\n               [--targets TARGET [TARGET ...] | --labels\n               [key=value ...]]\n\noptions:\n  -h, --help            show this help message and exit\n  --inventory-backend {reclass,reclass-rs}\n                        Select the inventory backend to use (default=reclass)\n  --search-paths JPATH [JPATH ...], -J JPATH [JPATH ...]\n                        set search paths, default is [\".\"]\n  --jinja2-filters FPATH, -J2F FPATH\n                        load custom jinja2 filters from any file, default is\n                        to put them inside lib/jinja2_filters.py\n  --verbose, -v         set verbose mode\n  --prune               prune jsonnet output\n  --quiet               set quiet mode, only critical output\n  --output-path PATH    set output path, default is \".\"\n  --fetch               fetch remote inventories and/or external dependencies\n  --force-fetch         overwrite existing inventory and/or dependency item\n  --force               overwrite existing inventory and/or dependency item\n  --validate            validate compile output against schemas as specified\n                        in inventory\n  --parallelism INT, -p INT\n                        Number of concurrent compile processes, default is 4\n  --indent INT, -i INT  Indentation spaces for YAML/JSON, default is 2\n  --refs-path REFS_PATH\n                        set refs path, default is \"./refs\"\n  --reveal              reveal refs (warning: this will potentially write\n                        sensitive data)\n  --embed-refs          embed ref contents\n  --inventory-path INVENTORY_PATH\n                        set inventory path, default is \"./inventory\"\n  --cache, -c           enable compilation caching to .kapitan_cache and\n                        dependency caching to .dependency_cache, default is\n                        False\n  --cache-paths PATH [PATH ...]\n                        cache additional paths to .kapitan_cache, default is\n                        []\n  --ignore-version-check\n                        ignore the version from .kapitan\n  --use-go-jsonnet      use go-jsonnet\n  --compose-target-name   Create same subfolder structure from inventory/targets\n                        inside compiled folder\n  --schemas-path SCHEMAS_PATH\n                        set schema cache path, default is \"./schemas\"\n  --yaml-multiline-string-style STYLE, -L STYLE\n                        set multiline string style to STYLE, default is\n                        'double-quotes'\n  --yaml-dump-null-as-empty\n                        dumps all none-type entries as empty, default is\n                        dumping as 'null'\n  --targets TARGET [TARGET ...], -t TARGET [TARGET ...]\n                        targets to compile, default is all\n  --labels [key=value ...], -l 
[key=value ...]\n                        compile targets matching the labels, default is all\n
"},{"location":"pages/commands/kapitan_dotfile/","title":"CLI Reference | .kapitan config file","text":""},{"location":"pages/commands/kapitan_dotfile/#kapitan","title":".kapitan","text":"

Kapitan allows you to conveniently override defaults by specifying a local .kapitan file in the root of your repository (relative to the kapitan configuration):

This comes in handy to make sure Kapitan runs consistently for your specific setup.

Info

Any flag for any Kapitan command can be overridden in the .kapitan dotfile; here are some of the most common examples.

"},{"location":"pages/commands/kapitan_dotfile/#version","title":"version","text":"

To enforce the Kapitan version used for compilation (for consistency and safety), you can add version to .kapitan:

version: 0.30.0\n\n...\n

This constraint can be relaxed to accept any patch release of a given minor version:

version: 0.30 # Allows any 0.30.x release to run\n\n...\n
"},{"location":"pages/commands/kapitan_dotfile/#command-line-flags","title":"Command line flags","text":"

You can also permanently define all command line flags in the .kapitan config file. For example:

...\n\ncompile:\n  indent: 4\n  parallelism: 8\n

would be equivalent to running:

kapitan compile --indent 4 --parallelism 8\n

For flags which are shared by multiple commands, you can either selectively define them for single commands in a section with the same name as the command, or you can set any flags in the global section, in which case they're applied for all commands. If you set a flag in both the global section and a command's section, the value from the command's section takes precedence over the value from the global section.

As an example, you can configure the inventory-path in the global section of the Kapitan dotfile to make sure it's persisted across all Kapitan runs.

...\n\nglobal:\n  inventory-path: ./some_path\n

which would be equivalent to running any command with --inventory-path=./some_path.
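
As an illustrative example of the precedence rule above (the paths are hypothetical), a command section overrides the global one only for that command:

global:\n  inventory-path: ./some_path      # used by every command by default\n\ncompile:\n  inventory-path: ./other_path     # kapitan compile uses this value instead\n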

Another flag that you may want to set in the global section is inventory-backend to select a non-default inventory backend implementation.

global:\n  inventory-backend: reclass\n

which would be equivalent to always running Kapitan with --inventory-backend=reclass.

Please note that the inventory-backend flag currently can't be set through the command-specific sections of the Kapitan config file.

"},{"location":"pages/commands/kapitan_inventory/","title":"CLI Reference | kapitan inventory","text":""},{"location":"pages/commands/kapitan_inventory/#kapitan-inventory","title":"kapitan inventory","text":"

Renders the resulting inventory values for a specific target.

For example, rendering the inventory for the mysql target:

kapitan inventory -t mysql\n
click to expand output
__reclass__:\n  environment: base\n  name: mysql\n  node: mysql\n  timestamp: Wed Nov 23 23:19:28 2022\n  uri: yaml_fs:///src/inventory/targets/examples/mysql.yml\napplications: []\nclasses:\n  - kapitan.kube\n  - kapitan.generators.kubernetes\n  - kapitan.generators.argocd\n  - kapitan.generators.terraform\n  - kapitan.generators.rabbitmq\n  - kapitan.common\n  - common\n  - components.mysql\nenvironment: base\nexports: {}\nparameters:\n  _reclass_:\n    environment: base\n    name:\n      full: mysql\n      short: mysql\n  components:\n    mysql:\n      config_maps:\n        config:\n          data:\n            mysql.cnf:\n              value: ignore-db-dir=lost+found\n            mytemplate.cnf:\n              template: components/mysql/mytemplate.cnf.j2\n              values:\n                mysql:\n                  client:\n                    port: 3306\n                    socket: /var/run/mysqld/mysqld.sock\n                  mysqld:\n                    bind-address: 127.0.0.1\n                    max_allowed_packet: 64M\n                    thread_concurrency: 8\n          mount: /etc/mysql/conf.d/\n      env:\n        MYSQL_DATABASE: ''\n        MYSQL_PASSWORD:\n          secretKeyRef:\n            key: mysql-password\n        MYSQL_ROOT_PASSWORD:\n          secretKeyRef:\n            key: mysql-root-password\n        MYSQL_USER: ''\n      image: mysql:5.7.28\n      ports:\n        mysql:\n          service_port: 3306\n      secrets:\n        secrets:\n          data:\n            mysql-password:\n              value: ?{plain:targets/mysql/mysql-password||randomstr|base64}\n            mysql-root-password:\n              value: ?{plain:targets/mysql/mysql-root-password||randomstr:32|base64}\n          versioned: true\n      type: statefulset\n      volume_claims:\n        datadir:\n          spec:\n            accessModes:\n              - ReadWriteOnce\n            resources:\n              requests:\n                storage: 10Gi\n            storageClassName: standard\n      volume_mounts:\n        datadir:\n          mountPath: /var/lib/mysql\n  docs:\n    - templates/docs/README.md\n  generators:\n    manifest:\n      default_config:\n        annotations:\n          manifests.kapicorp.com/generated: 'true'\n        service_account:\n          create: false\n        type: deployment\n  kapitan:\n    compile:\n      - input_paths:\n          - components/generators/kubernetes\n        input_type: kadet\n        output_path: manifests\n        output_type: yml\n      - input_params:\n          function: generate_docs\n          template_path: templates/docs/service_component.md.j2\n        input_paths:\n          - components/generators/kubernetes\n        input_type: kadet\n        output_path: docs\n        output_type: plain\n      - input_params:\n          function: generate_pre_deploy\n        input_paths:\n          - components/generators/kubernetes\n        input_type: kadet\n        output_path: pre-deploy\n        output_type: yml\n      - input_paths:\n          - components/generators/argocd\n        input_type: kadet\n        output_path: argocd\n        output_type: yml\n      - input_params:\n          generator_root: resources.tf\n        input_paths:\n          - components/generators/terraform\n        input_type: kadet\n        output_path: terraform\n        output_type: json\n      - ignore_missing: true\n        input_paths:\n          - resources/state/mysql/.terraform.lock.hcl\n        input_type: copy\n        output_path: terraform/\n      - 
input_paths:\n          - components/generators/rabbitmq\n        input_type: kadet\n        output_path: rabbitmq\n        output_type: yml\n      - input_paths:\n          - templates/docs/README.md\n        input_type: jinja2\n        output_path: docs\n      - input_paths: []\n        input_type: jinja2\n        output_path: scripts\n      - input_paths: []\n        input_type: jsonnet\n        output_path: manifests\n        output_type: yml\n    dependencies:\n      - output_path: lib/kube.libsonnet\n        source: https://raw.githubusercontent.com/bitnami-labs/kube-libsonnet/master/kube.libsonnet\n        type: https\n      - output_path: lib/kube-platforms.libsonnet\n        source: https://raw.githubusercontent.com/bitnami-labs/kube-libsonnet/master/kube-platforms.libsonnet\n        type: https\n      - output_path: components/generators/kubernetes\n        ref: master\n        source: https://github.com/kapicorp/kapitan-reference.git\n        subdir: components/generators/kubernetes\n        type: git\n      - output_path: components/generators/terraform\n        ref: master\n        source: https://github.com/kapicorp/kapitan-reference.git\n        subdir: components/generators/terraform\n        type: git\n    vars:\n      target: mysql\n  manifests: []\n  mysql:\n    settings:\n      client:\n        port: 3306\n        socket: /var/run/mysqld/mysqld.sock\n      mysqld:\n        bind-address: 127.0.0.1\n        max_allowed_packet: 64M\n        thread_concurrency: 8\n  namespace: mysql\n  scripts: []\n  target_name: mysql\n
"},{"location":"pages/commands/kapitan_lint/","title":"CLI Reference | kapitan lint","text":""},{"location":"pages/commands/kapitan_lint/#kapitan-lint","title":"kapitan lint","text":"

Performs a checkup on your inventory or refs.

./kapitan lint\n
click to expand output
Running yamllint on all inventory files...\n\n.yamllint not found. Using default values\nFile ./inventory/classes/components/echo-server.yml has the following issues:\n        95:29: forbidden implicit octal value \"0550\" (octal-values)\nFile ./inventory/classes/terraform/gcp/services.yml has the following issues:\n        15:11: duplication of key \"enable_compute_service\" in mapping (key-duplicates)\n\nTotal yamllint issues found: 2\n\nChecking for orphan classes in inventory...\n\nNo usage found for the following 6 classes:\n{'components.argoproj.cd.argocd-server-oidc',\n'components.helm.cert-manager-helm',\n'components.rabbitmq-operator.rabbitmq-configuration',\n'components.rabbitmq-operator.rabbitmq-operator',\n'features.gkms-demo',\n'projects.localhost.kubernetes.katacoda'}\n
"},{"location":"pages/commands/kapitan_searchvar/","title":"CLI Reference | kapitan searchvar","text":""},{"location":"pages/commands/kapitan_searchvar/#kapitan-searchvar","title":"kapitan searchvar","text":"

Shows all inventory files where a variable is declared:

./kapitan searchvar parameters.components.*.image\n
click to expand output
./inventory/classes/components/vault.yml                     ${vault:image}\n./inventory/classes/components/logstash.yml                  eu.gcr.io/antha-images/logstash:7.5.1\n./inventory/classes/components/gke-pvm-killer.yml            estafette/estafette-gke-preemptible-killer:1.2.5\n./inventory/classes/components/mysql.yml                     mysql:5.7.28\n./inventory/classes/components/postgres-proxy.yml            gcr.io/cloudsql-docker/gce-proxy:1.16\n./inventory/classes/components/echo-server.yml               jmalloc/echo-server\n./inventory/classes/components/trivy.yml                     ${trivy:image}\n./inventory/classes/components/filebeat.yml                  ${filebeat:image}:${filebeat:version}\n./inventory/classes/components/pritunl/pritunl-mongo.yml     docker.io/bitnami/mongodb:4.2.6-debian-10-r23\n./inventory/classes/components/pritunl/pritunl.yml           alledm/pritunl\n./inventory/classes/components/weaveworks/user-db.yml        weaveworksdemos/user-db:0.3.0\n./inventory/classes/components/weaveworks/catalogue.yml      weaveworksdemos/catalogue:0.3.5\n./inventory/classes/components/weaveworks/user.yml           weaveworksdemos/user:0.4.7\n./inventory/classes/components/weaveworks/session-db.yml     redis:alpine\n./inventory/classes/components/weaveworks/catalogue-db.yml   weaveworksdemos/catalogue-db:0.3.0\n./inventory/classes/components/weaveworks/carts-db.yml       mongo\n./inventory/classes/components/weaveworks/orders-db.yml      mongo\n./inventory/classes/components/weaveworks/orders.yml         weaveworksdemos/orders:0.4.7\n./inventory/classes/components/weaveworks/shipping.yml       weaveworksdemos/shipping:0.4.8\n./inventory/classes/components/weaveworks/queue-master.yml   weaveworksdemos/queue-master:0.3.1\n./inventory/classes/components/weaveworks/rabbitmq.yml       rabbitmq:3.6.8-management\n./inventory/classes/components/weaveworks/payment.yml        weaveworksdemos/payment:0.4.3\n./inventory/classes/components/weaveworks/front-end.yml      weaveworksdemos/front-end:0.3.12\n./inventory/classes/components/weaveworks/carts.yml          weaveworksdemos/carts:0.4.8\n./inventory/classes/components/kapicorp/tesoro.yml           kapicorp/tesoro\n
"},{"location":"pages/commands/kapitan_validate/","title":"CLI Reference | kapitan validate","text":""},{"location":"pages/commands/kapitan_validate/#kapitan-validate","title":"kapitan validate","text":"

Validates the schema of compiled output. Validation options are specified in the inventory under parameters.kapitan.validate. Currently the only supported type is kubernetes.

"},{"location":"pages/commands/kapitan_validate/#usage","title":"Usage","text":"standalonemanual with kapitan compileautomatic with .kapitan dotfile
kapitan validate\n
click to expand output
created schema-cache-path at ./schemas\nValidation: manifest validation successful for ./compiled/minikube-mysql/manifests/mysql_secret.yml\nValidation: manifest validation successful for ./compiled/minikube-mysql/manifests/mysql_service_jsonnet.yml\nValidation: manifest validation successful for ./compiled/minikube-mysql/manifests/mysql_service_simple.yml\n
kapitan compile --validate\n
click to expand output
Rendered inventory (0.27s)\nCompiled labels (0.23s)\nCompiled removal (0.00s)\nCompiled busybox (0.24s)\nCompiled minikube-nginx-jsonnet (0.49s)\nCompiled minikube-nginx-kadet (0.25s)\nCompiled minikube-mysql (0.59s)\nCompiled minikube-es (1.17s)\nCompiled all-glob (1.55s)\nValidation: manifest validation successful for ./compiled/minikube-mysql/manifests/mysql_secret.yml\nValidation: manifest validation successful for ./compiled/minikube-mysql/manifests/mysql_service_jsonnet.yml\nValidation: manifest validation successful for ./compiled/minikube-mysql/manifests/mysql_service_simple.yml\n

You can leverage the .kapitan dotfile to make sure validate runs every time you run compile.

example .kapitan

...\n\ncompile:\n  validate: true\n

The validate command will now be implied for every compile run

kapitan compile\n

click to expand output
Rendered inventory (0.27s)\nCompiled labels (0.23s)\nCompiled removal (0.00s)\nCompiled busybox (0.24s)\nCompiled minikube-nginx-jsonnet (0.49s)\nCompiled minikube-nginx-kadet (0.25s)\nCompiled minikube-mysql (0.59s)\nCompiled minikube-es (1.17s)\nCompiled all-glob (1.55s)\nValidation: manifest validation successful for ./compiled/minikube-mysql/manifests/mysql_secret.yml\nValidation: manifest validation successful for ./compiled/minikube-mysql/manifests/mysql_service_jsonnet.yml\nValidation: manifest validation successful for ./compiled/minikube-mysql/manifests/mysql_service_simple.yml\n
"},{"location":"pages/commands/kapitan_validate/#kubernetes-setup","title":"Kubernetes Setup","text":"

Kubernetes has different resource kinds, for instance:

  • service
  • deployment
  • statefulset

Kapitan has built-in support for validation of Kubernetes kinds, and automatically integrates with https://kubernetesjsonschema.dev. See github.com/instrumenta/kubernetes-json-schema for more information.

Info

Kapitan will automatically download the schemas for Kubernetes Manifests directly from https://kubernetesjsonschema.dev

By default, the schemas are cached into ./schemas/, which can be modified with the --schemas-path option.

Permanently override schemas-path

Remember to use the .kapitan dotfile configuration to permanently override the schemas-path location.

$ cat .kapitan\n# other options abbreviated for clarity\nvalidate:\n  schemas-path: custom/schemas/cache/path\n
"},{"location":"pages/commands/kapitan_validate/#example","title":"Example","text":"

Refer to the mysql example.

kubernetes/inventory/classes/component/mysql.yml
    validate: \n    - type: kubernetes # mkdocs (1)! \n      output_paths: # mkdocs (2)! \n        - manifests/mysql_secret.yml\n      kind: secret # temporarily replaced with 'deployment' during test # mkdocs (3)! \n      version: 1.14.0 # optional, defaults to 1.14.0 # mkdocs (4)! \n    - type: kubernetes\n      output_paths:\n        - manifests/mysql_service_jsonnet.yml\n        - manifests/mysql_service_simple.yml\n      kind: service\n      version: 1.14.0\n
  1. type | currently only Kubernetes is supported
  2. output_paths | list of files to validate
  3. kind | a Kubernetes resource kind
  4. version | a Kubernetes API version, defaults to 1.14.0
"},{"location":"pages/contribute/code/","title":"Kapitan code","text":"

Many of our features come from contributions from external collaborators. Please help us improve Kapitan by extending it with your ideas, or help us squash bugs you discover.

It's simple, just send us a PR with your improvements!

","tags":["community"]},{"location":"pages/contribute/code/#submitting-code","title":"Submitting code","text":"

We would like to ask you to fork the Kapitan project and create a Pull Request targeting the master branch. All submissions, including submissions by project members, require review.

","tags":["community"]},{"location":"pages/contribute/code/#setup","title":"Setup","text":"

We build kapitan using poetry.

  1. Install poetry

    pip install poetry\n
  2. Install dependencies

    poetry install --with test\n

    Poetry creates a virtual environment with the required dependencies installed.

  3. Run kapitan with your locally compiled code

    poetry run kapitan <your command>\n

Because we are using a pinned version of reclass which is added as a submodule into Kapitan's repository, you need to pull it separately by executing the command below:

git submodule update --init\n
","tags":["community"]},{"location":"pages/contribute/code/#troubleshoot","title":"Troubleshoot","text":"

Make sure gcc is installed:

brew install gcc@5\n
","tags":["community"]},{"location":"pages/contribute/code/#testing","title":"Testing","text":"

Run make test to run all tests. If you modify anything in the examples/ folder make sure you replicate the compiled result of that in tests/test_kubernetes_compiled. If you add new features, run make test_coverage && make test_formatting to make sure the test coverage remains at current or better levels and that code formatting is applied.

If you would like to evaluate your changes by running your version of Kapitan, you can do that by running bin/kapitan from this repository or even setting an alias to it. To run a single test module, invoke unittest directly:

python3 -m unittest tests/test_vault_transit.py\n
","tags":["community"]},{"location":"pages/contribute/code/#code-style","title":"Code Style","text":"

To make sure you adhere to the Style Guide for Python (PEP 8), Python Black is used to apply the formatting, so make sure you have it installed with pip3 install black.

","tags":["community"]},{"location":"pages/contribute/code/#apply-via-git-hook","title":"Apply via Git hook","text":"
  • Run pip3 install pre-commit to install the pre-commit framework.
  • In the Kapitan root directory, run pre-commit install
  • Git add/commit any changed files you want.
","tags":["community"]},{"location":"pages/contribute/code/#apply-manually","title":"Apply manually","text":"

Run make format_codestyle before submitting.

","tags":["community"]},{"location":"pages/contribute/code/#release-process","title":"Release process","text":"
  • Create a branch named release-v<NUMBER>. Use v0.*.*-rc.* if you want pre-release versions to be uploaded.
  • Update CHANGELOG.md with the release changes.
  • Once reviewed and merged, Github Actions will auto-release.
  • The merge has to happen with a merge commit, not with squash/rebase, so that the commit message still mentions kapicorp/release-v* inside.
","tags":["community"]},{"location":"pages/contribute/code/#packaging-extra-resources-in-python-package","title":"Packaging extra resources in python package","text":"

To package any extra resources/files in the pip package, make sure you modify MANIFEST.in.

","tags":["community"]},{"location":"pages/contribute/code/#leave-a-comment","title":"Leave a comment","text":"","tags":["community"]},{"location":"pages/contribute/documentation/","title":"Documentation","text":"

Our documentation is often what prevents new users from adopting Kapitan. Help us improve it by contributing fixes and keeping it up-to-date.

","tags":["community"]},{"location":"pages/contribute/documentation/#articles","title":"Articles","text":"

Write articles on Kapitan and share your way of working. Inspire others, and reach out to have your article published / endorsed by us.

","tags":["community"]},{"location":"pages/contribute/documentation/#this-website","title":"This Website","text":"

Find something odd? Let us know or change it yourself: you can edit pages of this website on Github by clicking the pencil icon at the top right of this page!

","tags":["community"]},{"location":"pages/contribute/documentation/#update-documentation","title":"Update documentation","text":"

We use mkdocs to generate our gh-pages from .md files under the docs/ folder.

Updating our gh-pages is therefore a two-step process.

","tags":["community"]},{"location":"pages/contribute/documentation/#update-the-markdown","title":"Update the markdown","text":"

Submit a PR for our master branch that updates the .md file(s). Test how the changes would look when deployed to gh-pages by serving the site on localhost:

  1. Edit the strict property in mkdocs.yml and set it to false.
  2. make local_serve_documentation
  3. Now the documentation site should be available at localhost:8000.
","tags":["community"]},{"location":"pages/contribute/documentation/#submit-a-pr","title":"Submit a PR","text":"

Once the above PR has been merged, use the mkdocs gh-deploy command to push the commit that updates the site content to your own gh-pages branch. Make sure that you already have this gh-pages branch in your fork and that it is up-to-date with our gh-pages branch, such that the two branches share the commit history (otherwise Github would not allow PRs to be created).

# locally, on master branch (which has your updated docs)\nCOMMIT_MSG=\"your commit message to replace\" make mkdocs_gh_deploy\n

After it's pushed, create a PR that targets our gh-pages branch from your gh-pages branch.

","tags":["community"]},{"location":"pages/contribute/sponsor/","title":"Sponsor Kapitan","text":"

Do you want to help the project? Great! There are many ways to do it

We accept donations through GitHub Sponsors. Alternatively, reach out for other ways to support us.

Companies and individuals sponsoring us on a regular basis will be recognised and called out on our website.

","tags":["community"]},{"location":"pages/contribute/talk/","title":"Talk about Kapitan","text":"

Our project needs your support to get noticed! Please let everyone know that you are using Kapitan

  • Help us grow: give us a star
  • Join us on kubernetes.slack.com #kapitan(Get invited)
  • Tweet about us on Twitter . Remember to add @kapitandev to your tweets
  • Share our website https://kapitan.dev
  • Write tutorials and blog posts and join the many who have done it already! Get published on the Kapitan Blog
  • Share what Kapitan does for you and for your company
  • Inspire your colleagues and network on LinkedIn
"},{"location":"pages/input_types/copy/","title":"Copy","text":"

This input type simply copies the input templates to the output directory without any rendering/processing. For Copy, input_paths can be either a file or a directory: in the case of a directory, all the templates in the directory will be copied and written to output_path.

Supported output types: N/A (no need to specify output_type)

Example

 kapitan:\n    compile:\n      - input_type: copy\n        ignore_missing: true  # Do not error if path is missing. Defaults to False\n        input_paths:\n          - resources/state/${target_name}/.terraform.lock.hcl\n        output_path: terraform/\n
"},{"location":"pages/input_types/external/","title":"External","text":"

This input type executes an external script or binary. This can be used to manipulate already compiled files or execute binaries outside of kapitan that generate or manipulate files.

For example, ytt is a useful yaml templating tool. It is not built into the kapitan binary; however, with the external input type, we can specify the ytt binary to be executed with specific arguments and environment variables.

In this example, we're removing a label from the k8s manifests in a directory called ingresses and placing the result into the compiled target directory.

parameters:\n  target_name: k8s-manifests\n  kapitan:\n    vars:\n      target: ${target_name}\n    compile:\n      - input_type: external\n        input_paths:\n          - /usr/local/bin/ytt # path to ytt on system\n        output_path: .\n        args:\n          - -f\n          - ingresses/ # directory with ingresses\n          - -f\n          - ytt/remove.yaml # custom ytt script\n          - \">\"\n          - \\${compiled_target_dir}/ingresses/ingresses.yaml # final merged result\n

Supported output types: N/A (no need to specify output_type)

Additionally, the input type supports field env_vars, which can be used to set environment variables for the external command. By default, the external command doesn't inherit any environment variables from Kapitan's environment. However, if environment variables $PATH or $HOME aren't set in env_vars, they will be propagated from Kapitan's environment to the external command's environment.

Finally, Kapitan will substitute ${compiled_target_dir} in both the command's arguments and the environment variables. This variable needs to be escaped in the configuration to ensure that reclass won't interpret it as a reclass reference.
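
A hedged sketch combining both features (the script path, its arguments and the variable name are hypothetical):

parameters:\n  kapitan:\n    compile:\n      - input_type: external\n        input_paths:\n          - scripts/render.sh                      # hypothetical external script\n        output_path: .\n        env_vars:\n          OUTPUT_DIR: \\${compiled_target_dir}      # escaped so reclass does not resolve it\n          PATH: /usr/local/bin:/usr/bin            # PATH/HOME propagate from Kapitan's env if omitted\n        args:\n          - --verbose\n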

"},{"location":"pages/input_types/helm/","title":"Input Type | Helm","text":"

This is a Python binding to the helm template command for users with helm charts. It does not require the helm executable, and the templates are rendered without the Tiller server.

Unlike other input types, Helm input types support the following additional parameters under kapitan.compile:

parameters:\n  kapitan:\n    compile:\n    - output_path: <output_path>\n      input_type: helm\n      input_paths:\n        - <chart_path>\n      helm_values:\n        <object_with_values_to_override>\n      helm_values_files:\n        - <values_file_path>\n      helm_path: <helm binary>\n      helm_params:\n        name: <chart_release_name>\n        namespace: <substitutes_.Release.Namespace>\n        output_file: <string>\n        validate: true\n        \u2026\n

helm_values is an object containing values that will override the default values in the input chart. This has exactly the same effect as specifying --values custom_values.yml for the helm template command, where the structure of custom_values.yml mirrors that of helm_values.

helm_values_files is an array containing the paths to helm values files used as input for the chart. This has exactly the same effect as specifying --values my_custom_values.yml for the helm template command, where my_custom_values.yml is a helm values file. If the same keys exist in helm_values and in multiple helm_values_files, the last file listed in helm_values_files takes precedence, followed by the preceding helm_values_files, with the helm_values defined in the compile block at the bottom. There is an example in the tests: the monitoring-dev (kapitan/tests/test_resources/inventory/targets/monitoring-dev.yml) and monitoring-prd (kapitan/tests/test_resources/inventory/targets/monitoring-prd.yml) targets both use the monitoring (tests/test_resources/inventory/classes/component/monitoring.yml) component. This component has a helm chart input and takes a common.yml helm_values file, which is \"shared\" by any target that uses the component, as well as a dynamically defined file based on a Kapitan variable defined in the target.
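
As a minimal sketch of how the two fields combine (the chart path and values file names are hypothetical), later entries in helm_values_files override earlier ones, and helm_values sits at the bottom of the precedence order:

parameters:\n  kapitan:\n    compile:\n      - output_path: .\n        input_type: helm\n        input_paths:\n          - components/charts/my-chart # hypothetical chart path\n        helm_values:\n          replicaCount: 1 # lowest precedence\n        helm_values_files:\n          - values/common.yml # overrides helm_values\n          - values/${target_name}.yml # last file listed wins on conflicting keys\n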

helm_path can be used to provide the helm binary name or path. helm_path defaults to the value of the KAPITAN_HELM_PATH env var if it is set, otherwise it defaults to helm.

helm_params correspond to the flags for helm template. Most flags that helm supports can be used here by replacing '-' with '_' in the flag name.

Flags without argument must have a boolean value, all other flags require a string value.

Special flags:

  • name: equivalent of helm template [NAME] parameter. Ignored if name_template is also specified. If neither name_template nor name are specified, the --generate-name flag is used to generate a name.
  • output_file: name of the single file used to output all the generated resources. This is equivalent to calling helm template without specifying an output dir. If not specified, each resource is generated into a distinct file.

  • include_crds and skip_tests: these flags are enabled by default and should be set to false to disable them.

  • debug: prints the helm debug output in the kapitan debug log.
  • namespace: note that due to the restrictions of the helm template command, specifying the namespace does not automatically add the metadata.namespace property to the resources. Therefore, users are encouraged to explicitly specify it in all resources:

    metadata:\n  namespace: {{ .Release.Namespace }} # or any other custom values\n

See the helm doc for further detail.
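
For illustration only, here is a hedged sketch of how a few common helm template flags map onto helm_params once '-' is replaced with '_'; the chart path and flag values are assumptions, and which flags are accepted ultimately depends on your helm version:

parameters:\n  kapitan:\n    compile:\n      - output_path: .\n        input_type: helm\n        input_paths:\n          - components/charts/my-chart # hypothetical chart path\n        helm_params:\n          name: my-release\n          namespace: my-namespace\n          include_crds: false # boolean flag --include-crds, enabled by default\n          skip_tests: false # boolean flag --skip-tests, enabled by default\n          kube_version: \"1.27\" # string flag --kube-version\n          debug: true # print helm debug output in the kapitan debug log\n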

"},{"location":"pages/input_types/helm/#example","title":"Example","text":"

Let's use the nginx-ingress helm chart as the input. Using the Kapitan dependency manager, this chart can be fetched via a URL as listed in https://helm.nginx.com/stable/index.yaml.

On a side note, https://helm.nginx.com/stable/ is the chart repository URL that you would helm repo add; this repository should contain an index.yaml file that lists all the available charts and their URLs, so by locating index.yaml you can discover every chart available in the repository.

We can use version 0.3.3 found at https://helm.nginx.com/stable/nginx-ingress-0.3.3.tgz. We can create a simple target file as inventory/targets/nginx-from-chart.yml whose content is as follows:

parameters:\n  kapitan:\n    vars:\n      target: nginx-from-chart\n    dependencies:\n    - type: https\n      source: https://helm.nginx.com/stable/nginx-ingress-0.3.3.tgz\n      unpack: True\n      output_path: components/charts\n    compile:\n      - output_path: .\n        input_type: helm\n        input_paths:\n          - components/charts/nginx-ingress\n        helm_values:\n          controller:\n            name: my-controller\n            image:\n              repository: custom_repo\n        helm_params:\n          name: my-first-release-name\n          namespace: my-first-namespace\n

To compile this target, run:

$ kapitan compile --fetch\nDependency https://helm.nginx.com/stable/nginx-ingress-0.3.3.tgz : fetching now\nDependency https://helm.nginx.com/stable/nginx-ingress-0.3.3.tgz : successfully fetched\nDependency https://helm.nginx.com/stable/nginx-ingress-0.3.3.tgz : extracted to components/charts\nCompiled nginx-from-chart (0.07s)\n

The chart is fetched before compile, which creates the components/charts/nginx-ingress folder that is used as the input_paths for the helm input type. To confirm that helm_values has actually overridden the default values, we can try:

$ grep \"my-controller\" compiled/nginx-from-chart/nginx-ingress/templates/controller-deployment.yaml\n  name: my-controller\n      app: my-controller\n        app: my-controller\n
"},{"location":"pages/input_types/helm/#building-the-binding-from-source","title":"Building the binding from source","text":"

Run

cd kapitan/inputs/helm\n./build.sh\n

This requires Go 1.14.

"},{"location":"pages/input_types/helm/#helm-subcharts","title":"Helm subcharts","text":"

There is an external dependency manager of type helm which enables you to specify helm charts to download, including subcharts.
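
A hedged sketch of such a dependency is shown below; the repository URL, chart name and version are purely illustrative:

parameters:\n  kapitan:\n    dependencies:\n      - type: helm\n        source: https://charts.example.com # hypothetical chart repository\n        chart_name: my-chart\n        version: 1.2.3\n        output_path: components/charts/my-chart\n    compile:\n      - output_path: .\n        input_type: helm\n        input_paths:\n          - components/charts/my-chart\n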

"},{"location":"pages/input_types/introduction/","title":"Introduction","text":"

Note: make sure to read up on inventory before moving on.

"},{"location":"pages/input_types/introduction/#phases-of-the-compile-command","title":"Phases of the compile command","text":"

Now that we have a basic understanding of Kapitan inventory, we can talk about the kapitan compile command.

The command has five distinct phases:

graph LR\n  classDef pink fill:#f9f,stroke:#333,stroke-width:4px,color:#000,font-weight: bold;\n  classDef blue fill:#00FFFF,stroke:#333,stroke-width:4px,color:#000,font-weight: bold;\n  INVENTORY[\"Inventory\"]:::pink\n\n  COMPILE[\"Compile\"]:::pink\n  FINISH[\"Finish\"]:::pink\n  COPY[\"Copy\"]:::pink\n\n\n  subgraph \"fetch\"\n    F{\"fetch?\"}\n    FETCH[\"fetch dependencies\"]\n  end \n\n  subgraph \"validate\"\n    V{\"validate?\"}\n    VALIDATE[\"Validate\"]\n  end\n\n  subgraph \"reveal\"\n    REVEAL[\"Reveal\"]\n    R{\"reveal?\"}\n  end\n\n  INVENTORY --> F\n  F --> |yes| FETCH\n  FETCH --> COMPILE\n  F ==> |no| COMPILE\n  COMPILE ==> R\n  R ==> |no| COPY\n  R --> |yes| REVEAL\n  REVEAL --> COPY\n  COPY --> V\n  V --> |yes| VALIDATE\n  V ==> |no| FINISH\n  VALIDATE --> FINISH\n\n
Step | Flag | Description | Configuration
---- | ---- | ----------- | -------------
Inventory | | Kapitan uses reclass to render a final version of the inventory. |
Fetch | --fetch | Kapitan fetches external dependencies | parameters.kapitan.dependencies
Compile | | Kapitan compiles the input types for each target | parameters.kapitan.compile
Reveal | --reveal | Kapitan reveals the secrets directly in the compiled output | parameters.kapitan.secrets
Copy | | Kapitan moves the output files from the tmp directory to /compiled |
Validate | --validate | Kapitan validates the schema of compiled output. | parameters.kapitan.validate
Finish | | Kapitan has completed all tasks |
"},{"location":"pages/input_types/introduction/#supported-input-types","title":"Supported input types","text":"

Input types can be specified in the inventory under kapitan.compile in the following format:

jinja2 | jsonnet | kadet | helm | copy
parameters:\n  kapitan:\n    compile:\n    - output_path: <output_path_in_target_dir>\n      input_type: jinja2 \n      input_params: # (1)! \n      input_paths:\n        - directory/\n        - file\n        - globbed/path/*\n
  1. a dict passed to the template

Please see Jinja

parameters:\n  kapitan:\n    compile:\n    - output_path: <output_path_in_target_dir>\n      input_type: jsonnet \n      prune: false # (1)! \n      input_paths:\n        - directory/\n        - file\n        - globbed/path/*\n      output_type: [`yaml` | `json`]\n
  1. (Default: global --prune)
parameters:\n  kapitan:\n    compile:\n    - output_path: <output_path_in_target_dir>\n      input_type: kadet \n      prune: false # (1)! \n      input_paths:\n        - directory/\n        - file\n        - globbed/path/*\n      output_type: [`yaml` | `json`]\n
  1. (Default: global --prune)

Please see Kadet

parameters:\n  kapitan:\n    compile:\n    - output_path: <output_path_in_target_dir>\n      input_type: helm \n      prune: false # (1)! \n      input_paths:\n        - directory/\n        - file\n        - globbed/path/*\n      output_type: <output_type_specific_to_input_type>\n
  1. (Default: global --prune)
parameters:\n  kapitan:\n    compile:\n    - output_path: <output_path_in_target_dir>\n      input_type: copy \n      prune: false # (1)! \n      input_paths:\n        - directory/\n        - file\n        - globbed/path/*\n      output_type: <output_type_specific_to_input_type>\n
  1. (Default: global --prune)
"},{"location":"pages/input_types/jinja/","title":"Input Type | Jinja2","text":"

This input type is probably the simplest input type to use: it is very versatile and is commonly used to create scripts and documentation files.

It renders jinja2 templates.

"},{"location":"pages/input_types/jinja/#example-configuration","title":"Example configuration","text":"

Here's some configuration from the nginx example

examples/kubernetes/inventory/classes/component/nginx-common.yml

  templates: #(1)!\n    - docs/nginx/README.md\n    - components/nginx-deploy.sh\n\n  kapitan:\n    compile:\n      - output_path: . #(2)!\n        input_type: jinja2\n        input_paths: ${templates} #(3)!\n
  1. We define a list with all the templates we want to compile with this input type
  2. The input type will render the files at the root of the target compiled folder e.g. compiled/${target_name}
  3. We pass the list as input_paths

Notice how we make use of variable interpolation to take advantage of the convenience of a list to add all the files we want to compile. You can now simply add to that list from any other place in the inventory that calls that class.

  • input_paths can either be a file, or a directory: in case of a directory, all the templates in the directory will be rendered.
  • input_params (optional) can be used to pass extra parameters, which is helpful when you need to reuse the same template for multiple components in the same target (see the sketch below).
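
As a sketch (the template path and parameter names are hypothetical), the same template can be compiled several times with different input_params, which the template reads through the input_params variable:

parameters:\n  kapitan:\n    compile:\n      - output_path: docs/billing\n        input_type: jinja2\n        input_paths:\n          - templates/docs/service-readme.md # hypothetical shared template\n        input_params:\n          service_name: billing # available in the template as input_params.service_name\n      - output_path: docs/payments\n        input_type: jinja2\n        input_paths:\n          - templates/docs/service-readme.md\n        input_params:\n          service_name: payments\n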
"},{"location":"pages/input_types/jinja/#documentation","title":"Documentation","text":"

We usually store documentation templates under the templates/docs directory.

examples/kubernetes/docs/nginx/README.md

{% set i = inventory.parameters %}\n\n# Welcome to the README!\n\nTarget *{{ i.target_name }}* is running:\n\n* {{ i.nginx.replicas }} replicas of *nginx* running nginx image {{ i.nginx.image }}\n* on cluster {{ i.cluster.name }}\n

Compiled result

# Welcome to the README!\n\nTarget *minikube-nginx-jsonnet* is running:\n\n* 1 replicas of *nginx* running nginx image nginx:1:15.8\n* on cluster minikube\n
"},{"location":"pages/input_types/jinja/#scripts","title":"Scripts","text":"

When we use Jinja to render scripts, we tend to call them \"canned scripts\" to indicate that these scripts have everything needed to run without extra parameters.

We usually store script templates under the templates/scripts directory.

examples/kubernetes/components/nginx-deploy.sh

#!/bin/bash -e\nDIR=$(dirname ${BASH_SOURCE[0]})\n{% set i = inventory.parameters %} #(1)!\n\nKUBECTL=\"kubectl -n {{i.namespace}}\" #(2)!\n\n# Create namespace before anything else\n${KUBECTL} apply -f ${DIR}/pre-deploy/namespace.yml\n\nfor SECTION in manifests\ndo\n  echo \"## run kubectl apply for ${SECTION}\"\n  ${KUBECTL} apply -f ${DIR}/${SECTION}/ | column -t\ndone\n
  1. We import the inventory as a Jinja variable
  2. We use it to set the namespace explicitly

Compiled result

#!/bin/bash -e\nDIR=$(dirname ${BASH_SOURCE[0]})\n #(1)!\n\nKUBECTL=\"kubectl -n minikube-nginx-jsonnet\" #(2)!\n\n# Create namespace before anything else\n${KUBECTL} apply -f ${DIR}/pre-deploy/namespace.yml\n\nfor SECTION in manifests\ndo\n  echo \"## run kubectl apply for ${SECTION}\"\n  ${KUBECTL} apply -f ${DIR}/${SECTION}/ | column -t\ndone\n
  1. The script is now a \"canned script\" and ready to be used for this specific target.
  2. You can see that the namespace has been replaced with the target's one.
"},{"location":"pages/input_types/jinja/#accessing-the-inventory","title":"Accessing the inventory","text":"

Templates will be provided at runtime with 3 variables:

  • inventory: To access the inventory for that specific target.
  • inventory_global: To access the inventory of all targets.
  • input_params: To access the optional dictionary provided to the input type.

Use of inventory_global

inventory_global can be used to generate a \"global\" README.md that contains a link to all generated targets.

| *Target*                                                               |\n|------------------------------------------------------------------------|\n{% for target in inventory_global | sort() %}\n{% set p = inventory_global[target].parameters %}\n|[{{target}}](../{{target}}/docs/README.md)                              |\n{% endfor %}\n

Compiled result

| *Target*                                                               |\n|------------------------------------------------------------------------|\n| [argocd](../argocd/docs/README.md)                                     |\n| [dev-sockshop](../dev-sockshop/docs/README.md)                         |\n| [echo-server](../echo-server/docs/README.md)                           |\n| [examples](../examples/docs/README.md)                                 |\n| [gke-pvm-killer](../gke-pvm-killer/docs/README.md)                     |\n| [global](../global/docs/README.md)                                     |\n| [guestbook-argocd](../guestbook-argocd/docs/README.md)                 |\n| [kapicorp-demo-march](../kapicorp-demo-march/docs/README.md)           |\n| [kapicorp-project-123](../kapicorp-project-123/docs/README.md)         |\n| [kapicorp-terraform-admin](../kapicorp-terraform-admin/docs/README.md) |\n| [mysql](../mysql/docs/README.md)                                       |\n| [postgres-proxy](../postgres-proxy/docs/README.md)                     |\n| [pritunl](../pritunl/docs/README.md)                                   |\n| [prod-sockshop](../prod-sockshop/docs/README.md)                       |\n| [sock-shop](../sock-shop/docs/README.md)                               |\n| [tesoro](../tesoro/docs/README.md)                                     |\n| [tutorial](../tutorial/docs/README.md)                                 |\n| [vault](../vault/docs/README.md)                                       |\n
"},{"location":"pages/input_types/jinja/#jinja2-custom-filters","title":"Jinja2 custom filters","text":"

We support the following custom filters for use in Jinja2 templates:

Encoding | Time | Regexp | fileglob | bool | ternary | shuffle | reveal_maybe
sha256 | yaml | toml | b64encode | b64decode

SHA256 hashing of text

{{ text | sha256 }}

Dump text as YAML

{{ text | yaml }}

Dump text as TOML

{{ text | toml }}

base64 encode text

{{ text | b64encode }}

base64 decode text

{{ text | b64decode }}

to_datetime | strftime

return datetime object for string

{{ \"2019-03-07 13:37:00\" | to_datetime }}

return current date string for format

{{ \"%a, %d %b %Y %H:%M\" | strftime }}

regex_replace | regex_escape | regex_search | regex_findall

perform a re.sub returning a string

{{ \"hello world\" | regex_replace(pattern=\"world\", replacement=\"kapitan\") }}

escape all regular expressions special characters from string

{{ \"+s[a-z].*\" | regex_escape}}

perform re.search and return the list of matches or a backref

{{ \"hello world\" | regex_search(\"world.*\") }}

perform re.findall and return the list of matches as array

{{ \"hello world\" | regex_findall(\"world.*\") }}

return list of matched regular files for glob

{{ \"./path/file*\" | fileglob }}

return the bool for value

{{ \"yes\" | bool }}

value ? true_val : false_val

{{ condition | ternary(\"yes\", \"no\")}}

randomly shuffle elements of a list

{{ [1, 2, 3, 4, 5] | shuffle }}

reveal ref/secret tag only if compile --reveal flag is set

{{ \"?{base64:my_ref}\" | reveal_maybe}}

Tip

You can also provide the path to your custom filter modules via the CLI. By default, you can put your filters in lib/jinja2_filters.py and they will automatically get loaded.
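
If you keep your filters elsewhere, a hedged sketch of pointing Kapitan at that module through the .kapitan dotfile (as an alternative to passing --jinja2-filters on the CLI) might look like this; the file path is hypothetical:

compile:\n  jinja2-filters: lib/my_custom_filters.py # hypothetical module containing extra Jinja2 filters\n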

"},{"location":"pages/input_types/jsonnet/","title":"Input Type | Jsonnet","text":"

Jsonnet is a superset of the JSON format that includes features such as conditionals, variables and imports. Refer to the jsonnet docs to understand how it works.

Note: unlike jinja2 templates, one jsonnet template can output multiple files (one per object declared in the file).

"},{"location":"pages/input_types/jsonnet/#accessing-the-inventory","title":"Accessing the inventory","text":"

Typical jsonnet files would start as follows:

local kap = import \"lib/kapitan.libjsonnet\"; #(1)!\nlocal inv = kap.inventory(); #(2)!\nlocal p = inv.parameters; #(3)!\n\n{\n    \"data_java_opts\": p.elasticsearch.roles.data.java_opts, #(4)!\n}\n
  1. Import the Kapitan inventory library.
  2. Assign the content of the full inventory for this specific target to the inv variable.
  3. Assign the content of the inventory.parameters to a variable p for convenience.
  4. Use the p variable to access a specific inventory value

Note: The dictionary keys of the jsonnet object are used as filenames for the generated output files. If your jsonnet is not a dictionary, but is a valid json(net) object, then the output filename will be the same as the input filename. E.g. 'my_string' is inside templates/input_file.jsonnet, so the generated output file will be named input_file.json and will contain \"my_string\".

"},{"location":"pages/input_types/jsonnet/#jinja2-templating","title":"Jinja2 templating","text":"

Kapitan allows you to compile a Jinja template from within Jsonnet:

local kap = import \"lib/kapitan.libjsonnet\";\n\n{\n    \"jon_snow\": kap.jinja2_template(\"templates/got.j2\", { is_dead: false }),\n}\n
"},{"location":"pages/input_types/jsonnet/#callback-functions","title":"Callback functions","text":"

In addition, importing kapitan.libjsonnet makes available the following native_callback functions gluing reclass to jsonnet (amongst others):

inventory | jinja2_template | yaml | file I/O | sha256_string | gzip_b64 | jsonschema

returns a dictionary with the inventory for target

renders the jinja2 file with context specified

yaml_load | yaml_load_stream | yaml_dump | yaml_dump_stream

returns a json string of the specified yaml file

returns a list of json strings of the specified yaml file

returns a string yaml from a json string

returns a string yaml stream from a json string

file_read | file_exists | dir_files_list | dir_files_read

reads the file specified

returns an informative object if a file exists

returns a list of files in a dir

returns an object with keys - file_name and values - file contents

returns sha256 of string

returns base64 encoded gzip of obj

validates obj with schema, returns object with 'valid' and 'reason' keys

"},{"location":"pages/input_types/jsonnet/#jsonschema-validation","title":"Jsonschema validation","text":"

Given the following example inventory:

mysql:\n  storage: 10G\n  storage_class: standard\n  image: mysql:latest\n

The yaml inventory structure can be validated with the new jsonschema() function:

local schema = {\n    type: \"object\",\n    properties: {\n        storage: { type: \"string\", pattern: \"^[0-9]+[MGT]{1}$\"},\n        image: { type: \"string\" },\n    }\n};\n// run jsonschema validation\nlocal validation = kap.jsonschema(inv.parameters.mysql, schema);\n// assert valid, otherwise error with validation.reason\nassert validation.valid: validation.reason;\n

If validation.valid is not true, it will then fail compilation and display validation.reason.

Fails validation because storage has an invalid pattern (10Z)

Jsonnet error: failed to compile /code/components/mysql/main.jsonnet:\nRUNTIME ERROR: '10Z' does not match '^[0-9]+[MGT]{1}$'\n\nFailed validating 'pattern' in schema['properties']['storage']:\n    {'pattern': '^[0-9]+[MGT]{1}$', 'type': 'string'}\n\nOn instance['storage']:\n    '10Z'\n\n/code/mysql/main.jsonnet:(19:1)-(43:2)\n\nCompile error: failed to compile target: minikube-mysql\n
"},{"location":"pages/input_types/kadet/","title":"Input Type | Kadet","text":"

Kadet is an extensible input type for Kapitan that enables you to generate templates using Python.

The key benefit is the ability to utilize familiar programming principles while having access to Kapitan's powerful inventory system.

A library that defines resources as classes using the BaseObj class is required. These can then be utilized within components to render output.

The following functions are provided by the class BaseObj().

Method definitions:

  • new(): Provides parameter checking capabilities
  • body(): Enables in-depth parameter configuration

Method functions:

  • root(): Defines values that will be compiled into the output
  • need(): Ability to check & define input parameters
  • update_root(): Updates the template file associated with the class

A class can be a resource such as a Kubernetes Deployment as shown here:

class Deployment(BaseObj): # (1)!\n    def new(self): # (2)!\n        self.need(\"name\", \"name string needed\")\n        self.need(\"labels\", \"labels dict needed\")\n        self.need(\"containers\", \"containers dict needed\")\n        self.update_root(\"lib/kubelib/deployment.yml\")\n\n    def body(self): # (3)!\n        self.root.metadata.name = self.kwargs.name # (4)!\n        self.root.metadata.namespace = inv.parameters.target_name\n        self.root.spec.template.metadata.labels = self.kwargs.labels\n        self.root.spec.template.spec.containers = self.kwargs.containers\n
  1. The deployment is a BaseObj() which has two main functions.
  2. new(self) is used to perform parameter validation & template compilation
  3. body(self) is utilized to set those parameters to be rendered.
  4. self.root.metadata.name is a direct reference to a key in the corresponding yaml.

Kadet supports importing libraries as you would normally do with Python. These libraries can then be used by the components to generate the required output.

...\nkubelib = kadet.load_from_search_paths(\"kubelib\") #(1)!\n...\nname = \"nginx\"\nlabels = kadet.BaseObj.from_dict({\"app\": name})\nnginx_container = kubelib.Container( #(2)!\n    name=name, image=inv.parameters.nginx.image, ports=[{\"containerPort\": 80}]\n)\n...\ndef main():\n    output = kadet.BaseObj() #(3)!\n    output.root.nginx_deployment = kubelib.Deployment(name=name, labels=labels, containers=[nginx_container]) #(4)!\n    output.root.nginx_service = kubelib.Service( #(5)!\n        name=name, labels=labels, ports=[svc_port], selector=svc_selector\n    )\n    return output #(6)!\n
  1. We import a library called kubelib using load_from_search_paths()
  2. We use kubelib to create a Container
  3. We create an output of type BaseObj and we will be updating the root element of this output.
  4. We use kubelib to create a Deployment kind. The Deployment makes use of the Container created.
  5. We use kubelib to create a Service kind.
  6. We return the object. Kapitan will render everything under output.root

Kadet uses a library called addict to organise the parameters inline with the yaml templates. As shown above, we create a BaseObj() named output. We update the root of this output with the data structure returned from kubelib. This output is what is then returned to kapitan to be compiled into the desired output type.

For a deeper understanding please refer to github.com/kapicorp/kadet

Supported output types:

  • yaml (default)
  • json
"},{"location":"pages/input_types/remove/","title":"Remove","text":"

This input type simply removes files or directories. This can be helpful if you can't control particular files generated during other compile inputs.

For example, to remove a file named copy_target, specify an entry in input_paths, e.g. compiled/${kapitan:vars:target}/copy_target.

parameters:\n  target_name: removal\n  kapitan:\n    vars:\n      target: ${target_name}\n    compile:\n      - input_type: copy\n        input_paths:\n          - copy_target\n        output_path: .\n      # test removal of a file\n      - input_type: remove\n        input_paths:\n          - compiled/${kapitan:vars:target}/copy_target\n        output_path: .\n

As a reminder, each input block within the compile array is run sequentially for a target in Kapitan. If we reversed the order of the inputs above like so:

parameters:\n  target_name: removal\n  kapitan:\n    vars:\n      target: ${target_name}\n    compile:\n      - input_type: remove\n        input_paths:\n          - compiled/${kapitan:vars:target}/copy_target\n        output_path: .\n      - input_type: copy\n        input_paths:\n          - copy_target\n        output_path: .\n

The first input block would throw an error because the copy input command hasn't run yet to produce the file being removed by the remove input block.

Supported output types: N/A (no need to specify output_type)

"},{"location":"pages/inventory/advanced/","title":"Advanced Inventory Features","text":""},{"location":"pages/inventory/advanced/#target-labels","title":"Target labels","text":"

Kapitan allows you to define labels in your inventory, which can then be used to group together targets with similar labels.

For instance you could define the following:

Defines a class to add the customer label to selected targets

inventory/classes/type/customer_project.yml

parameters:\n  customer_name: ${target_name} # Defaults to the target_name\n  kapitan:\n    labels:\n      customer: ${customer_name}\n

Apply the class to the target for customer acme

inventory/targets/customers/acme.yml

classes:\n...\n- type.customer_project\n\nparameters:\n...\n

You can now selectively compile targets for customer acme using the following (see Labels for more details):

kapitan compile -l customer=acme\nCompiled acme (0.06s)\nCompiled acme-documentation (0.09s)\n
"},{"location":"pages/inventory/classes/","title":"Classes","text":""},{"location":"pages/inventory/classes/#usage","title":"Usage","text":"

The next thing you want to learn about the inventory is classes. A class is a yaml file containing a fragment of yaml that we want to import and merge into the inventory.

Classes are fragments of yaml: feature sets, commonalities between targets. Classes let you compose your Inventory from smaller bits, eliminating duplication and exposing all important parameters from a single, logically organised place. As the Inventory lets you reference other parameters in the hierarchy, classes become places where you can define something that will then get referenced from another section of the inventory, allowing for composition.

Classes are organised under the inventory/classes directory substructure. They are organised hierarchically in subfolders, and the way they can be imported into a target or other classes depends on their location relative to the inventory/classes directory.

"},{"location":"pages/inventory/classes/#importing-classes","title":"Importing classes","text":"

To import a class from within another file of the Inventory, you can follow these instructions:

  • take the file path relative to the inventory/classes/ directory
  • remove the .yml file extension
  • replace / with .

For example, this will import the class inventory/classes/applications/sock-shop.yml

classes:\n- applications.sock-shop\n
"},{"location":"pages/inventory/classes/#definition","title":"Definition","text":"

Let's take a look at the common class which appears in the example above:

As explained, because common.yml is directly under the inventory/classes subdirectory, it can be imported directly into a target with:

classes:\n- common\n

If we open the file, we find another familiar yaml fragment.

inventory/classes/common.yml

classes:\n- kapitan.common\n\nparameters:\n  namespace: ${target_name}\n  target_name: ${_reclass_:name:short}\n

Notice that this class includes an import definition for another class, kapitan.common. We've already learned this means that kapitan will import a file on disk called inventory/classes/kapitan/common.yml

You can also see that in the parameters section we now encounter a new syntax which unlocks another powerful inventory feature: parameters interpolation!

"},{"location":"pages/inventory/introduction/","title":"Introduction","text":""},{"location":"pages/inventory/introduction/#overview","title":"Overview","text":"

The Inventory is a core component of Kapitan: this section aims to explain how it works and how to best take advantage of it.

The Inventory is a hierarchical YAML based structure which you use to capture anything that you want to make available to Kapitan, so that it can be passed on to its templating engines.

The first concept to learn about the Inventory is the target. A target is a file, found under the inventory/targets substructure, that tells Kapitan what you want to compile. It will usually map to something you want to do with Kapitan.

For instance, you might want to define a target for each environment that you want to deploy using Kapitan.

The Inventory also lets you define and reuse common configurations through YAML files that are referred to as classes: by listing classes in a target, their content gets merged together, allowing you to compose complex configurations without repetition.

By combining targets and classes, the Inventory becomes the single source of truth (SSOT) for your whole configuration, and learning how to use it will unleash the real power of Kapitan.

Info

The Kapitan Inventory is based on an open source project called reclass and you can find the full documentation on our GitHub clone. However, we discourage you from looking directly at the reclass documentation before you learn more about Kapitan, because Kapitan uses a fork of reclass and greatly simplifies the reclass experience.

Info

Kapitan allows users to switch the inventory backend to reclass-rs. You can switch the backend to reclass-rs by passing --inventory-backend=reclass-rs on the command line. Alternatively, you can define the backend in the .kapitan config file.

See the reclass-rs inventory backend documentation for more details.

Note

Kapitan enforces very little structure for the Inventory, so that you can adapt it to your specific needs. This might be overwhelming at the beginning: don't worry, we will explain best practices and give guidelines soon.

By default, Kapitan will search for its Inventory under inventory/classes and inventory/targets.

inventory/\n\u251c\u2500\u2500 classes\n\u2502   \u251c\u2500\u2500 applications\n\u2502   \u251c\u2500\u2500 components\n\u2502   \u251c\u2500\u2500 features\n\u2502   \u251c\u2500\u2500 kapitan\n\u2502   \u251c\u2500\u2500 projects\n\u2502   \u2514\u2500\u2500 terraform\n\u2514\u2500\u2500 targets\n    \u251c\u2500\u2500 examples\n    \u251c\u2500\u2500 kapicorp\n    \u2514\u2500\u2500 terraform\n
"},{"location":"pages/inventory/parameters_interpolation/","title":"Parameters Interpolation","text":"

Note

as a shorthand, when we encounter deep yaml structures like the following:

parameters:\n  components:\n    nginx:\n      image: nginx:latest\n

When we want to talk about the image subkey, we normally use either of the following:

  • parameters.components.nginx.image
  • components.nginx.image

However, when used in parameter expansion, remember to:

  • replace the . with :
  • omit the parameters initial key which is implied
  • wrap it into the ${} variable interpolation syntax

The correct way to reference parameters.components.nginx.image then becomes ${components:nginx:image}.

The Inventory allows you to refer to other values defined elsewhere in the structure, using parameter interpolation.

Given the example:

parameters:\n  cluster:\n    location: europe\n\n  application:\n    location: ${cluster:location}\n\n  namespace: ${target_name}\n  target_name: dev\n

Here we tell Kapitan that:

  • namespace should take the same value defined in target_name
  • target_name should take the literal string dev
  • application.location should take the same value as defined in cluster.location

It is important to notice that the inventory can refer to values defined in other classes, as long as they are imported by the target. So for instance, with the following example:

classes:\n  - project.production\n\nparameters:\n  application:\n    location: ${cluster:location}\n

In this case, application.location refers to a value (cluster.location) which has been defined elsewhere, perhaps (but not necessarily) in the project.production class.

Also notice that the class name (project.production) does not in any way influence the name or the structure of the yaml it imports into the file.

"},{"location":"pages/inventory/reclass-rs/","title":"The reclass-rs inventory backend","text":""},{"location":"pages/inventory/reclass-rs/#overview","title":"Overview","text":"

Reclass-rs is a reimplementation of Kapitan's Reclass fork in Rust. Please note that the Rust implementation doesn't support all the features of Kapitan's Reclass fork yet.

However, reclass-rs improves rendering time for the inventory significantly, especially if you're making heavy use of parameter references in class includes. If some of the Reclass features or options that you're using are missing in reclass-rs, don't hesitate to open an issue in the reclass-rs project.

"},{"location":"pages/inventory/reclass-rs/#installation","title":"Installation","text":"

The reclass-rs Python package is an optional dependency of Kapitan. You can install it as follows:

pip install kapitan[reclass-rs]\n
"},{"location":"pages/inventory/reclass-rs/#usage","title":"Usage","text":"

To use the reclass-rs inventory backend, you need to pass --inventory-backend=reclass-rs on the command line. If you want to permanently switch to the reclass-rs inventory backend, you can select the inventory backend in the .kapitan config file:

global:\n  inventory-backend: reclass-rs\n
"},{"location":"pages/inventory/reclass-rs/#performance-comparison","title":"Performance comparison","text":"

For the performance comparison, a real Kapitan inventory which makes heavy use of parameter interpolation in class includes was rendered with both Reclass and reclass-rs. The example inventory that was used for the performance comparison contains 325 classes and 56 targets. The example inventory renders to a total of 25MB of YAML.

"},{"location":"pages/inventory/reclass-rs/#reclass","title":"Reclass","text":"
$ time kapitan inventory -v --inventory-backend=reclass > inv.yml\n[ ... some output omitted ... ]\nkapitan.resources DEBUG    Using reclass as inventory backend\nkapitan.inventory.inv_reclass DEBUG    Inventory reclass: No config file found. Using reclass inventory config defaults\nkapitan.inventory.inv_reclass DEBUG    Inventory rendering with reclass took 0:01:06.037057\n\nreal    1m23.840s\nuser    1m23.520s\nsys     0m0.287s\n

Reclass takes 1 minute and 6 seconds to render the example inventory. The rest of the runtime (roughly 18 seconds) is spent in writing the resulting 25MB of YAML to the output file.

"},{"location":"pages/inventory/reclass-rs/#reclass-rs","title":"reclass-rs","text":"
$ time kapitan inventory -v --inventory-backend=reclass-rs > inv-rs.yml\n[ ... some output omitted ... ]\nkapitan.resources DEBUG    Using reclass-rs as inventory backend\nkapitan.inventory.inv_reclass DEBUG    Inventory reclass: No config file found. Using reclass inventory config defaults\nreclass-config.yml entry 'storage_type=yaml_fs' not implemented yet, ignoring...\nreclass-config.yml entry 'inventory_base_uri=./inventory' not implemented yet, ignoring...\nreclass-config.yml entry 'allow_none_override=true' not implemented yet, ignoring...\nkapitan.inventory.inv_reclass_rs DEBUG    Inventory rendering with reclass-rs took 0:00:01.717107\n\nreal    0m19.921s\nuser    0m35.586s\nsys     0m1.066s\n

reclass-rs takes 1.7 seconds to render the example inventory. The rest of the runtime (roughly 18 seconds) is spent in writing the resulting 25MB of YAML to the output file.

"},{"location":"pages/inventory/targets/","title":"Targets","text":""},{"location":"pages/inventory/targets/#usage","title":"Usage","text":"

A target is a file that lives under the inventory/targets subdirectory, and that tells Kapitan what you want it to do for you.

Kapitan will recognise all YAML files in the inventory/targets subtree as targets.

Note

Only use .yml as extension for Inventory files. .yaml will not be recognised as a valid Inventory file.

What you do with a target is largely up to you and your setup. Common examples:

  • clusters: Map each target to a cluster, capturing all configurations needed for a given cluster. For instance: targets/clusters/production-cluster1.yml
  • applications: When using Kapitan to manage Kubernetes applications, you might define a target for everything that you would normally deploy in a single namespace, including all its resources, scripts, secrets and documentation. For instance: targets/mysql.yml
  • environments: You might want to define a different target for each environment you have, like dev.yml, test.yml and prod.yml
  • cloud projects: When working with Terraform, it may be convenient to group targets by cloud project. For instance: targets/gcp/projects/engineering-prod.yml.
  • single tenancy: When deploying a single-tenancy application, you might combine the approaches above, and have a target acme.yml that is used to define both Terraform and Kubernetes resources for a given tenant, perhaps also with some ArgoCD or Spinnaker pipelines to go with it.

Example

If you have configured your kapitan repository as described in the Quick Start instructions, you can run the commands we give during the course of this documentation.

kapitan compile

Compiled gke-pvm-killer (0.09s)\nCompiled vault (0.18s)\nCompiled pritunl (0.17s)\nCompiled mysql (0.07s)\nCompiled examples (0.25s)\nCompiled postgres-proxy (0.06s)\nCompiled echo-server (0.08s)\nCompiled global (0.05s)\nCompiled tutorial (0.09s)\nCompiled guestbook-argocd (0.08s)\nCompiled sock-shop (0.30s)\nCompiled kapicorp-demo-march (0.04s)\nCompiled kapicorp-project-123 (0.03s)\nCompiled kapicorp-terraform-admin (0.08s)\nCompiled tesoro (0.09s)\nCompiled prod-sockshop (0.34s)\nCompiled dev-sockshop (0.41s)\nCompiled argocd (2.53s)\n

When you run kapitan compile, you instruct Kapitan to generate for each given target a directory under compiled with the same name. Under this directory you will find all the files that have been generated by Kapitan for that target.

tree compiled/mysql/

compiled/mysql/\n\u251c\u2500\u2500 argocd\n\u251c\u2500\u2500 docs\n\u2502   \u251c\u2500\u2500 mysql-readme.md\n\u2502   \u2514\u2500\u2500 README.md\n\u251c\u2500\u2500 manifests\n\u2502   \u251c\u2500\u2500 mysql-bundle.yml\n\u2502   \u251c\u2500\u2500 mysql-config.yml\n\u2502   \u251c\u2500\u2500 mysql-namespace.yml\n\u2502   \u2514\u2500\u2500 mysql-secret.yml\n\u251c\u2500\u2500 pre-deploy\n\u251c\u2500\u2500 rabbitmq\n\u251c\u2500\u2500 scripts\n\u2514\u2500\u2500 terraform\n\n7 directories, 6 files\n
"},{"location":"pages/inventory/targets/#definition","title":"Definition","text":"

A typical target might look like this:

inventory/targets/acme/dev.yml

classes:\n  - common\n  - components.acme.frontend\n  - components.acme.backend\n\nparameters:\n  target_name: dev\n

Note that it is made of 2 sections:

  • classes is a list of class files you will want to import.
  • parameters allows for local override of what is unique to this target.

Info

the kapitan key under the root parameters is reserved for kapitan usage. Some examples:

parameters:\n  kapitan:\n    compile:      # input types configuration section\n    dependencies: # dependencies configuration section to download resources\n    secrets:      # secret encryption/decryption configuration section\n    validate:     # items which indicate which compiled output to validate\n    vars:         # which are also passed down to input types as context\n
"},{"location":"tags/","title":"Tags","text":""},{"location":"tags/#community","title":"community","text":"
  • Proposals
  • Kapitan Code
  • Documentation
  • Sponsor Us
"},{"location":"tags/#kadet","title":"kadet","text":"
  • Kadet
"},{"location":"tags/#kubernetes","title":"kubernetes","text":"
  • Kadet
"}]} \ No newline at end of file diff --git a/dev/sitemap.xml b/dev/sitemap.xml new file mode 100644 index 000000000..7b25bb35a --- /dev/null +++ b/dev/sitemap.xml @@ -0,0 +1,283 @@ + + + + https://kapitan.dev/dev/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/ADOPTERS/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/FAQ/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/getting_started/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/proposals/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/references/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/related/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/support/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/tags/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/kap_proposals/kap_0_kadet/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/kap_proposals/kap_10_azure_key_vault/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/kap_proposals/kap_11_hashicorp_vault_transit/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/kap_proposals/kap_1_external_dependencies/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/kap_proposals/kap_2_helm_charts_input_type/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/kap_proposals/kap_3_schema_validation/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/kap_proposals/kap_4_standalone_executable/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/kap_proposals/kap_5_ref_types_redesign/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/kap_proposals/kap_6_hashicorp_vault/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/kap_proposals/kap_7_remote_inventory/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/kap_proposals/kap_8_google_secret_management/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/kap_proposals/kap_8_modularize_kapitan/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/kap_proposals/kap_9_bring_your_own_helm/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/external_dependencies/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/kapitan_overview/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/remote_repositories/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/blog/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/blog/04/12/2022/kapitan-logo-5-years-of-kapitan/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/blog/04/12/2022/kapitan-logo-new-kapitan-release--v0310/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/blog/04/12/2022/kapitan-logo-new-kapitan-release--v0320/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/blog/27/08/2023/kapitan-logo-deploying-keda-with-kapitan/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/blog/12/02/2024/kapitan-logo-new-kapitan-release--v0331/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/commands/kapitan_compile/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/commands/kapitan_dotfile/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/commands/kapitan_inventory/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/commands/kapitan_lint/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/commands/kapitan_searchvar/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/commands/kapitan_validate/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/contribute/code/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/contribute/documentation/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/contribute/sponsor/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/contribute/talk/ 
+ 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/input_types/copy/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/input_types/external/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/input_types/helm/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/input_types/introduction/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/input_types/jinja/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/input_types/jsonnet/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/input_types/kadet/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/input_types/remove/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/inventory/advanced/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/inventory/classes/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/inventory/introduction/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/inventory/parameters_interpolation/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/inventory/reclass-rs/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/pages/inventory/targets/ + 2024-08-31 + daily + + + https://kapitan.dev/dev/tags/ + 2024-08-31 + daily + + \ No newline at end of file diff --git a/dev/sitemap.xml.gz b/dev/sitemap.xml.gz new file mode 100644 index 000000000..5e6d21383 Binary files /dev/null and b/dev/sitemap.xml.gz differ diff --git a/dev/support/index.html b/dev/support/index.html new file mode 100644 index 000000000..64aee8061 --- /dev/null +++ b/dev/support/index.html @@ -0,0 +1,2057 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Ask for support - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+ +
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dev/tags/index.html b/dev/tags/index.html new file mode 100644 index 000000000..34e5e4069 --- /dev/null +++ b/dev/tags/index.html @@ -0,0 +1,1989 @@ + + + + + + + + + + + + + + + + + + + + + Tags - Kapitan: Keep your ship together + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+ +
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/master b/master new file mode 120000 index 000000000..90012116c --- /dev/null +++ b/master @@ -0,0 +1 @@ +dev \ No newline at end of file diff --git a/versions.json b/versions.json index 62b8b73aa..ca411871e 100644 --- a/versions.json +++ b/versions.json @@ -1,4 +1,11 @@ [ + { + "version": "dev", + "title": "dev", + "aliases": [ + "master" + ] + }, { "version": "0.33", "title": "0.33",