diff --git a/head/404.html b/head/404.html index 3602932c04f4..5c2a8b472e2e 100644 --- a/head/404.html +++ b/head/404.html @@ -14,8 +14,9 @@ + - + @@ -23,15 +24,18 @@ - + - + + + + @@ -68,7 +72,7 @@ - + @@ -113,6 +117,7 @@
@@ -132,29 +137,36 @@
-
- - - - - - - - - - - - - -
+ +
+ + + + + + + + + + + + + + + + + +
+ - - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/FAQ/index.html b/head/FAQ/index.html index eb42061d0380..e63ddd14cf52 100644 --- a/head/FAQ/index.html +++ b/head/FAQ/index.html @@ -20,8 +20,9 @@ + - + @@ -29,15 +30,18 @@ - + - + + + + @@ -74,7 +78,7 @@ - + @@ -124,6 +128,7 @@
@@ -143,29 +148,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/airgap-install/index.html b/head/airgap-install/index.html index f50d1bad607b..e60cf238597a 100644 --- a/head/airgap-install/index.html +++ b/head/airgap-install/index.html @@ -20,8 +20,9 @@ + - + @@ -29,15 +30,18 @@ - + - + + + + @@ -74,7 +78,7 @@ - + @@ -124,6 +128,7 @@
@@ -143,29 +148,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/assets/javascripts/bundle.51198bba.min.js b/head/assets/javascripts/bundle.51198bba.min.js deleted file mode 100644 index 31bd0414c643..000000000000 --- a/head/assets/javascripts/bundle.51198bba.min.js +++ /dev/null @@ -1,29 +0,0 @@ -"use strict";(()=>{var Ri=Object.create;var gr=Object.defineProperty;var ki=Object.getOwnPropertyDescriptor;var Hi=Object.getOwnPropertyNames,Ht=Object.getOwnPropertySymbols,Pi=Object.getPrototypeOf,yr=Object.prototype.hasOwnProperty,on=Object.prototype.propertyIsEnumerable;var nn=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,P=(e,t)=>{for(var r in t||(t={}))yr.call(t,r)&&nn(e,r,t[r]);if(Ht)for(var r of Ht(t))on.call(t,r)&&nn(e,r,t[r]);return e};var an=(e,t)=>{var r={};for(var n in e)yr.call(e,n)&&t.indexOf(n)<0&&(r[n]=e[n]);if(e!=null&&Ht)for(var n of Ht(e))t.indexOf(n)<0&&on.call(e,n)&&(r[n]=e[n]);return r};var Pt=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var $i=(e,t,r,n)=>{if(t&&typeof t=="object"||typeof t=="function")for(let o of Hi(t))!yr.call(e,o)&&o!==r&&gr(e,o,{get:()=>t[o],enumerable:!(n=ki(t,o))||n.enumerable});return e};var yt=(e,t,r)=>(r=e!=null?Ri(Pi(e)):{},$i(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var cn=Pt((xr,sn)=>{(function(e,t){typeof xr=="object"&&typeof sn!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(xr,function(){"use strict";function e(r){var n=!0,o=!1,i=null,s={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function a(T){return!!(T&&T!==document&&T.nodeName!=="HTML"&&T.nodeName!=="BODY"&&"classList"in T&&"contains"in T.classList)}function c(T){var Qe=T.type,De=T.tagName;return!!(De==="INPUT"&&s[Qe]&&!T.readOnly||De==="TEXTAREA"&&!T.readOnly||T.isContentEditable)}function f(T){T.classList.contains("focus-visible")||(T.classList.add("focus-visible"),T.setAttribute("data-focus-visible-added",""))}function u(T){T.hasAttribute("data-focus-visible-added")&&(T.classList.remove("focus-visible"),T.removeAttribute("data-focus-visible-added"))}function p(T){T.metaKey||T.altKey||T.ctrlKey||(a(r.activeElement)&&f(r.activeElement),n=!0)}function m(T){n=!1}function d(T){a(T.target)&&(n||c(T.target))&&f(T.target)}function h(T){a(T.target)&&(T.target.classList.contains("focus-visible")||T.target.hasAttribute("data-focus-visible-added"))&&(o=!0,window.clearTimeout(i),i=window.setTimeout(function(){o=!1},100),u(T.target))}function v(T){document.visibilityState==="hidden"&&(o&&(n=!0),G())}function G(){document.addEventListener("mousemove",N),document.addEventListener("mousedown",N),document.addEventListener("mouseup",N),document.addEventListener("pointermove",N),document.addEventListener("pointerdown",N),document.addEventListener("pointerup",N),document.addEventListener("touchmove",N),document.addEventListener("touchstart",N),document.addEventListener("touchend",N)}function oe(){document.removeEventListener("mousemove",N),document.removeEventListener("mousedown",N),document.removeEventListener("mouseup",N),document.removeEventListener("pointermove",N),document.removeEventListener("pointerdown",N),document.removeEventListener("pointerup",N),document.removeEventListener("touchmove",N),document.removeEventListener("touchstart",N),document.removeEventListener("touchend",N)}function 
N(T){T.target.nodeName&&T.target.nodeName.toLowerCase()==="html"||(n=!1,oe())}document.addEventListener("keydown",p,!0),document.addEventListener("mousedown",m,!0),document.addEventListener("pointerdown",m,!0),document.addEventListener("touchstart",m,!0),document.addEventListener("visibilitychange",v,!0),G(),r.addEventListener("focus",d,!0),r.addEventListener("blur",h,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var fn=Pt(Er=>{(function(e){var t=function(){try{return!!Symbol.iterator}catch(f){return!1}},r=t(),n=function(f){var u={next:function(){var p=f.shift();return{done:p===void 0,value:p}}};return r&&(u[Symbol.iterator]=function(){return u}),u},o=function(f){return encodeURIComponent(f).replace(/%20/g,"+")},i=function(f){return decodeURIComponent(String(f).replace(/\+/g," "))},s=function(){var f=function(p){Object.defineProperty(this,"_entries",{writable:!0,value:{}});var m=typeof p;if(m!=="undefined")if(m==="string")p!==""&&this._fromString(p);else if(p instanceof f){var d=this;p.forEach(function(oe,N){d.append(N,oe)})}else if(p!==null&&m==="object")if(Object.prototype.toString.call(p)==="[object Array]")for(var h=0;hd[0]?1:0}),f._entries&&(f._entries={});for(var p=0;p1?i(d[1]):"")}})})(typeof global!="undefined"?global:typeof window!="undefined"?window:typeof self!="undefined"?self:Er);(function(e){var t=function(){try{var o=new e.URL("b","http://a");return o.pathname="c d",o.href==="http://a/c%20d"&&o.searchParams}catch(i){return!1}},r=function(){var o=e.URL,i=function(c,f){typeof c!="string"&&(c=String(c)),f&&typeof f!="string"&&(f=String(f));var u=document,p;if(f&&(e.location===void 0||f!==e.location.href)){f=f.toLowerCase(),u=document.implementation.createHTMLDocument(""),p=u.createElement("base"),p.href=f,u.head.appendChild(p);try{if(p.href.indexOf(f)!==0)throw new Error(p.href)}catch(T){throw new Error("URL unable to set base "+f+" due to "+T)}}var m=u.createElement("a");m.href=c,p&&(u.body.appendChild(m),m.href=m.href);var d=u.createElement("input");if(d.type="url",d.value=c,m.protocol===":"||!/:/.test(m.href)||!d.checkValidity()&&!f)throw new TypeError("Invalid URL");Object.defineProperty(this,"_anchorElement",{value:m});var h=new e.URLSearchParams(this.search),v=!0,G=!0,oe=this;["append","delete","set"].forEach(function(T){var Qe=h[T];h[T]=function(){Qe.apply(h,arguments),v&&(G=!1,oe.search=h.toString(),G=!0)}}),Object.defineProperty(this,"searchParams",{value:h,enumerable:!0});var N=void 0;Object.defineProperty(this,"_updateSearchParams",{enumerable:!1,configurable:!1,writable:!1,value:function(){this.search!==N&&(N=this.search,G&&(v=!1,this.searchParams._fromString(this.search),v=!0))}})},s=i.prototype,a=function(c){Object.defineProperty(s,c,{get:function(){return this._anchorElement[c]},set:function(f){this._anchorElement[c]=f},enumerable:!0})};["hash","host","hostname","port","protocol"].forEach(function(c){a(c)}),Object.defineProperty(s,"search",{get:function(){return 
this._anchorElement.search},set:function(c){this._anchorElement.search=c,this._updateSearchParams()},enumerable:!0}),Object.defineProperties(s,{toString:{get:function(){var c=this;return function(){return c.href}}},href:{get:function(){return this._anchorElement.href.replace(/\?$/,"")},set:function(c){this._anchorElement.href=c,this._updateSearchParams()},enumerable:!0},pathname:{get:function(){return this._anchorElement.pathname.replace(/(^\/?)/,"/")},set:function(c){this._anchorElement.pathname=c},enumerable:!0},origin:{get:function(){var c={"http:":80,"https:":443,"ftp:":21}[this._anchorElement.protocol],f=this._anchorElement.port!=c&&this._anchorElement.port!=="";return this._anchorElement.protocol+"//"+this._anchorElement.hostname+(f?":"+this._anchorElement.port:"")},enumerable:!0},password:{get:function(){return""},set:function(c){},enumerable:!0},username:{get:function(){return""},set:function(c){},enumerable:!0}}),i.createObjectURL=function(c){return o.createObjectURL.apply(o,arguments)},i.revokeObjectURL=function(c){return o.revokeObjectURL.apply(o,arguments)},e.URL=i};if(t()||r(),e.location!==void 0&&!("origin"in e.location)){var n=function(){return e.location.protocol+"//"+e.location.hostname+(e.location.port?":"+e.location.port:"")};try{Object.defineProperty(e.location,"origin",{get:n,enumerable:!0})}catch(o){setInterval(function(){e.location.origin=n()},100)}}})(typeof global!="undefined"?global:typeof window!="undefined"?window:typeof self!="undefined"?self:Er)});var Kr=Pt((Mt,qr)=>{/*! - * clipboard.js v2.0.11 - * https://clipboardjs.com/ - * - * Licensed MIT © Zeno Rocha - */(function(t,r){typeof Mt=="object"&&typeof qr=="object"?qr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Mt=="object"?Mt.ClipboardJS=r():t.ClipboardJS=r()})(Mt,function(){return function(){var e={686:function(n,o,i){"use strict";i.d(o,{default:function(){return Ci}});var s=i(279),a=i.n(s),c=i(370),f=i.n(c),u=i(817),p=i.n(u);function m(j){try{return document.execCommand(j)}catch(O){return!1}}var d=function(O){var E=p()(O);return m("cut"),E},h=d;function v(j){var O=document.documentElement.getAttribute("dir")==="rtl",E=document.createElement("textarea");E.style.fontSize="12pt",E.style.border="0",E.style.padding="0",E.style.margin="0",E.style.position="absolute",E.style[O?"right":"left"]="-9999px";var H=window.pageYOffset||document.documentElement.scrollTop;return E.style.top="".concat(H,"px"),E.setAttribute("readonly",""),E.value=j,E}var G=function(O,E){var H=v(O);E.container.appendChild(H);var I=p()(H);return m("copy"),H.remove(),I},oe=function(O){var E=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},H="";return typeof O=="string"?H=G(O,E):O instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(O==null?void 0:O.type)?H=G(O.value,E):(H=p()(O),m("copy")),H},N=oe;function T(j){return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?T=function(E){return typeof E}:T=function(E){return E&&typeof Symbol=="function"&&E.constructor===Symbol&&E!==Symbol.prototype?"symbol":typeof E},T(j)}var Qe=function(){var O=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},E=O.action,H=E===void 0?"copy":E,I=O.container,q=O.target,Me=O.text;if(H!=="copy"&&H!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(q!==void 0)if(q&&T(q)==="object"&&q.nodeType===1){if(H==="copy"&&q.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. 
Please use "readonly" instead of "disabled" attribute');if(H==="cut"&&(q.hasAttribute("readonly")||q.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(Me)return N(Me,{container:I});if(q)return H==="cut"?h(q):N(q,{container:I})},De=Qe;function $e(j){return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?$e=function(E){return typeof E}:$e=function(E){return E&&typeof Symbol=="function"&&E.constructor===Symbol&&E!==Symbol.prototype?"symbol":typeof E},$e(j)}function wi(j,O){if(!(j instanceof O))throw new TypeError("Cannot call a class as a function")}function rn(j,O){for(var E=0;E0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof I.action=="function"?I.action:this.defaultAction,this.target=typeof I.target=="function"?I.target:this.defaultTarget,this.text=typeof I.text=="function"?I.text:this.defaultText,this.container=$e(I.container)==="object"?I.container:document.body}},{key:"listenClick",value:function(I){var q=this;this.listener=f()(I,"click",function(Me){return q.onClick(Me)})}},{key:"onClick",value:function(I){var q=I.delegateTarget||I.currentTarget,Me=this.action(q)||"copy",kt=De({action:Me,container:this.container,target:this.target(q),text:this.text(q)});this.emit(kt?"success":"error",{action:Me,text:kt,trigger:q,clearSelection:function(){q&&q.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(I){return vr("action",I)}},{key:"defaultTarget",value:function(I){var q=vr("target",I);if(q)return document.querySelector(q)}},{key:"defaultText",value:function(I){return vr("text",I)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(I){var q=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return N(I,q)}},{key:"cut",value:function(I){return h(I)}},{key:"isSupported",value:function(){var I=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],q=typeof I=="string"?[I]:I,Me=!!document.queryCommandSupported;return q.forEach(function(kt){Me=Me&&!!document.queryCommandSupported(kt)}),Me}}]),E}(a()),Ci=Ai},828:function(n){var o=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function s(a,c){for(;a&&a.nodeType!==o;){if(typeof a.matches=="function"&&a.matches(c))return a;a=a.parentNode}}n.exports=s},438:function(n,o,i){var s=i(828);function a(u,p,m,d,h){var v=f.apply(this,arguments);return u.addEventListener(m,v,h),{destroy:function(){u.removeEventListener(m,v,h)}}}function c(u,p,m,d,h){return typeof u.addEventListener=="function"?a.apply(null,arguments):typeof m=="function"?a.bind(null,document).apply(null,arguments):(typeof u=="string"&&(u=document.querySelectorAll(u)),Array.prototype.map.call(u,function(v){return a(v,p,m,d,h)}))}function f(u,p,m,d){return function(h){h.delegateTarget=s(h.target,p),h.delegateTarget&&d.call(u,h)}}n.exports=c},879:function(n,o){o.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},o.nodeList=function(i){var s=Object.prototype.toString.call(i);return i!==void 0&&(s==="[object NodeList]"||s==="[object HTMLCollection]")&&"length"in i&&(i.length===0||o.node(i[0]))},o.string=function(i){return typeof i=="string"||i instanceof String},o.fn=function(i){var 
s=Object.prototype.toString.call(i);return s==="[object Function]"}},370:function(n,o,i){var s=i(879),a=i(438);function c(m,d,h){if(!m&&!d&&!h)throw new Error("Missing required arguments");if(!s.string(d))throw new TypeError("Second argument must be a String");if(!s.fn(h))throw new TypeError("Third argument must be a Function");if(s.node(m))return f(m,d,h);if(s.nodeList(m))return u(m,d,h);if(s.string(m))return p(m,d,h);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function f(m,d,h){return m.addEventListener(d,h),{destroy:function(){m.removeEventListener(d,h)}}}function u(m,d,h){return Array.prototype.forEach.call(m,function(v){v.addEventListener(d,h)}),{destroy:function(){Array.prototype.forEach.call(m,function(v){v.removeEventListener(d,h)})}}}function p(m,d,h){return a(document.body,m,d,h)}n.exports=c},817:function(n){function o(i){var s;if(i.nodeName==="SELECT")i.focus(),s=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var a=i.hasAttribute("readonly");a||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),a||i.removeAttribute("readonly"),s=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var c=window.getSelection(),f=document.createRange();f.selectNodeContents(i),c.removeAllRanges(),c.addRange(f),s=c.toString()}return s}n.exports=o},279:function(n){function o(){}o.prototype={on:function(i,s,a){var c=this.e||(this.e={});return(c[i]||(c[i]=[])).push({fn:s,ctx:a}),this},once:function(i,s,a){var c=this;function f(){c.off(i,f),s.apply(a,arguments)}return f._=s,this.on(i,f,a)},emit:function(i){var s=[].slice.call(arguments,1),a=((this.e||(this.e={}))[i]||[]).slice(),c=0,f=a.length;for(c;c{"use strict";/*! - * escape-html - * Copyright(c) 2012-2013 TJ Holowaychuk - * Copyright(c) 2015 Andreas Lubbe - * Copyright(c) 2015 Tiancheng "Timothy" Gu - * MIT Licensed - */var ns=/["'&<>]/;Go.exports=os;function os(e){var t=""+e,r=ns.exec(t);if(!r)return t;var n,o="",i=0,s=0;for(i=r.index;i0&&i[i.length-1])&&(f[0]===6||f[0]===2)){r=0;continue}if(f[0]===3&&(!i||f[1]>i[0]&&f[1]=e.length&&(e=void 0),{value:e&&e[n++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function W(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var n=r.call(e),o,i=[],s;try{for(;(t===void 0||t-- >0)&&!(o=n.next()).done;)i.push(o.value)}catch(a){s={error:a}}finally{try{o&&!o.done&&(r=n.return)&&r.call(n)}finally{if(s)throw s.error}}return i}function D(e,t,r){if(r||arguments.length===2)for(var n=0,o=t.length,i;n1||a(m,d)})})}function a(m,d){try{c(n[m](d))}catch(h){p(i[0][3],h)}}function c(m){m.value instanceof et?Promise.resolve(m.value.v).then(f,u):p(i[0][2],m)}function f(m){a("next",m)}function u(m){a("throw",m)}function p(m,d){m(d),i.shift(),i.length&&a(i[0][0],i[0][1])}}function ln(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof Ee=="function"?Ee(e):e[Symbol.iterator](),r={},n("next"),n("throw"),n("return"),r[Symbol.asyncIterator]=function(){return this},r);function n(i){r[i]=e[i]&&function(s){return new Promise(function(a,c){s=e[i](s),o(a,c,s.done,s.value)})}}function o(i,s,a,c){Promise.resolve(c).then(function(f){i({value:f,done:a})},s)}}function C(e){return typeof e=="function"}function at(e){var t=function(n){Error.call(n),n.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var 
It=at(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: -`+r.map(function(n,o){return o+1+") "+n.toString()}).join(` - `):"",this.name="UnsubscriptionError",this.errors=r}});function Ve(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Ie=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,n,o,i;if(!this.closed){this.closed=!0;var s=this._parentage;if(s)if(this._parentage=null,Array.isArray(s))try{for(var a=Ee(s),c=a.next();!c.done;c=a.next()){var f=c.value;f.remove(this)}}catch(v){t={error:v}}finally{try{c&&!c.done&&(r=a.return)&&r.call(a)}finally{if(t)throw t.error}}else s.remove(this);var u=this.initialTeardown;if(C(u))try{u()}catch(v){i=v instanceof It?v.errors:[v]}var p=this._finalizers;if(p){this._finalizers=null;try{for(var m=Ee(p),d=m.next();!d.done;d=m.next()){var h=d.value;try{mn(h)}catch(v){i=i!=null?i:[],v instanceof It?i=D(D([],W(i)),W(v.errors)):i.push(v)}}}catch(v){n={error:v}}finally{try{d&&!d.done&&(o=m.return)&&o.call(m)}finally{if(n)throw n.error}}}if(i)throw new It(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)mn(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&Ve(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&Ve(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Sr=Ie.EMPTY;function jt(e){return e instanceof Ie||e&&"closed"in e&&C(e.remove)&&C(e.add)&&C(e.unsubscribe)}function mn(e){C(e)?e():e.unsubscribe()}var Le={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var st={setTimeout:function(e,t){for(var r=[],n=2;n0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var n=this,o=this,i=o.hasError,s=o.isStopped,a=o.observers;return i||s?Sr:(this.currentObservers=null,a.push(r),new Ie(function(){n.currentObservers=null,Ve(a,r)}))},t.prototype._checkFinalizedStatuses=function(r){var n=this,o=n.hasError,i=n.thrownError,s=n.isStopped;o?r.error(i):s&&r.complete()},t.prototype.asObservable=function(){var r=new F;return r.source=this,r},t.create=function(r,n){return new En(r,n)},t}(F);var En=function(e){ie(t,e);function t(r,n){var o=e.call(this)||this;return o.destination=r,o.source=n,o}return t.prototype.next=function(r){var n,o;(o=(n=this.destination)===null||n===void 0?void 0:n.next)===null||o===void 0||o.call(n,r)},t.prototype.error=function(r){var n,o;(o=(n=this.destination)===null||n===void 0?void 0:n.error)===null||o===void 0||o.call(n,r)},t.prototype.complete=function(){var r,n;(n=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||n===void 0||n.call(r)},t.prototype._subscribe=function(r){var n,o;return(o=(n=this.source)===null||n===void 0?void 
0:n.subscribe(r))!==null&&o!==void 0?o:Sr},t}(x);var Et={now:function(){return(Et.delegate||Date).now()},delegate:void 0};var wt=function(e){ie(t,e);function t(r,n,o){r===void 0&&(r=1/0),n===void 0&&(n=1/0),o===void 0&&(o=Et);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=n,i._timestampProvider=o,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=n===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,n),i}return t.prototype.next=function(r){var n=this,o=n.isStopped,i=n._buffer,s=n._infiniteTimeWindow,a=n._timestampProvider,c=n._windowTime;o||(i.push(r),!s&&i.push(a.now()+c)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var n=this._innerSubscribe(r),o=this,i=o._infiniteTimeWindow,s=o._buffer,a=s.slice(),c=0;c0?e.prototype.requestAsyncId.call(this,r,n,o):(r.actions.push(this),r._scheduled||(r._scheduled=ut.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,n,o){var i;if(o===void 0&&(o=0),o!=null?o>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,n,o);var s=r.actions;n!=null&&((i=s[s.length-1])===null||i===void 0?void 0:i.id)!==n&&(ut.cancelAnimationFrame(n),r._scheduled=void 0)},t}(Wt);var Tn=function(e){ie(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var n=this._scheduled;this._scheduled=void 0;var o=this.actions,i;r=r||o.shift();do if(i=r.execute(r.state,r.delay))break;while((r=o[0])&&r.id===n&&o.shift());if(this._active=!1,i){for(;(r=o[0])&&r.id===n&&o.shift();)r.unsubscribe();throw i}},t}(Dt);var Te=new Tn(Sn);var _=new F(function(e){return e.complete()});function Vt(e){return e&&C(e.schedule)}function Cr(e){return e[e.length-1]}function Ye(e){return C(Cr(e))?e.pop():void 0}function Oe(e){return Vt(Cr(e))?e.pop():void 0}function zt(e,t){return typeof Cr(e)=="number"?e.pop():t}var pt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Nt(e){return C(e==null?void 0:e.then)}function qt(e){return C(e[ft])}function Kt(e){return Symbol.asyncIterator&&C(e==null?void 0:e[Symbol.asyncIterator])}function Qt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Ni(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var Yt=Ni();function Gt(e){return C(e==null?void 0:e[Yt])}function Bt(e){return pn(this,arguments,function(){var r,n,o,i;return $t(this,function(s){switch(s.label){case 0:r=e.getReader(),s.label=1;case 1:s.trys.push([1,,9,10]),s.label=2;case 2:return[4,et(r.read())];case 3:return n=s.sent(),o=n.value,i=n.done,i?[4,et(void 0)]:[3,5];case 4:return[2,s.sent()];case 5:return[4,et(o)];case 6:return[4,s.sent()];case 7:return s.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function Jt(e){return C(e==null?void 0:e.getReader)}function U(e){if(e instanceof F)return e;if(e!=null){if(qt(e))return qi(e);if(pt(e))return Ki(e);if(Nt(e))return Qi(e);if(Kt(e))return On(e);if(Gt(e))return Yi(e);if(Jt(e))return Gi(e)}throw Qt(e)}function qi(e){return new F(function(t){var r=e[ft]();if(C(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Ki(e){return new F(function(t){for(var r=0;r=2;return function(n){return n.pipe(e?A(function(o,i){return e(o,i,n)}):de,ge(1),r?He(t):Vn(function(){return new Zt}))}}function zn(){for(var e=[],t=0;t=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new x}:t,n=e.resetOnError,o=n===void 0?!0:n,i=e.resetOnComplete,s=i===void 0?!0:i,a=e.resetOnRefCountZero,c=a===void 0?!0:a;return function(f){var u,p,m,d=0,h=!1,v=!1,G=function(){p==null||p.unsubscribe(),p=void 0},oe=function(){G(),u=m=void 0,h=v=!1},N=function(){var T=u;oe(),T==null||T.unsubscribe()};return y(function(T,Qe){d++,!v&&!h&&G();var De=m=m!=null?m:r();Qe.add(function(){d--,d===0&&!v&&!h&&(p=$r(N,c))}),De.subscribe(Qe),!u&&d>0&&(u=new rt({next:function($e){return De.next($e)},error:function($e){v=!0,G(),p=$r(oe,o,$e),De.error($e)},complete:function(){h=!0,G(),p=$r(oe,s),De.complete()}}),U(T).subscribe(u))})(f)}}function $r(e,t){for(var r=[],n=2;ne.next(document)),e}function K(e,t=document){return Array.from(t.querySelectorAll(e))}function z(e,t=document){let r=ce(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function ce(e,t=document){return t.querySelector(e)||void 0}function _e(){return document.activeElement instanceof HTMLElement&&document.activeElement||void 0}function tr(e){return L(b(document.body,"focusin"),b(document.body,"focusout")).pipe(ke(1),l(()=>{let t=_e();return typeof t!="undefined"?e.contains(t):!1}),V(e===_e()),B())}function Xe(e){return{x:e.offsetLeft,y:e.offsetTop}}function Qn(e){return L(b(window,"load"),b(window,"resize")).pipe(Ce(0,Te),l(()=>Xe(e)),V(Xe(e)))}function rr(e){return{x:e.scrollLeft,y:e.scrollTop}}function dt(e){return L(b(e,"scroll"),b(window,"resize")).pipe(Ce(0,Te),l(()=>rr(e)),V(rr(e)))}var Gn=function(){if(typeof Map!="undefined")return Map;function e(t,r){var n=-1;return t.some(function(o,i){return o[0]===r?(n=i,!0):!1}),n}return function(){function t(){this.__entries__=[]}return Object.defineProperty(t.prototype,"size",{get:function(){return this.__entries__.length},enumerable:!0,configurable:!0}),t.prototype.get=function(r){var n=e(this.__entries__,r),o=this.__entries__[n];return o&&o[1]},t.prototype.set=function(r,n){var o=e(this.__entries__,r);~o?this.__entries__[o][1]=n:this.__entries__.push([r,n])},t.prototype.delete=function(r){var 
n=this.__entries__,o=e(n,r);~o&&n.splice(o,1)},t.prototype.has=function(r){return!!~e(this.__entries__,r)},t.prototype.clear=function(){this.__entries__.splice(0)},t.prototype.forEach=function(r,n){n===void 0&&(n=null);for(var o=0,i=this.__entries__;o0},e.prototype.connect_=function(){!Dr||this.connected_||(document.addEventListener("transitionend",this.onTransitionEnd_),window.addEventListener("resize",this.refresh),ga?(this.mutationsObserver_=new MutationObserver(this.refresh),this.mutationsObserver_.observe(document,{attributes:!0,childList:!0,characterData:!0,subtree:!0})):(document.addEventListener("DOMSubtreeModified",this.refresh),this.mutationEventsAdded_=!0),this.connected_=!0)},e.prototype.disconnect_=function(){!Dr||!this.connected_||(document.removeEventListener("transitionend",this.onTransitionEnd_),window.removeEventListener("resize",this.refresh),this.mutationsObserver_&&this.mutationsObserver_.disconnect(),this.mutationEventsAdded_&&document.removeEventListener("DOMSubtreeModified",this.refresh),this.mutationsObserver_=null,this.mutationEventsAdded_=!1,this.connected_=!1)},e.prototype.onTransitionEnd_=function(t){var r=t.propertyName,n=r===void 0?"":r,o=va.some(function(i){return!!~n.indexOf(i)});o&&this.refresh()},e.getInstance=function(){return this.instance_||(this.instance_=new e),this.instance_},e.instance_=null,e}(),Bn=function(e,t){for(var r=0,n=Object.keys(t);r0},e}(),Xn=typeof WeakMap!="undefined"?new WeakMap:new Gn,Zn=function(){function e(t){if(!(this instanceof e))throw new TypeError("Cannot call a class as a function.");if(!arguments.length)throw new TypeError("1 argument required, but only 0 present.");var r=ya.getInstance(),n=new Aa(t,r,this);Xn.set(this,n)}return e}();["observe","unobserve","disconnect"].forEach(function(e){Zn.prototype[e]=function(){var t;return(t=Xn.get(this))[e].apply(t,arguments)}});var Ca=function(){return typeof nr.ResizeObserver!="undefined"?nr.ResizeObserver:Zn}(),eo=Ca;var to=new x,Ra=$(()=>k(new eo(e=>{for(let t of e)to.next(t)}))).pipe(g(e=>L(ze,k(e)).pipe(R(()=>e.disconnect()))),J(1));function he(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ye(e){return Ra.pipe(S(t=>t.observe(e)),g(t=>to.pipe(A(({target:r})=>r===e),R(()=>t.unobserve(e)),l(()=>he(e)))),V(he(e)))}function bt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function ar(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}var ro=new x,ka=$(()=>k(new IntersectionObserver(e=>{for(let t of e)ro.next(t)},{threshold:0}))).pipe(g(e=>L(ze,k(e)).pipe(R(()=>e.disconnect()))),J(1));function sr(e){return ka.pipe(S(t=>t.observe(e)),g(t=>ro.pipe(A(({target:r})=>r===e),R(()=>t.unobserve(e)),l(({isIntersecting:r})=>r))))}function no(e,t=16){return dt(e).pipe(l(({y:r})=>{let n=he(e),o=bt(e);return r>=o.height-n.height-t}),B())}var cr={drawer:z("[data-md-toggle=drawer]"),search:z("[data-md-toggle=search]")};function oo(e){return cr[e].checked}function Ke(e,t){cr[e].checked!==t&&cr[e].click()}function Ue(e){let t=cr[e];return b(t,"change").pipe(l(()=>t.checked),V(t.checked))}function Ha(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Pa(){return L(b(window,"compositionstart").pipe(l(()=>!0)),b(window,"compositionend").pipe(l(()=>!1))).pipe(V(!1))}function io(){let 
e=b(window,"keydown").pipe(A(t=>!(t.metaKey||t.ctrlKey)),l(t=>({mode:oo("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),A(({mode:t,type:r})=>{if(t==="global"){let n=_e();if(typeof n!="undefined")return!Ha(n,r)}return!0}),pe());return Pa().pipe(g(t=>t?_:e))}function le(){return new URL(location.href)}function ot(e){location.href=e.href}function ao(){return new x}function so(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)so(e,r)}function M(e,t,...r){let n=document.createElement(e);if(t)for(let o of Object.keys(t))typeof t[o]!="undefined"&&(typeof t[o]!="boolean"?n.setAttribute(o,t[o]):n.setAttribute(o,""));for(let o of r)so(n,o);return n}function fr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function co(){return location.hash.substring(1)}function Vr(e){let t=M("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function $a(e){return L(b(window,"hashchange"),e).pipe(l(co),V(co()),A(t=>t.length>0),J(1))}function fo(e){return $a(e).pipe(l(t=>ce(`[id="${t}"]`)),A(t=>typeof t!="undefined"))}function zr(e){let t=matchMedia(e);return er(r=>t.addListener(()=>r(t.matches))).pipe(V(t.matches))}function uo(){let e=matchMedia("print");return L(b(window,"beforeprint").pipe(l(()=>!0)),b(window,"afterprint").pipe(l(()=>!1))).pipe(V(e.matches))}function Nr(e,t){return e.pipe(g(r=>r?t():_))}function ur(e,t={credentials:"same-origin"}){return ue(fetch(`${e}`,t)).pipe(fe(()=>_),g(r=>r.status!==200?Tt(()=>new Error(r.statusText)):k(r)))}function We(e,t){return ur(e,t).pipe(g(r=>r.json()),J(1))}function po(e,t){let r=new DOMParser;return ur(e,t).pipe(g(n=>n.text()),l(n=>r.parseFromString(n,"text/xml")),J(1))}function pr(e){let t=M("script",{src:e});return $(()=>(document.head.appendChild(t),L(b(t,"load"),b(t,"error").pipe(g(()=>Tt(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(l(()=>{}),R(()=>document.head.removeChild(t)),ge(1))))}function lo(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function mo(){return L(b(window,"scroll",{passive:!0}),b(window,"resize",{passive:!0})).pipe(l(lo),V(lo()))}function ho(){return{width:innerWidth,height:innerHeight}}function bo(){return b(window,"resize",{passive:!0}).pipe(l(ho),V(ho()))}function vo(){return Q([mo(),bo()]).pipe(l(([e,t])=>({offset:e,size:t})),J(1))}function lr(e,{viewport$:t,header$:r}){let n=t.pipe(Z("size")),o=Q([n,r]).pipe(l(()=>Xe(e)));return Q([r,t,o]).pipe(l(([{height:i},{offset:s,size:a},{x:c,y:f}])=>({offset:{x:s.x-c,y:s.y-f+i},size:a})))}(()=>{function e(n,o){parent.postMessage(n,o||"*")}function t(...n){return n.reduce((o,i)=>o.then(()=>new Promise(s=>{let a=document.createElement("script");a.src=i,a.onload=s,document.body.appendChild(a)})),Promise.resolve())}var r=class extends EventTarget{constructor(n){super(),this.url=n,this.m=i=>{i.source===this.w&&(this.dispatchEvent(new MessageEvent("message",{data:i.data})),this.onmessage&&this.onmessage(i))},this.e=(i,s,a,c,f)=>{if(s===`${this.url}`){let u=new ErrorEvent("error",{message:i,filename:s,lineno:a,colno:c,error:f});this.dispatchEvent(u),this.onerror&&this.onerror(u)}};let o=document.createElement("iframe");o.hidden=!0,document.body.appendChild(this.iframe=o),this.w.document.open(),this.w.document.write(` + - + diff --git a/head/autopilot/index.html b/head/autopilot/index.html index 6b04eae773e4..da0e4faef2b9 100644 --- a/head/autopilot/index.html +++ 
b/head/autopilot/index.html @@ -20,8 +20,9 @@ + - + @@ -29,15 +30,18 @@ - + - + + + + @@ -74,7 +78,7 @@ - + @@ -124,6 +128,7 @@
@@ -143,29 +148,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cis_benchmark/index.html b/head/cis_benchmark/index.html index 7ea7ba16c37c..c7f62dac8187 100644 --- a/head/cis_benchmark/index.html +++ b/head/cis_benchmark/index.html @@ -20,8 +20,9 @@ + - + @@ -29,15 +30,18 @@ - + - + + + + @@ -74,7 +78,7 @@ - + @@ -124,6 +128,7 @@
@@ -143,29 +148,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/index.html b/head/cli/index.html index 65cfecb86c1b..87fff7c7164a 100644 --- a/head/cli/index.html +++ b/head/cli/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s/index.html b/head/cli/k0s/index.html index c09f95f64287..efdcaf1e77b4 100644 --- a/head/cli/k0s/index.html +++ b/head/cli/k0s/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_airgap/index.html b/head/cli/k0s_airgap/index.html index f546b60009ec..9b6d6bed6971 100644 --- a/head/cli/k0s_airgap/index.html +++ b/head/cli/k0s_airgap/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_airgap_list-images/index.html b/head/cli/k0s_airgap_list-images/index.html index e2eafea5b89c..8cc63c28488a 100644 --- a/head/cli/k0s_airgap_list-images/index.html +++ b/head/cli/k0s_airgap_list-images/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_api/index.html b/head/cli/k0s_api/index.html index 69453cb84118..740207b3dfae 100644 --- a/head/cli/k0s_api/index.html +++ b/head/cli/k0s_api/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_backup/index.html b/head/cli/k0s_backup/index.html index 1c403ad7fc63..9d0b89716905 100644 --- a/head/cli/k0s_backup/index.html +++ b/head/cli/k0s_backup/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_completion/index.html b/head/cli/k0s_completion/index.html index 216a4135035a..ca6907ca75ba 100644 --- a/head/cli/k0s_completion/index.html +++ b/head/cli/k0s_completion/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_config/index.html b/head/cli/k0s_config/index.html index 265ba4f97cef..2ccb57549fdb 100644 --- a/head/cli/k0s_config/index.html +++ b/head/cli/k0s_config/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_config_create/index.html b/head/cli/k0s_config_create/index.html index a531b4d59eca..1787093cd2fb 100644 --- a/head/cli/k0s_config_create/index.html +++ b/head/cli/k0s_config_create/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_config_edit/index.html b/head/cli/k0s_config_edit/index.html index 20e2c7607ebf..ee8c5922eca0 100644 --- a/head/cli/k0s_config_edit/index.html +++ b/head/cli/k0s_config_edit/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_config_status/index.html b/head/cli/k0s_config_status/index.html index ee8b71cb0feb..1b5c40eb8e40 100644 --- a/head/cli/k0s_config_status/index.html +++ b/head/cli/k0s_config_status/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_config_validate/index.html b/head/cli/k0s_config_validate/index.html index 7124de5a657c..3c9d9083995b 100644 --- a/head/cli/k0s_config_validate/index.html +++ b/head/cli/k0s_config_validate/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_controller/index.html b/head/cli/k0s_controller/index.html index 2429b612aac2..457ec7ce3928 100644 --- a/head/cli/k0s_controller/index.html +++ b/head/cli/k0s_controller/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_ctr/index.html b/head/cli/k0s_ctr/index.html index f7a8740fd949..e80e273b470c 100644 --- a/head/cli/k0s_ctr/index.html +++ b/head/cli/k0s_ctr/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_docs/index.html b/head/cli/k0s_docs/index.html index 575efb3409fe..8b4e9f689a9a 100644 --- a/head/cli/k0s_docs/index.html +++ b/head/cli/k0s_docs/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_etcd/index.html b/head/cli/k0s_etcd/index.html index 2bf21bf97535..86e27c00e9bf 100644 --- a/head/cli/k0s_etcd/index.html +++ b/head/cli/k0s_etcd/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_etcd_leave/index.html b/head/cli/k0s_etcd_leave/index.html index f732f00a36b7..2f3c3f139b66 100644 --- a/head/cli/k0s_etcd_leave/index.html +++ b/head/cli/k0s_etcd_leave/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_etcd_member-list/index.html b/head/cli/k0s_etcd_member-list/index.html index a5098f8145ef..293cf8f319fe 100644 --- a/head/cli/k0s_etcd_member-list/index.html +++ b/head/cli/k0s_etcd_member-list/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_install/index.html b/head/cli/k0s_install/index.html index ffccefc08b40..7ad5393e8772 100644 --- a/head/cli/k0s_install/index.html +++ b/head/cli/k0s_install/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_install_controller/index.html b/head/cli/k0s_install_controller/index.html index fa758c7be4b3..84d8089acfc9 100644 --- a/head/cli/k0s_install_controller/index.html +++ b/head/cli/k0s_install_controller/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_install_worker/index.html b/head/cli/k0s_install_worker/index.html index 29b378a562c6..c6b1abb113bd 100644 --- a/head/cli/k0s_install_worker/index.html +++ b/head/cli/k0s_install_worker/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_kubeconfig/index.html b/head/cli/k0s_kubeconfig/index.html index 8c7302aad88e..c1108190dba4 100644 --- a/head/cli/k0s_kubeconfig/index.html +++ b/head/cli/k0s_kubeconfig/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_kubeconfig_admin/index.html b/head/cli/k0s_kubeconfig_admin/index.html index 16e675eb5280..70444c3d13be 100644 --- a/head/cli/k0s_kubeconfig_admin/index.html +++ b/head/cli/k0s_kubeconfig_admin/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_kubeconfig_create/index.html b/head/cli/k0s_kubeconfig_create/index.html index 2a38c9d9662c..9ec75014ce89 100644 --- a/head/cli/k0s_kubeconfig_create/index.html +++ b/head/cli/k0s_kubeconfig_create/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_kubectl/index.html b/head/cli/k0s_kubectl/index.html index 5eaeddefe050..7fc25fcfb252 100644 --- a/head/cli/k0s_kubectl/index.html +++ b/head/cli/k0s_kubectl/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_reset/index.html b/head/cli/k0s_reset/index.html index bc984362e751..4c809f48e59f 100644 --- a/head/cli/k0s_reset/index.html +++ b/head/cli/k0s_reset/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_restore/index.html b/head/cli/k0s_restore/index.html index 2985e1070930..f5f5c81c4be1 100644 --- a/head/cli/k0s_restore/index.html +++ b/head/cli/k0s_restore/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_start/index.html b/head/cli/k0s_start/index.html index c4abd13a27e2..264e631115ce 100644 --- a/head/cli/k0s_start/index.html +++ b/head/cli/k0s_start/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_status/index.html b/head/cli/k0s_status/index.html index 6fadb1d6c2eb..63b7943cd03c 100644 --- a/head/cli/k0s_status/index.html +++ b/head/cli/k0s_status/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_status_components/index.html b/head/cli/k0s_status_components/index.html index b9861a057209..daed0265f5f8 100644 --- a/head/cli/k0s_status_components/index.html +++ b/head/cli/k0s_status_components/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_stop/index.html b/head/cli/k0s_stop/index.html index 80fd569365db..81db72dba64a 100644 --- a/head/cli/k0s_stop/index.html +++ b/head/cli/k0s_stop/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_sysinfo/index.html b/head/cli/k0s_sysinfo/index.html index 5ecfceda7bab..e30ffdca8082 100644 --- a/head/cli/k0s_sysinfo/index.html +++ b/head/cli/k0s_sysinfo/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_token/index.html b/head/cli/k0s_token/index.html index ed968d9cbca3..1981d10f1fe6 100644 --- a/head/cli/k0s_token/index.html +++ b/head/cli/k0s_token/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_token_create/index.html b/head/cli/k0s_token_create/index.html index 852a20b0bf13..f60bc2edc09d 100644 --- a/head/cli/k0s_token_create/index.html +++ b/head/cli/k0s_token_create/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_token_invalidate/index.html b/head/cli/k0s_token_invalidate/index.html index fa6b5892d44a..bd936a2aaf3b 100644 --- a/head/cli/k0s_token_invalidate/index.html +++ b/head/cli/k0s_token_invalidate/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_token_list/index.html b/head/cli/k0s_token_list/index.html index 051f794e20e7..002b84065568 100644 --- a/head/cli/k0s_token_list/index.html +++ b/head/cli/k0s_token_list/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_token_pre-shared/index.html b/head/cli/k0s_token_pre-shared/index.html index a552416218e3..3c91a62b6f4a 100644 --- a/head/cli/k0s_token_pre-shared/index.html +++ b/head/cli/k0s_token_pre-shared/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_version/index.html b/head/cli/k0s_version/index.html index 83d87db3f6ac..707f767d9846 100644 --- a/head/cli/k0s_version/index.html +++ b/head/cli/k0s_version/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cli/k0s_worker/index.html b/head/cli/k0s_worker/index.html index 1987a0d6600b..863e77d32d3e 100644 --- a/head/cli/k0s_worker/index.html +++ b/head/cli/k0s_worker/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/cloud-providers/index.html b/head/cloud-providers/index.html index ad0b66ad5326..ba891708bb06 100644 --- a/head/cloud-providers/index.html +++ b/head/cloud-providers/index.html @@ -20,8 +20,9 @@ + - + @@ -29,15 +30,18 @@ - + - + + + + @@ -74,7 +78,7 @@ - + @@ -124,6 +128,7 @@
@@ -143,29 +148,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/commercial-support/index.html b/head/commercial-support/index.html index fd41291a80ff..42ad19c22a8f 100644 --- a/head/commercial-support/index.html +++ b/head/commercial-support/index.html @@ -20,8 +20,9 @@ + - + @@ -29,15 +30,18 @@ - + - + + + + @@ -74,7 +78,7 @@ - + @@ -124,6 +128,7 @@
@@ -143,29 +148,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/configuration-validation/index.html b/head/configuration-validation/index.html index 531d4ec626c7..448878c32169 100644 --- a/head/configuration-validation/index.html +++ b/head/configuration-validation/index.html @@ -20,8 +20,9 @@ + - + @@ -29,15 +30,18 @@ - + - + + + + @@ -74,7 +78,7 @@ - + @@ -124,6 +128,7 @@
@@ -143,29 +148,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/configuration/index.html b/head/configuration/index.html index 4184d75ef776..4aed6d5506d5 100644 --- a/head/configuration/index.html +++ b/head/configuration/index.html @@ -20,8 +20,9 @@ + - + @@ -29,15 +30,18 @@ - + - + + + + @@ -74,7 +78,7 @@ - + @@ -124,6 +128,7 @@
@@ -143,29 +148,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/conformance-testing/index.html b/head/conformance-testing/index.html index 509e05c8868a..f4c9859d60f1 100644 --- a/head/conformance-testing/index.html +++ b/head/conformance-testing/index.html @@ -16,8 +16,9 @@ + - + @@ -25,15 +26,18 @@ - + - + + + + @@ -70,7 +74,7 @@ - + @@ -120,6 +124,7 @@
@@ -139,29 +144,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
[Tag-stripped, template-only markup changes repeated across the remaining generated HTML pages: head/contributors/github_workflow, head/contributors/overview, head/contributors/testing, head/custom-ca, head/dual-stack, head/dynamic-configuration, head/environment-variables, head/examples/ambassador-ingress, head/examples/ansible-playbook, head/examples/gitops-flux, head/examples/metallb-loadbalancer, head/examples/nginx-ingress, head/examples/oidc/oidc-provider-configuration, head/examples/traefik-ingress, head/experimental-windows, head/external-runtime-deps, head/helm-charts, head/high-availability, head/internal/upgrading-calico, head/k0s-in-docker, head/k0s-multi-node, head/networking, head/nllb, head/podsecurity, head/raspberry-pi4, head/remove_controller, head/reset (each index.html).]
+ - + - + diff --git a/head/search/search_index.json b/head/search/search_index.json index 6cdd26d41aea..0298b90f4c4a 100644 --- a/head/search/search_index.json +++ b/head/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"k0s - The Zero Friction Kubernetes","text":"

k0s is an open source, all-inclusive Kubernetes distribution, which is configured with all of the features needed to build a Kubernetes cluster. Due to its simple design, flexible deployment options and modest system requirements, k0s is well suited for

  • Any cloud
  • Bare metal
  • Edge and IoT

k0s drastically reduces the complexity of installing and running a CNCF certified Kubernetes distribution. With k0s new clusters can be bootstrapped in minutes and developer friction is reduced to zero. This allows anyone with no special skills or expertise in Kubernetes to easily get started.

k0s is distributed as a single binary with zero host OS dependencies besides the host OS kernel. It works with any Linux without additional software packages or configuration. Any security vulnerabilities or performance issues can be fixed directly in the k0s distribution, which makes it extremely straightforward to keep clusters up to date and secure.

"},{"location":"#what-happened-to-github-stargazers","title":"What happened to Github stargazers?","text":"

In September 2022 we made a human error while creating some build automation scripts and tools for the Github repository. Our automation accidentally changed the repo to a private one for a few minutes. That in itself is not a big deal, and everything was restored quickly. The nasty side effect, however, is that it also removed all the stargazers at that point. :(

Before that mishap we had 4776 stargazers, making k0s one of the most popular Kubernetes distros out there.

So if you are reading this and have not yet starred the k0s repo, we would highly appreciate the :star: to get our numbers closer to what they used to be.

"},{"location":"#key-features","title":"Key Features","text":"
  • Certified and 100% upstream Kubernetes
  • Multiple installation methods: single-node, multi-node, airgap and Docker
  • Automatic lifecycle management with k0sctl: upgrade, backup and restore
  • Modest system requirements (1 vCPU, 1 GB RAM)
  • Available as a single binary with no external runtime dependencies besides the kernel
  • Flexible deployment options with control plane isolation as default
  • Scalable from a single node to large, highly available clusters
  • Supports custom Container Network Interface (CNI) plugins (Kube-Router is the default, Calico is offered as a preconfigured alternative)
  • Supports custom Container Runtime Interface (CRI) plugins (containerd is the default)
  • Supports all Kubernetes storage options with Container Storage Interface (CSI), includes OpenEBS host-local storage provider
  • Supports a variety of datastore backends: etcd (default for multi-node clusters), SQLite (default for single node clusters), MySQL, and PostgreSQL
  • Supports x86-64, ARM64 and ARMv7
  • Includes Konnectivity service, CoreDNS and Metrics Server
"},{"location":"#getting-started","title":"Getting Started","text":"

Quick Start Guide for creating a full Kubernetes cluster with a single node.

"},{"location":"#demo","title":"Demo","text":""},{"location":"#community-support","title":"Community Support","text":"
  • Lens Forums - Request for support and help from the Lens and k0s community.
  • GitHub Issues - Submit your issues and feature requests via GitHub.

We welcome your help in building k0s! If you are interested, we invite you to check out the Contributing Guide and the Code of Conduct.

"},{"location":"#commercial-support","title":"Commercial Support","text":"

Mirantis offers technical support, professional services and training for k0s. The support subscriptions include, for example, prioritized support (Phone, Web, Email) and access to verified extensions on top of your k0s cluster.

For any k0s inquiries, please contact us via email info@k0sproject.io.

"},{"location":"CODE_OF_CONDUCT/","title":"k0s Community Code Of Conduct","text":"

Please refer to our contributor code of conduct.

"},{"location":"FAQ/","title":"Frequently asked questions","text":""},{"location":"FAQ/#how-is-k0s-pronounced","title":"How is k0s pronounced?","text":"

kay-zero-ess

"},{"location":"FAQ/#how-do-i-run-a-single-node-cluster","title":"How do I run a single node cluster?","text":"

The cluster can be started with:

k0s controller --single\n

See also the Getting Started tutorial.

"},{"location":"FAQ/#how-do-i-connect-to-the-cluster","title":"How do I connect to the cluster?","text":"

You can find the config in ${DATADIR}/pki/admin.conf (default: /var/lib/k0s/pki/admin.conf). Copy this file and change the localhost entry to the public IP of the controller. Use the modified config to connect with kubectl:

export KUBECONFIG=/path/to/admin.conf\nkubectl ...\n
"},{"location":"FAQ/#why-doesnt-kubectl-get-nodes-list-the-k0s-controllers","title":"Why doesn't kubectl get nodes list the k0s controllers?","text":"

By default, the control plane does not run kubelet at all and will not accept any workloads, so the controller will not show up in the node list in kubectl. If you want your controller to accept workloads and run pods, you can do so with k0s controller --enable-worker (recommended only for test/dev/POC environments).
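A minimal sketch of the difference, assuming a host with the k0s binary installed and using the bundled kubectl wrapper:

# run the controller so that it also schedules workloads (test/dev only)
k0s controller --enable-worker

# the controller now appears in the node list
k0s kubectl get nodes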

"},{"location":"FAQ/#is-k0sproject-really-open-source","title":"Is k0sproject really open source?","text":"

Yes, k0sproject is 100% open source. The source code is under Apache 2 and the documentation is under the Creative Commons License. Mirantis, Inc. is the main contributor and sponsor for this OSS project: building all the binaries from upstream, performing necessary security scans and calculating checksums so that it's easy and safe to use. The use of these ready-made binaries is subject to the Mirantis EULA, and the binaries include only open source software.

"},{"location":"airgap-install/","title":"Airgap install","text":"

You can install k0s in an environment with restricted Internet access. Airgap installation requires an image bundle, which contains all the needed container images. There are two options to get the image bundle:

  • Use a ready-made image bundle, which is created for each k0s release. It can be downloaded from the releases page.
  • Create your own image bundle. In this case, you can easily customize the bundle to also include container images, which are not used by default in k0s.
"},{"location":"airgap-install/#prerequisites","title":"Prerequisites","text":"

In order to create your own image bundle, you need

  • A working cluster with at least one controller, to be used to build the image bundle. For more information, refer to the Quick Start Guide.
  • The containerd CLI management tool ctr, installed on the worker machine (refer to the ContainerD getting-started guide).
"},{"location":"airgap-install/#1-create-your-own-image-bundle-optional","title":"1. Create your own image bundle (optional)","text":"

k0s/containerd uses OCI (Open Container Initiative) bundles for airgap installation. OCI bundles must be uncompressed. As OCI bundles are built specifically for each architecture, create an OCI bundle that uses the same processor architecture (x86-64, ARM64, ARMv7) as on the target system.

k0s offers two methods for creating OCI bundles, one using Docker and the other using a previously set up k0s worker. Be aware, though, that you cannot use the Docker method for the ARM architectures due to a kube-proxy image multiarch manifest problem.

Note: k0s strictly matches image architecture, e.g. arm/v7 images won't work for arm64.

"},{"location":"airgap-install/#docker","title":"Docker","text":"
  1. Pull the images.

    k0s airgap list-images | xargs -I{} docker pull {}\n
  2. Create a bundle.

    docker image save $(k0s airgap list-images | xargs) -o bundle_file\n
"},{"location":"airgap-install/#previously-set-up-k0s-worker","title":"Previously set up k0s worker","text":"

As containerd pulls all the images during the normal k0s worker bootstrap, you can use it to build the OCI bundle of images.

Use the following commands on a machine with an installed k0s worker:

ctr --namespace k8s.io \\\n--address /run/k0s/containerd.sock \\\nimages export bundle_file $(k0s airgap list-images | xargs)\n
"},{"location":"airgap-install/#2a-sync-the-bundle-file-with-the-airgapped-machine-locally","title":"2a. Sync the bundle file with the airgapped machine (locally)","text":"

Copy the bundle_file you created in the previous step or downloaded from the releases page to the target machine into the images directory in the k0s data directory. Copy the bundle only to the worker nodes. Controller nodes don't use it.

# mkdir -p /var/lib/k0s/images\n# cp bundle_file /var/lib/k0s/images/bundle_file\n
"},{"location":"airgap-install/#2b-sync-the-bundle-file-with-the-airgapped-machines-remotely-with-k0sctl","title":"2b. Sync the bundle file with the airgapped machines (remotely with k0sctl)","text":"

As an alternative to the previous step, you can use k0sctl to upload the bundle file to the worker nodes. k0sctl can also be used to upload k0s binary file to all nodes. Take a look at this example (k0sctl.yaml) with one controller and one worker node to upload the bundle file and k0s binary:

apiVersion: k0sctl.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s-cluster\nspec:\nk0s:\nversion: 1.27.5+k0s.0\nhosts:\n- role: controller\nssh:\naddress: <controller-ip-address>\nuser: ubuntu\nkeyPath: /path/.ssh/id_rsa\n\n#  uploadBinary: <boolean>\n#    When true the k0s binaries are cached and uploaded\n#    from the host running k0sctl instead of downloading\n#    directly to the target host.\nuploadBinary: true\n\n#  k0sBinaryPath: <local filepath>\n#    Upload a custom or manually downloaded k0s binary\n#    from a local path on the host running k0sctl to the\n#    target host.\n# k0sBinaryPath: path/to/k0s_binary/k0s\n\n- role: worker\nssh:\naddress: <worker-ip-address>\nuser: ubuntu\nkeyPath: /path/.ssh/id_rsa\nuploadBinary: true\nfiles:\n# This airgap bundle file will be uploaded from the k0sctl\n# host to the specified directory on the target host\n- src: /local/path/to/bundle-file/airgap-bundle-amd64.tar\ndstDir: /var/lib/k0s/images/\nperm: 0755\n
"},{"location":"airgap-install/#3-ensure-pull-policy-in-the-k0syaml-optional","title":"3. Ensure pull policy in the k0s.yaml (optional)","text":"

Use the following k0s.yaml to ensure that containerd does not pull images for k0s components from the Internet at any time.

apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s\nspec:\nimages:\ndefault_pull_policy: Never\n
"},{"location":"airgap-install/#4-set-up-the-controller-and-worker-nodes","title":"4. Set up the controller and worker nodes","text":"

Refer to the Manual Install for information on setting up the controller and worker nodes locally. Alternatively, you can use k0sctl.

Note: During worker startup, k0s imports all bundles from the $K0S_DATA_DIR/images directory before starting kubelet.
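As an illustrative check (reusing the same containerd socket and namespace as in the export step above), you can confirm on an airgapped worker that the bundled images were imported:

ctr --namespace k8s.io \
  --address /run/k0s/containerd.sock \
  images ls -q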

"},{"location":"architecture/","title":"Architecture","text":"

Note: As k0s is a new and dynamic project, the product architecture may occasionally outpace the documentation. The high level concepts and patterns, however, should always apply.

"},{"location":"architecture/#packaging","title":"Packaging","text":"

The k0s package is a single, self-extracting binary that embeds Kubernetes binaries, the benefits of which include:

  • Statically compiled
  • No OS-level dependencies
  • Requires no RPMs, dependencies, snaps, or any other OS-specific packaging
  • Provides a single package for all operating systems
  • Allows full version control for each dependency

"},{"location":"architecture/#control-plane","title":"Control plane","text":"

As a single binary, k0s acts as the process supervisor for all other control plane components. As such, there is no container engine or kubelet running on controllers by default, which means that a cluster user cannot schedule workloads onto controller nodes.

Using k0s you can create, manage, and configure each of the components, running each as a \"naked\" process. Thus, there is no container engine running on the controller node.

"},{"location":"architecture/#storage","title":"Storage","text":"

Kubernetes control plane typically supports only etcd as the datastore. k0s, however, supports many other datastore options in addition to etcd, which it achieves by including kine. Kine allows the use of a wide variety of backend data stores, such as MySQL, PostgreSQL, SQLite, and dqlite (refer to the spec.storage documentation).

In the case of k0s managed etcd, k0s manages the full lifecycle of the etcd cluster. For example, when a new controller node is joined with k0s controller \"long-join-token\", k0s automatically adjusts the etcd cluster membership info to allow the new member to join the cluster.

Note: k0s cannot shrink the etcd cluster. As such, to shut down the k0s controller on a node, that node must first be manually removed from the etcd cluster.
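A hedged sketch of that manual removal, assuming the k0s etcd subcommands available in recent releases (flag names may differ between versions):

# on any remaining controller: inspect the current etcd membership
k0s etcd member-list

# remove the departing controller from the etcd cluster
k0s etcd leave --peer-address <ip-of-the-controller-being-removed>

# only after this, stop and clean up k0s on the removed node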

"},{"location":"architecture/#worker-node","title":"Worker node","text":"

As with the control plane, with k0s you can create and manage the core worker components as naked processes on the worker node.

By default, k0s workers use containerd as a high-level runtime and runc as a low-level runtime. Custom runtimes are also supported; refer to Using custom CRI runtime.

"},{"location":"autopilot-multicommand/","title":"Multi-Command Plans","text":"

Autopilot relies on a Plan for defining the Commands that should be executed, the Signal Nodes that each should be run on, and the status of each Command.

A Plan:

  • Defines one or many Commands that specify what actions should be performed.
  • Specifies how Signal Nodes should be discovered per-Command.
  • Saves the status of the Plan execution by resolved Signal Nodes

A Command:

  • An instructional step inside of a Plan that is applied against a Signal Node

A Signal Node:

  • Any node (controller or worker) that can receive updates with Autopilot.
"},{"location":"autopilot-multicommand/#execution","title":"Execution","text":"

The execution of a Plan is the result of processing Commands through a number of Processing States.

When a Plan is executed, each of the Commands are executed in the order of their appearance in the Plan.

  • A Plan transitions to the next Command only when the current Command posts a state of Completed.
  • Any Command that posts one of the recognized Error States will cause the current Command and the Plan to abort processing.
    • The status of the Command and Plan will reflect this.
  • A Plan is considered finished only when all of its defined Commands post a Completed state.
"},{"location":"autopilot-multicommand/#status","title":"Status","text":"

The progress and state of each Command is recorded in the Plan status.

  • Every Command in the Plan has an associated status entry with the same index as the Command
    • ie. The second Command in a Plan has an index of 1, and so does its status.
  • The status of all Commands is taken into consideration when determining if the Plan is finished.
"},{"location":"autopilot-multicommand/#example","title":"Example","text":"

The following is an example of a Plan that has been applied and is currently being processed by autopilot.

(line numbers added for commentary below)

 1: apiVersion: autopilot.k0sproject.io/v1beta2\n2:  kind: Plan\n3:  metadata:\n4:    annotations:\n5:      <omitted>\n6:  spec:\n7:    commands:\n8:    - airgapupdate:\n9:        version: v1.27.5+k0s.0\n10:        platforms:\n11:          linux-amd64:\n12:            url: https://github.com/k0sproject/k0s/releases/download/v1.27.5+k0s.0/k0s-airgap-bundle-v1.27.5+k0s.0-amd64\n13:        workers:\n14:          discovery:\n15:            static:\n16:              nodes:\n17:              - worker0\n18:    - k0supdate:\n19:        version: v1.27.5+k0s.0\n20:        platforms:\n21:          linux-amd64:\n22:            url: https://github.com/k0sproject/k0s/releases/download/v1.27.5+k0s.0/k0s-v1.27.5+k0s.0-amd64\n23:        targets:\n24:          controllers:\n25:            discovery:\n26:              static:\n27:                nodes:\n28:                - controller0\n29:          workers:\n30:            discovery:\n31:              static:\n32:                nodes:\n33:                - worker0\n34:    id: id123\n35:    timestamp: now\n36:  status:\n37:    commands:\n38:    - airgapupdate:\n39:        workers:\n40:        - lastUpdatedTimestamp: \"2022-05-11T19:13:02Z\"\n41:          name: worker0\n42:          state: SignalSent\n43:      id: 0\n44:      state: SchedulableWait\n45:    - id: 1\n46:      k0supdate:\n47:        controllers:\n48:        - lastUpdatedTimestamp: \"2022-05-11T19:13:02Z\"\n49:          name: controller0\n50:          state: SignalPending\n51:        workers:\n52:        - lastUpdatedTimestamp: \"2022-05-11T19:13:02Z\"\n53:          name: worker0\n54:          state: SignalPending\n55:      state: SchedulableWait\n56:    state: SchedulableWait\n
  • Lines 7-33 are the two Commands that make up this plan -- an airgapupdate and k0supdate.
  • Lines 38-55 are the associated status entries for the two Commands.

The state of this Plan excerpt is that autopilot has successfully processed the Plan, and has begun processing the airgapupdate Command. Its status indicates SignalSent, which means that the Signal Node has been sent signaling information to perform an airgap update.

"},{"location":"autopilot-multicommand/#processing-states","title":"Processing States","text":"

The following are the various states that both Plans and Commands adhere to.

stateDiagram-v2\n    [*]-->NewPlan\n    NewPlan-->SchedulableWait\n    NewPlan-->Errors***\n\n    SchedulableWait-->Schedulable\n    SchedulableWait-->Completed\n    Schedulable-->SchedulableWait\n\n    Errors***-->[*]\n    Completed-->[*]

Note that the Errors state is elaborated in detail below in Error States*.

"},{"location":"autopilot-multicommand/#newplan","title":"NewPlan","text":"

When a Plan is created with the name autopilot, the NewPlan state processing takes effect.

It is the responsibility of NewPlan to ensure that the status of all the Commands are represented in the Plan status. This Plan status is needed at later points in Plan processing to determine if the entire Plan is completed.

The main difference between NewPlan and all the other states is that NewPlan will iterate over all commands; the other states deal with the active command.

"},{"location":"autopilot-multicommand/#schedulablewait","title":"SchedulableWait","text":"

Used to evaluate a Command to determine if it can be scheduled for processing. If it is determined that the Command can be processed, the state is set to Schedulable.

"},{"location":"autopilot-multicommand/#schedulable","title":"Schedulable","text":"

The Schedulable state is set by SchedulableWait to indicate that this command should execute. The execution of a Command in this state runs whichever logic is defined by the Command.

The ending of this state should either transition to SchedulableWait for further processing and completion detection, or transition to an error.

"},{"location":"autopilot-multicommand/#completed","title":"Completed","text":"

The Completed state indicates that the command has finished processing. Once a plan/command is in the Completed state, no further processing will occur on it.

"},{"location":"autopilot-multicommand/#error-states","title":"Error States","text":"

When plan or command processing goes into one of the designated error states, this is considered fatal and processing of the plan/command will terminate.

Error states are generally defined by the Command implementation. The core autopilot functionality is only interested in the 4 core states (NewPlan, SchedulableWait, Schedulable, Completed), and treats all other states as errors.

flowchart TD\n    Errors --> InconsistentTargets\n    Errors --> IncompleteTargets\n    Errors --> Restricted\n    Errors --> MissingPlatform\n    Errors --> MissingSignalNode
Error State | Command | States | Description
InconsistentTargets | k0supdate | Schedulable | Indicates that a Signal Node probe has failed for any node that was previously discovered during NewPlan.
IncompleteTargets | airgapupdate, k0supdate | NewPlan, Schedulable | Indicates that a Signal Node that existed during the discover phase in NewPlan no longer exists (ie. no ControlNode or Node object)
Restricted | airgapupdate, k0supdate | NewPlan | Indicates that a Plan has requested an update of a Signal Node type that contradicts the startup exclusions (the --exclude-from-plans argument)
MissingSignalNode | airgapupdate, k0supdate | Schedulable | Indicates that a Signal Node that existed during the discover phase in NewPlan no longer exists (ie. no matching ControlNode or Node object)
"},{"location":"autopilot-multicommand/#sequence-example","title":"Sequence: Example","text":"

Using the example above as a reference, this outlines the basic sequence of state transitions and the operations performed on each object.

sequenceDiagram\n  PlanStateHandler->>+AirgapUpdateCommand: State: NewPlan\n  AirgapUpdateCommand->>-AirgapUpdateCommand: cmd.NewPlan() -- >SchedulableWait\n  PlanStateHandler->>+K0sUpdateCommand: State: NewPlan\n  K0sUpdateCommand->>-K0sUpdateCommand: cmd.NewPlan() --> SchedulableWait\n  Note over PlanStateHandler,SignalNode(worker0): NewPlan Finished / All Commands\n\n  PlanStateHandler->>+AirgapUpdateCommand: State: SchedulableWait\n  AirgapUpdateCommand->>-AirgapUpdateCommand: cmd.SchedulableWait() --> Schedulable\n  PlanStateHandler->>+AirgapUpdateCommand: State: Schedulable\n  AirgapUpdateCommand->>-SignalNode(worker0): signal_v2(airgap-data) --> SchedulableWait\n  PlanStateHandler->>+AirgapUpdateCommand: State: SchedulableWait\n  AirgapUpdateCommand->>-AirgapUpdateCommand: cmd.SchedulableWait() --> Completed\n  Note over PlanStateHandler,SignalNode(worker0): AirgapUpdate Finished / worker0\n\n  PlanStateHandler->>+K0sUpdateCommand: State: SchedulableWait\n  K0sUpdateCommand->>-K0sUpdateCommand: cmd.SchedulableWait() --> Schedulable\n  PlanStateHandler->>+K0sUpdateCommand: State: Schedulable\n  K0sUpdateCommand->>-SignalNode(controller0): signal_v2(k0s-data) --> SchedulableWait\n  PlanStateHandler->>+K0sUpdateCommand: State: SchedulableWait\n  K0sUpdateCommand->>-K0sUpdateCommand: cmd.SchedulableWait() --> Completed\n  Note over PlanStateHandler,SignalNode(controller0): K0sUpdate Finished / controller0\n\n  PlanStateHandler->>+K0sUpdateCommand: State: SchedulableWait\n  K0sUpdateCommand->>-K0sUpdateCommand: cmd.SchedulableWait() --> Schedulable\n  PlanStateHandler->>+K0sUpdateCommand: State: Schedulable\n  K0sUpdateCommand->>-SignalNode(worker0): signal_v2(k0s-data) --> SchedulableWait\n  PlanStateHandler->>+K0sUpdateCommand: State: SchedulableWait\n  K0sUpdateCommand->>-K0sUpdateCommand: cmd.SchedulableWait() --> Completed\n  Note over PlanStateHandler,SignalNode(worker0): K0sUpdate Finished / worker0\n\n  PlanStateHandler->>PlanStateHandler: Completed
"},{"location":"autopilot/","title":"Autopilot","text":"

A tool for updating your k0s controller and worker nodes using specialized plans. There is a public update server hosted on the same domain as the documentation site; see the example below on how to use it. Only a single channel, edge_release, is available, and it exposes the latest released version.

"},{"location":"autopilot/#how-it-works","title":"How it works","text":"
  • You create a Plan YAML
    • Defining the update payload (new version of k0s, URLs for platforms, etc)
    • Add definitions for all the nodes that should receive the update.
      • Either statically, or dynamically using label/field selectors
  • Apply the Plan
    • Applying a Plan is a simple kubectl apply operation.
  • Monitor the progress
    • The applied Plan provides a status that details the progress.
"},{"location":"autopilot/#automatic-updates","title":"Automatic updates","text":"

To enable automatic updates, create an UpdateConfig object:

apiVersion: autopilot.k0sproject.io/v1beta2\nkind: UpdateConfig\nmetadata:\nname: example\nnamespace: default\nspec:\nchannel: edge_release\nupdateServer: https://docs.k0sproject.io/\nupgradeStrategy:\ncron: \"0 12 * * TUE,WED\" # Check for updates at 12:00 on Tuesday and Wednesday.\n
"},{"location":"autopilot/#safeguards","title":"Safeguards","text":"

There are a number of safeguards in place to avoid breaking a cluster.

"},{"location":"autopilot/#stateless-component","title":"Stateless Component","text":"
  • The autopilot component was designed not to require any heavy state or massive synchronization. Controllers can disappear, and backup controllers can resume the autopilot operations.
"},{"location":"autopilot/#workers-update-only-after-controllers","title":"Workers Update Only After Controllers","text":"
  • The versioning that Kubelet and the Kubernetes API server adhere to requires that Kubelets should not be of a newer version than the API server.
  • Autopilot handles this by updating all of the controller nodes first when a Plan is applied that has both controller and worker nodes. Only when all controllers have updated successfully will worker nodes receive their update instructions.
"},{"location":"autopilot/#plans-are-immutable","title":"Plans are Immutable","text":"
  • When you apply a Plan, autopilot evaluates all of the controllers and workers that should be included into the Plan, and tracks them in the status. After this point, no additional changes to the plan (other than status) will be recognized.
    • This helps in largely dynamic worker node environments where nodes that may have been matched by the selector discovery method no longer exist by the time the update is ready to be scheduled.
"},{"location":"autopilot/#controller-quorum-safety","title":"Controller Quorum Safety","text":"
  • Prior to scheduling a controller update, autopilot queries the API server of all controllers to ensure that they report a successful /ready
  • Only once all controllers are /ready will the current controller get sent update signaling.
  • In the event that any controller reports a non-ready, the Plan transitions into an InconsistentTargets state, and the Plan execution ends.
"},{"location":"autopilot/#controllers-update-sequentially","title":"Controllers Update Sequentially","text":"
  • Despite having the configuration options for controllers to set concurrency, only one controller will be updated at a time.
"},{"location":"autopilot/#update-payload-verification","title":"Update Payload Verification","text":"
  • Each update object payload can provide an optional sha256 hash of the update content (specified in url), which is compared against the downloaded update content (see the sketch below).
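A short sketch of producing such a hash with standard tooling (the file name is only an illustration taken from the example plan below):

sha256sum k0s-v1.27.5+k0s.0-amd64
# put the resulting hex digest into the plan's platforms.<platform>.sha256 field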
"},{"location":"autopilot/#configuration","title":"Configuration","text":"

Autopilot relies on a Plan object for its instructions on what to update.

Here is an example Autopilot plan:

apiVersion: autopilot.k0sproject.io/v1beta2\nkind: Plan\nmetadata:\nname: autopilot\n\nspec:\nid: id1234\ntimestamp: now\n\ncommands:\n- k0supdate:\nversion: v1.27.5+k0s.0\nplatforms:\nlinux-amd64:\nurl: https://github.com/k0sproject/k0s/releases/download/v1.27.5+k0s.0/k0s-v1.27.5+k0s.0-amd64\nsha256: '0000000000000000000000000000000000000000000000000000000000000000'\ntargets:\ncontrollers:\ndiscovery:\nstatic:\nnodes:\n- ip-172-31-44-131\n- ip-172-31-42-134\n- ip-172-31-39-65\nworkers:\nlimits:\nconcurrent: 5\ndiscovery:\nselector:\nlabels: environment=staging\nfields: metadata.name=worker2\n
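Assuming the plan above is saved to a local file (the file name here is just an illustration), applying and following it uses plain kubectl:

kubectl apply -f autopilot-plan.yaml

# follow the progress via the plan status
kubectl get plan autopilot -oyaml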
"},{"location":"autopilot/#core-fields","title":"Core Fields","text":""},{"location":"autopilot/#apiversion-string-required","title":"apiVersion <string> (required)","text":"
  • The current version of the Autopilot API is v1beta2, with a full group-version of autopilot.k0sproject.io/v1beta2
"},{"location":"autopilot/#metadataname-string-required","title":"metadata.name <string> (required)","text":"
  • The name of the plan should always be autopilot
    • Note: Plans will not execute if they don't follow this convention.
"},{"location":"autopilot/#spec-fields","title":"Spec Fields","text":""},{"location":"autopilot/#specid-string-optional","title":"spec.id <string> (optional)","text":"
  • An identifier that can be provided by the creator for informational and tracking purposes.
"},{"location":"autopilot/#spectimestamp-string-optional","title":"spec.timestamp <string> (optional)","text":"
  • A timestamp value that can be provided by the creator for informational purposes. Autopilot does nothing with this information.
"},{"location":"autopilot/#speccommands-required","title":"spec.commands[] (required)","text":"
  • The commands field contains all of the commands that should be performed as part of the plan.
"},{"location":"autopilot/#k0supdate-command","title":"k0supdate Command","text":""},{"location":"autopilot/#speccommandsk0supdateversion-string-required","title":"spec.commands[].k0supdate.version <string> (required)","text":"
  • The version of the binary being updated. This version is used to compare against the installed version before and after update to ensure success.
"},{"location":"autopilot/#speccommandsk0supdateplatformsurl-string-required","title":"spec.commands[].k0supdate.platforms.*.url <string> (required)","text":"
  • A URL specifying where the updated binary should be downloaded from for this specific platform.
    • The naming of platforms is a combination of $GOOS and $GOARCH, separated by a hyphen (-)
      • eg: linux-amd64, linux-arm64, linux-arm
    • Note: The main supported platform is linux. Autopilot may work on other platforms, however this has not been tested.
"},{"location":"autopilot/#speccommandsk0supdateplatformssha256-string-optional","title":"spec.commands[].k0supdate.platforms.*.sha256 <string> (optional)","text":"
  • If a SHA256 hash is provided for the binary, the completed download will be verified against it.
"},{"location":"autopilot/#speccommandsk0supdatetargetscontrollers-object-optional","title":"spec.commands[].k0supdate.targets.controllers <object> (optional)","text":"
  • This object provides the details of how controllers should be updated.
"},{"location":"autopilot/#speccommandsk0supdatetargetscontrollerslimitsconcurrent-int-fixed-as-1","title":"spec.commands[].k0supdate.targets.controllers.limits.concurrent <int> (fixed as 1)","text":"
  • The configuration allows for specifying the number of concurrent controller updates through the plan spec; however, for controller targets this is always fixed to 1.
  • By ensuring that only one controller updates at a time, we aim to avoid scenarios where quorum may be disrupted.
"},{"location":"autopilot/#speccommandsk0supdatetargetsworkers-object-optional","title":"spec.commands[].k0supdate.targets.workers <object> (optional)","text":"
  • This object provides the details of how workers should be updated.
"},{"location":"autopilot/#speccommandsk0supdatetargetsworkerslimitsconcurrent-int-optional-default-1","title":"spec.commands[].k0supdate.targets.workers.limits.concurrent <int> (optional, default = 1)","text":"
  • Specifying a concurrent value for worker targets will allow for that number of workers to be updated at a time. If no value is provided, 1 is assumed.
"},{"location":"autopilot/#airgapupdate-command","title":"airgapupdate Command","text":""},{"location":"autopilot/#speccommandsairgapupdateversion-string-required","title":"spec.commands[].airgapupdate.version <string> (required)","text":"
  • The version of the airgap bundle being updated.
"},{"location":"autopilot/#speccommandsairgapupdateplatformsurl-string-required","title":"spec.commands[].airgapupdate.platforms.*.url <string> (required)","text":"
  • A URL specifying where the updated binary should be downloaded from for this specific platform.
    • The naming of platforms is a combination of $GOOS and $GOARCH, separated by a hyphen (-)
      • eg: linux-amd64, linux-arm64, linux-arm
    • Note: The main supported platform is linux. Autopilot may work on other platforms, however this has not been tested.
"},{"location":"autopilot/#speccommandsairgapupdateplatformssha256-string-optional","title":"spec.commands[].airgapupdate.platforms.*.sha256 <string> (optional)","text":"
  • If a SHA256 hash is provided for the binary, the completed download will be verified against it.
"},{"location":"autopilot/#speccommandsairgapupdatetargetsworkers-object-optional","title":"spec.commands[].airgapupdate.targets.workers <object> (optional)","text":"
  • This object provides the details of how workers should be updated.
"},{"location":"autopilot/#speccommandsairgapupdatetargetsworkerslimitsconcurrent-int-optional-default-1","title":"spec.commands[].airgapupdate.targets.workers.limits.concurrent <int> (optional, default = 1)","text":"
  • Specifying a concurrent value for worker targets will allow for that number of workers to be updated at a time. If no value is provided, 1 is assumed.
"},{"location":"autopilot/#static-discovery","title":"Static Discovery","text":"

This defines the static discovery method used for this set of targets (controllers, workers). The static discovery method relies on a fixed set of hostnames defined in .nodes.

It is expected that a Node (workers) or ControlNode (controllers) object exists with the same name.

  static:\nnodes:\n- ip-172-31-44-131\n- ip-172-31-42-134\n- ip-172-31-39-65\n
"},{"location":"autopilot/#speccommandsk0supdatetargetsdiscoverystaticnodes-string-required-for-static","title":"spec.commands[].k0supdate.targets.*.discovery.static.nodes[] <string> (required for static)","text":"
  • A list of hostnames that should be included in the target set (controllers, workers).
"},{"location":"autopilot/#selector-target-discovery","title":"Selector Target Discovery","text":"

The selector target discovery method relies on a dynamic query to the Kubernetes API using labels and fields to produce a set of hosts that should be updated.

Providing both labels and fields in the selector definition will result in a logical AND of both operands.

  selector:\nlabels: environment=staging\nfields: metadata.name=worker2\n

Specifying an empty selector will result in all nodes being selected for this target set.

  selector: {}\n
"},{"location":"autopilot/#speccommandsk0supdatetargetsdiscoveryselectorlabels-string-optional","title":"spec.commands[].k0supdate.targets.*.discovery.selector.labels <string> (optional)","text":"
  • A collection of name/value labels that should be used for finding the appropriate nodes for the update of this target set.
"},{"location":"autopilot/#speccommandsk0supdatetargetsdiscoveryselectorfields-string-optional","title":"spec.commands[].k0supdate.targets.*.discovery.selector.fields <string> (optional)","text":"
  • A collection of name/value fields that should be used for finding the appropriate nodes for the update of this target set.
    • Note: Currently only the field metadata.name is available as a query field.
"},{"location":"autopilot/#status-reporting","title":"Status Reporting","text":"

After a Plan has been applied, its progress can be viewed in the .status of the autopilot Plan.

    kubectl get plan autopilot -oyaml\n

An example of a Plan status:

  status:\nstate: SchedulableWait\ncommands:\n- state: SchedulableWait\nk0supdate:\ncontrollers:\n- lastUpdatedTimestamp: \"2022-04-07T15:52:44Z\"\nname: controller0\nstate: SignalCompleted\n- lastUpdatedTimestamp: \"2022-04-07T15:52:24Z\"\nname: controller1\nstate: SignalCompleted\n- lastUpdatedTimestamp: \"2022-04-07T15:52:24Z\"\nname: controller2\nstate: SignalPending\nworkers:\n- lastUpdatedTimestamp: \"2022-04-07T15:52:24Z\"\nname: worker0\nstate: SignalPending\n- lastUpdatedTimestamp: \"2022-04-07T15:52:24Z\"\nname: worker1\nstate: SignalPending\n- lastUpdatedTimestamp: \"2022-04-07T15:52:24Z\"\nname: worker2\nstate: SignalPending\n

This status indicates that:

  • The overall status of the update is SchedulableWait, meaning that autopilot is waiting for the next opportunity to process a command.
  • There are three controller nodes
    • Two controllers have SignalCompleted successfully
    • One is waiting to be signalled (SignalPending)
  • There are also three worker nodes
    • All are awaiting signaling updates (SignalPending)
"},{"location":"autopilot/#plan-status","title":"Plan Status","text":"

The Plan status at .status.state represents the overall status of the autopilot update operation. There are a number of statuses available:

Status | Description | Ends Plan?
IncompleteTargets | There are nodes in the resolved Plan that do not have associated Node (worker) or ControlNode (controller) objects. | Yes
InconsistentTargets | A controller has reported itself as not-ready during the selection of the next controller to update. | Yes
Schedulable | Indicates that the Plan can be re-evaluated to determine which node to update next. | No
SchedulableWait | Scheduling operations are in progress, and no further update scheduling should occur. | No
Completed | The Plan has run successfully to completion. | Yes
Restricted | The Plan included node types (controller or worker) that violate the --exclude-from-plans restrictions. | Yes
"},{"location":"autopilot/#node-status","title":"Node Status","text":"

Similar to the Plan Status, the individual nodes can have their own statuses:

Status | Description
SignalPending | The node is available and awaiting an update signal
SignalSent | Update signaling has been successfully applied to this node.
MissingPlatform | This node is a platform that an update has not been provided for.
MissingSignalNode | This node does not have an associated Node (worker) or ControlNode (controller) object.
"},{"location":"autopilot/#updateconfig","title":"UpdateConfig","text":""},{"location":"autopilot/#updateconfig-core-fields","title":"UpdateConfig Core Fields","text":""},{"location":"autopilot/#apiversion-string-required-field","title":"apiVersion <string> (required field)","text":"
  • API version. The current version of the Autopilot API is v1beta2, with a full group-version of autopilot.k0sproject.io/v1beta2
"},{"location":"autopilot/#metadataname-string-required-field","title":"metadata.name <string> (required field)","text":"
  • Name of the config.
"},{"location":"autopilot/#spec","title":"Spec","text":""},{"location":"autopilot/#specchannel-string-optional","title":"spec.channel <string> (optional)","text":"
  • Update channel to use. Supported values: stable (default), unstable.
"},{"location":"autopilot/#specupdateserver-string-optional","title":"spec.updateServer <string> (optional)","text":"
  • Update server url.
"},{"location":"autopilot/#specupgradestrategycron-string-optional","title":"spec.upgradeStrategy.cron <string> (optional)","text":"
  • Schedule to check for updates in crontab format.
"},{"location":"autopilot/#specplanspec-string-optional","title":"spec.planSpec <string> (optional)","text":"
  • Describes the behavior of the autopilot generated Plan
"},{"location":"autopilot/#example","title":"Example","text":"
apiVersion: autopilot.k0sproject.io/v1beta2\nkind: UpdateConfig\nmetadata:\nname: example\nspec:\nchannel: stable\nupdateServer: https://updates.k0sproject.io/\nupgradeStrategy:\ncron: \"0 12 * * TUE,WED\" # Check for updates at 12:00 on Tuesday and Wednesday.\n# Optional. Specifies a created Plan object\nplanSpec:\ncommands:\n- k0supdate: # optional\nforceupdate: true # optional\ntargets:\ncontrollers:\ndiscovery:\nstatic:\nnodes:\n- ip-172-31-44-131\n- ip-172-31-42-134\n- ip-172-31-39-65\nworkers:\nlimits:\nconcurrent: 5\ndiscovery:\nselector:\nlabels: environment=staging\nfields: metadata.name=worker2\nairgapupdate: # optional\nworkers:\nlimits:\nconcurrent: 5\ndiscovery:\nselector:\nlabels: environment=staging\nfields: metadata.name=worker2\n
"},{"location":"autopilot/#faq","title":"FAQ","text":""},{"location":"autopilot/#q-how-do-i-apply-the-plan-and-controlnode-crds","title":"Q: How do I apply the Plan and ControlNode CRDs?","text":"

A: These CRD definitions are embedded in the autopilot binary and applied on startup. No additional action is needed.

"},{"location":"autopilot/#q-how-will-controlnode-instances-get-removed","title":"Q: How will ControlNode instances get removed?","text":"

A: ControlNode instances are created by autopilot controllers as they startup. When controllers disappear, they will not remove their associated ControlNode instance. It is the responsibility of the operator/administrator to ensure their maintenance.

"},{"location":"autopilot/#q-i-upgraded-my-workers-and-now-kubelets-are-no-longer-reporting","title":"Q: I upgraded my workers, and now Kubelets are no longer reporting","text":"

You probably upgraded your workers to an API version greater than what is available on the API server.

https://kubernetes.io/releases/version-skew-policy/

Make sure that your controllers are at the desired version first before upgrading workers.
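A quick way to compare the versions with standard kubectl commands (kubectl version reports the API server version, and kubectl get nodes shows each node's kubelet version):

kubectl version
kubectl get nodes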

"},{"location":"backup/","title":"Backup/Restore overview","text":"

k0s has integrated support for backing up cluster state and configuration. The k0s backup utility aims to back up and restore the k0s-managed parts of the cluster.

The backups created by the k0s backup command include the following pieces of your cluster:

  • certificates (the content of the <data-dir>/pki directory)
  • etcd snapshot, if the etcd datastore is used
  • Kine/SQLite snapshot, if the Kine/SQLite datastore is used
  • k0s.yaml
  • any custom defined manifests under the <data-dir>/manifests
  • any image bundles located under the <data-dir>/images
  • any helm configuration

Parts NOT covered by the backup utility:

  • PersistentVolumes of any running application
  • datastore, in case something other than etcd or Kine/SQLite is used
  • any configuration to the cluster introduced by manual changes (e.g. changes that weren't saved under the <data-dir>/manifests)

Any of the backup/restore related operations MUST be performed on the controller node.

"},{"location":"backup/#backuprestore-a-k0s-node-locally","title":"Backup/restore a k0s node locally","text":""},{"location":"backup/#backup-local","title":"Backup (local)","text":"

To create a backup, run the following command on the controller node:

k0s backup --save-path=<directory>\n

The directory used for the save-path value must exist and be writable. The default value is the current working directory. The command produces a backup archive using the following naming convention: k0s_backup_<ISODatetimeString>.tar.gz

Because of the DateTime usage, it is guaranteed that none of the previously created archives will be overwritten.

To output the backup archive to stdout, use - as the save path.

"},{"location":"backup/#restore-local","title":"Restore (local)","text":"

To restore the cluster state from the archive, use the following command on the controller node:

k0s restore /tmp/k0s_backup_2021-04-26T19_51_57_000Z.tar.gz\n

The command would fail if the data directory for the current controller has overlapping data with the backup archive content.

The command would use the archived k0s.yaml as the cluster configuration description.

If your cluster is HA, after restoring a single controller node, join the rest of the controller nodes to the cluster. For example, the steps for an N-node cluster would be (see the sketch after the list):

  • Restore backup on fresh machine
  • Run controller there
  • Join N-1 new machines to the cluster the same way as for the first setup.
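A rough sketch of those steps using k0s commands (assuming the default data directory; exact flags may vary between k0s versions):

# on the fresh machine: restore the backup, then install and start the controller
k0s restore /path/to/k0s_backup_<ISODatetimeString>.tar.gz
k0s install controller
k0s start

# create a join token for the remaining controllers
k0s token create --role=controller > controller-token

# on each of the N-1 new machines
k0s install controller --token-file /path/to/controller-token
k0s start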

To read the backup archive from stdin, use - as the file path.

"},{"location":"backup/#encrypting-backups-local","title":"Encrypting backups (local)","text":"

By using - as the save or restore path, it is possible to pipe the backup archive through an encryption utility such as GnuPG or OpenSSL.

Note that unencrypted data will still briefly exist as temporary files on the local file system during the backup archive generation.
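For example, a sketch of the OpenSSL variant using symmetric, passphrase-based encryption (adjust the cipher and options to your own policy):

# encrypt while backing up
k0s backup --save-path - | openssl enc -aes-256-cbc -pbkdf2 -out backup.tar.gz.enc

# decrypt and restore
openssl enc -d -aes-256-cbc -pbkdf2 -in backup.tar.gz.enc | k0s restore -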

"},{"location":"backup/#encrypting-backups-using-gnupg","title":"Encrypting backups using GnuPG","text":"

Follow the instructions for your operating system to install the gpg command if it is not already installed.

This tutorial only covers the bare minimum for example purposes. For secure key management practices and advanced usage refer to the GnuPG user manual.

To generate a new key-pair, use:

gpg --gen-key\n

The key will be stored in your key ring.

gpg --list-keys\n

This will output a list of keys:

/home/user/.gnupg/pubring.gpg\n------------------------------\npub   4096R/BD33228F 2022-01-13\nuid                  Example User <user@example.com>\nsub   4096R/2F78C251 2022-01-13\n

To export the private key for decrypting the backup on another host, note the key ID (\"BD33228F\" in this example) in the list and use:

gpg --export-secret-keys --armor BD33228F > k0s.key\n

To create an encrypted k0s backup:

k0s backup --save-path - | gpg --encrypt --recipient user@example.com > backup.tar.gz.gpg\n
"},{"location":"backup/#restoring-encrypted-backups-using-gnupg","title":"Restoring encrypted backups using GnuPG","text":"

You must have the private key in your gpg keychain. To import the key that was exported in the previous example, use:

gpg --import k0s.key\n

To restore the encrypted backup, use:

gpg --decrypt backup.tar.gz.gpg | k0s restore -\n
"},{"location":"backup/#backuprestore-a-k0s-cluster-using-k0sctl","title":"Backup/restore a k0s cluster using k0sctl","text":"

With k0sctl you can perform cluster level backup and restore remotely with one command.

"},{"location":"backup/#backup-remote","title":"Backup (remote)","text":"

To create a backup, run the following command:

k0sctl backup\n

k0sctl connects to the cluster nodes to create a backup. The backup file is stored in the current working directory.

"},{"location":"backup/#restore-remote","title":"Restore (remote)","text":"

To restore the cluster state from the archive, use the following command:

k0sctl apply --restore-from /path/to/backup_file.tar.gz\n

The control plane load balancer address (externalAddress) needs to remain the same between backup and restore. This is because all worker node components connect to this address, and it cannot currently be re-configured.

"},{"location":"cis_benchmark/","title":"Kube-bench Security Benchmark","text":"

Kube-bench is an open source tool which can be used to verify security best practices as defined in CIS Kubernetes Benchmark. It provides a number of tests to help harden your k0s clusters. By default, k0s will pass Kube-bench benchmarks with some exceptions, which are shown below.

"},{"location":"cis_benchmark/#run","title":"Run","text":"

Follow the Kube-bench quick start instructions.

After installing Kube-bench on the host that is running the k0s cluster, run the following command:

kube-bench run --config-dir docs/kube-bench/cfg/ --benchmark k0s-1.0\n
"},{"location":"cis_benchmark/#summary-of-disabled-checks","title":"Summary of disabled checks","text":""},{"location":"cis_benchmark/#master-node-security-configuration","title":"Master Node Security Configuration","text":"

The current configuration has a total of 8 master checks disabled:

  1. id: 1.2.10 - EventRateLimit requires external yaml config. It is left for the users to configure it

    type: skip\ntext: \"Ensure that the admission control plugin EventRateLimit is set (Manual)\"\n
  2. id: 1.2.12 - By default this isn't passed to the apiserver for air-gap functionality

    type: skip\ntext: \"Ensure that the admission control plugin AlwaysPullImages is set (Manual)\"\n
  3. id: 1.2.22 - For the sake of simplicity, k0s skips all audit configurations. It is left for the users to configure it

    type: skip\ntext: \"Ensure that the --audit-log-path argument is set (Automated)\"\n
  4. id: 1.2.23 - For the sake of simplicity, k0s skips all audit configurations. It is left for the users to configure it

    type: skip\ntext: \"Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)\"\n
  5. id: 1.2.24 - For the sake of simplicity, all audit configurations are skipped in k0s. It is left for the users to configure them

    type: skip\ntext: \"Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)\"\n
  6. id: 1.2.25 - For the sake of simplicity, all audit configurations are skipped in k0s. It is left for the users to configure them

    type: skip\ntext: \"Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)\"\n
  7. id: 1.2.33 - By default it is not enabled. Left for the users to decide

    type: skip\ntext: \"Ensure that the --encryption-provider-config argument is set as appropriate (Manual)\"\n
  8. id: 1.2.34 - By default it is not enabled. Left for the users to decide

    type: skip\ntext: \"Ensure that encryption providers are appropriately configured (Manual)\"\n
"},{"location":"cis_benchmark/#worker-node-security-configuration","title":"Worker Node Security Configuration","text":"

and 4 node checks disabled:

  1. id: 4.1.1 - not applicable since k0s does not use kubelet service file

    type: skip\ntext: \"Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)\"\n
  2. id: 4.1.2 - not applicable since k0s does not use kubelet service file

    type: skip\ntext: \"Ensure that the kubelet service file ownership is set to root:root (Automated)\"\n
  3. id: 4.2.6 - k0s does not set this. See https://github.com/kubernetes/kubernetes/issues/66693

    type: skip\ntext: \"Ensure that the --protect-kernel-defaults argument is set to true (Automated)\"\n
  4. id: 4.2.10 - k0s doesn't set this up because certs get auto rotated

    type: skip\ntext: \"Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)\"\n
"},{"location":"cis_benchmark/#control-plane-configuration","title":"Control Plane Configuration","text":"

3 checks are disabled for the control plane:

  1. id: 3.1.1 - To remain fully automated, k0s skips this check

    type: skip\ntext: \"Client certificate authentication should not be used for users (Manual)\"\n
  2. id: 3.2.1 - The out-of-the-box configuration does not have any audit policy configuration, but users can customize it in the spec.api.extraArgs section of the config (see the sketch after this list)

    type: skip\ntext: \"Ensure that a minimal audit policy is created (Manual)\"\n
  3. id: 3.2.2 - Same as previous

    type: skip\ntext: \"Ensure that the audit policy covers key security concerns (Manual)\"\n
"},{"location":"cis_benchmark/#kubernetes-policies","title":"Kubernetes Policies","text":"

Policy checks are also disabled. These checks are manual, and it is up to the end user to decide on them.

"},{"location":"cloud-providers/","title":"Cloud providers","text":"

k0s builds Kubernetes components in providerless mode, meaning that cloud providers are not built into k0s-managed Kubernetes components. As such, you must externally configure the cloud providers to enable their support in your k0s cluster (for more information on running Kubernetes with cloud providers, refer to the Kubernetes documentation).

"},{"location":"cloud-providers/#external-cloud-providers","title":"External Cloud Providers","text":""},{"location":"cloud-providers/#enable-cloud-provider-support-in-kubelet","title":"Enable cloud provider support in kubelet","text":"

Even though all components are built in providerless mode, you can still enable cloud provider mode for kubelet. To do this, run the workers with --enable-cloud-provider=true.

When deploying with k0sctl, you can add this into the installFlags of worker hosts.

spec:\nhosts:\n- ssh:\naddress: 10.0.0.1\nuser: root\nkeyPath: ~/.ssh/id_rsa\ninstallFlags:\n- --enable-cloud-provider\n- --kubelet-extra-args=\"--cloud-provider=external\"\nrole: worker\n
"},{"location":"cloud-providers/#deploy-the-cloud-provider","title":"Deploy the cloud provider","text":"

The easiest way to deploy cloud provider controllers is on the k0s cluster.

Use the manifest deployer built into k0s to deploy your cloud provider as a k0s-managed stack: simply drop all required manifests into the /var/lib/k0s/manifests/aws/ directory, and k0s will handle the deployment.
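
For example, assuming the provider's manifests are already present on the controller (the file name below is hypothetical), copying them into place is all that is needed:

mkdir -p /var/lib/k0s/manifests/aws\ncp aws-cloud-controller-manager.yaml /var/lib/k0s/manifests/aws/\n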

Note: The prerequisites for the various cloud providers can vary (for example, several require that configuration files be present on all of the nodes). Refer to your chosen cloud provider's documentation as necessary.

"},{"location":"cloud-providers/#k0s-cloud-provider","title":"k0s Cloud Provider","text":"

Alternatively, k0s provides its own lightweight cloud provider that can be used to statically assign ExternalIP values to worker nodes via Kubernetes annotations. This is beneficial for those who need to expose worker nodes externally via static IP assignments.

To enable this functionality, add the parameter --enable-k0s-cloud-provider=true to all controllers, and --enable-cloud-provider=true to all workers.

Adding a static IP address to a node using kubectl:

kubectl annotate \\\nnode <node> \\\nk0sproject.io/node-ip-external=<external IP>\n

Both IPv4 and IPv6 addresses are supported.

"},{"location":"cloud-providers/#defaults","title":"Defaults","text":"

The default node refresh interval is 2m, which can be overridden using the --k0s-cloud-provider-update-frequency=<duration> parameter when launching the controller(s).

The default port that the cloud provider binds to can be overridden using the --k0s-cloud-provider-port=<int> parameter when launching the controller(s).
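
As a sketch, combining the flags described above when starting a controller could look like this (the values are examples only, not recommendations):

k0s controller --enable-k0s-cloud-provider=true --k0s-cloud-provider-update-frequency=5m --k0s-cloud-provider-port=10258\n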

"},{"location":"commercial-support/","title":"Commercial support","text":"

Commercial support for k0s is offered by Mirantis Inc.

Mirantis provides various levels of support, from DevCare (9-to-5) all the way to OpsCare+ with a fully managed service.

On top of our normal release and support model, our commercial customers have access to critical security patches, even for released versions that fall outside of the Open Source maintained releases.1 Commercial support also includes support for k0s-related tooling such as k0sctl.

If you are interested in commercial support for k0s, check out our support description and contact us for further details.

  1. This assumes there is a compatible release of the upstream project with the fix\u00a0\u21a9

"},{"location":"configuration-validation/","title":"Configuration validation","text":"

The k0s command-line interface can validate the configuration syntax:

k0s validate config --config path/to/config/file\n

The validate config sub-command can validate the following:

  1. YAML formatting
  2. SAN addresses
  3. Network providers
  4. Worker profiles
"},{"location":"configuration/","title":"Configuration options","text":""},{"location":"configuration/#using-a-configuration-file","title":"Using a configuration file","text":"

k0s can be installed without a config file. In that case, the default configuration will be used. You can, however, create and run your own non-default configuration (used by the k0s controller nodes).

k0s supports providing only partial configurations. If a partial configuration is provided, k0s will use the defaults for any missing values.
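
For instance, a valid partial configuration could set only the network provider and leave everything else at its defaults (a minimal sketch):

apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s\nspec:\nnetwork:\nprovider: calico\n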

  1. Generate a yaml config file that uses the default settings.

    mkdir -p /etc/k0s\nk0s config create > /etc/k0s/k0s.yaml\n
  2. Modify the new yaml config file according to your needs; refer to the Configuration file reference below. You can remove the default values if you want, as k0s supports partial configs too.

  3. Install k0s with your new config file.

    sudo k0s install controller -c /etc/k0s/k0s.yaml\n
  4. If you need to modify your existing configuration later on, you can change your config file even while k0s is running, but remember to restart k0s to apply the changes.

    sudo k0s stop\nsudo k0s start\n
"},{"location":"configuration/#configuring-k0s-via-k0sctl","title":"Configuring k0s via k0sctl","text":"

k0sctl can deploy your configuration options at cluster creation time. Your options should be placed in the spec.k0s.config section of the k0sctl configuration file. See the section on how to install k0s via k0sctl and the k0sctl README for more information.
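
As a sketch, the k0s configuration is nested under spec.k0s.config in the k0sctl configuration file (hosts omitted for brevity):

apiVersion: k0sctl.k0sproject.io/v1beta1\nkind: Cluster\nspec:\nk0s:\nconfig:\nspec:\ntelemetry:\nenabled: false\n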

"},{"location":"configuration/#configuration-file-reference","title":"Configuration file reference","text":"

CAUTION: As many of the available options affect items deep in the stack, you should fully understand the correlation between the configuration file components and your specific environment before making any changes.

A YAML config file follows, with defaults as generated by the k0s config create command:

apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s\nspec:\napi:\naddress: 192.168.68.104\nexternalAddress: my-lb-address.example.com\nk0sApiPort: 9443\nport: 6443\nsans:\n- 192.168.68.104\ntunneledNetworkingMode: false\ncontrollerManager: {}\nextensions:\nhelm:\nconcurrencyLevel: 5\ncharts: null\nrepositories: null\nstorage:\ncreate_default_storage_class: false\ntype: external_storage\ninstallConfig:\nusers:\netcdUser: etcd\nkineUser: kube-apiserver\nkonnectivityUser: konnectivity-server\nkubeAPIserverUser: kube-apiserver\nkubeSchedulerUser: kube-scheduler\nkonnectivity:\nadminPort: 8133\nagentPort: 8132\nnetwork:\ncalico: null\nclusterDomain: cluster.local\ndualStack: {}\nkubeProxy:\nmetricsBindAddress: 0.0.0.0:10249\nmode: iptables\nkuberouter:\nautoMTU: true\nhairpin: Enabled\nipMasq: false\nmetricsPort: 8080\nmtu: 0\npeerRouterASNs: \"\"\npeerRouterIPs: \"\"\nnodeLocalLoadBalancing:\nenabled: false\nenvoyProxy:\napiServerBindPort: 7443\nimage:\nimage: docker.io/envoyproxy/envoy-distroless\nversion: v1.24.1\nkonnectivityServerBindPort: 7132\ntype: EnvoyProxy\npodCIDR: 10.244.0.0/16\nprovider: kuberouter\nserviceCIDR: 10.96.0.0/12\nscheduler: {}\nstorage:\netcd:\nexternalCluster: null\npeerAddress: 192.168.68.104\ntype: etcd\ntelemetry:\nenabled: true\nfeatureGates:\n- name: feature_XXX\nenabled: true\ncomponents: [\"kubelet\", \"kube-api\", \"kube-scheduler\"]\n- name: feature_YYY\nenabled: true\n-\nname: feature_ZZZ\nenabled: false\n
"},{"location":"configuration/#spec-key-detail","title":"spec Key Detail","text":""},{"location":"configuration/#specapi","title":"spec.api","text":"Element Description externalAddress The loadbalancer address (for k0s controllers running behind a loadbalancer). Configures all cluster components to connect to this address and also configures this address for use when joining new nodes to the cluster. address Local address on which to bind an API. Also serves as one of the addresses pushed on the k0s create service certificate on the API. Defaults to first non-local address found on the node. sans List of additional addresses to push to API servers serving the certificate. extraArgs Map of key-values (strings) for any extra arguments to pass down to Kubernetes api-server process. port\u00b9 Custom port for kube-api server to listen on (default: 6443) k0sApiPort\u00b9 Custom port for k0s-api server to listen on (default: 9443) tunneledNetworkingMode Whether to tunnel Kubernetes access from worker nodes via local port forwarding. (default: false)

\u00b9 If port and k0sApiPort are used with the externalAddress element, the loadbalancer serving at externalAddress must listen on the same ports.

"},{"location":"configuration/#specstorage","title":"spec.storage","text":"Element Description type Type of the data store (valid values:etcd or kine). Note: Type etcd will cause k0s to create and manage an elastic etcd cluster within the controller nodes. etcd.peerAddress Node address used for etcd cluster peering. etcd.extraArgs Map of key-values (strings) for any extra arguments to pass down to etcd process. kine.dataSource kine datasource URL."},{"location":"configuration/#specnetwork","title":"spec.network","text":"Element Description provider Network provider (valid values: calico, kuberouter, or custom). For custom, you can push any network provider (default: kuberouter). Be aware that it is your responsibility to configure all of the CNI-related setups, including the CNI provider itself and all necessary host levels setups (for example, CNI binaries). Note: Once you initialize the cluster with a network provider the only way to change providers is through a full cluster redeployment. podCIDR Pod network CIDR to use in the cluster. serviceCIDR Network CIDR to use for cluster VIP services. clusterDomain Cluster Domain to be passed to the kubelet and the coredns configuration."},{"location":"configuration/#specnetworkcalico","title":"spec.network.calico","text":"Element Description mode vxlan (default), ipip or bird overlay Overlay mode: Always (default), CrossSubnet or Never (requires mode=vxlan to disable calico overlay-network). vxlanPort The UDP port for VXLAN (default: 4789). vxlanVNI The virtual network ID for VXLAN (default: 4096). mtu MTU for overlay network (default: 0, which causes Calico to detect optimal MTU during bootstrap). wireguard Enable wireguard-based encryption (default: false). Your host system must be wireguard ready (refer to the Calico documentation for details). flexVolumeDriverPath The host path for Calicos flex-volume-driver(default: /usr/libexec/k0s/kubelet-plugins/volume/exec/nodeagent~uds). Change this path only if the default path is unwriteable (refer to Project Calico Issue #2712 for details). Ideally, you will pair this option with a custom volumePluginDir in the profile you use for your worker nodes. ipAutodetectionMethod Use to force Calico to pick up the interface for pod network inter-node routing (default: \"\", meaning not set, so that Calico will instead use its defaults). For more information, refer to the Calico documentation. envVars Map of key-values (strings) for any calico-node environment variable."},{"location":"configuration/#specnetworkcalicoenvvars","title":"spec.network.calico.envVars","text":"

Environment variable values must be strings, e.g.:

spec:\nnetwork:\nprovider: calico\ncalico:\nenvVars:\nTEST_BOOL_VAR: \"true\"\nTEST_INT_VAR: \"42\"\nTEST_STRING_VAR: test\n

k0s runs Calico with some predefined variables, which can be overridden by setting new values in spec.network.calico.envVars:

CALICO_IPV4POOL_CIDR: \"{{ spec.network.podCIDR }}\"\nCALICO_DISABLE_FILE_LOGGING: \"true\"\nFELIX_DEFAULTENDPOINTTOHOSTACTION: \"ACCEPT\"\nFELIX_LOGSEVERITYSCREEN: \"info\"\nFELIX_HEALTHENABLED: \"true\"\nFELIX_PROMETHEUSMETRICSENABLED: \"true\"\nFELIX_FEATUREDETECTOVERRIDE: \"ChecksumOffloadBroken=true\"\n

FELIX_FEATUREDETECTOVERRIDE: ChecksumOffloadBroken=true disables VXLAN offloading because of projectcalico/calico#4727.

In SingleStack mode there are additional vars:

FELIX_IPV6SUPPORT: \"false\"\n

In DualStack mode there are additional vars:

CALICO_IPV6POOL_NAT_OUTGOING: \"true\"\nFELIX_IPV6SUPPORT: \"true\"\nIP6: \"autodetect\"\nCALICO_IPV6POOL_CIDR: \"{{ spec.network.dualStack.IPv6podCIDR }}\"\n
"},{"location":"configuration/#specnetworkkuberouter","title":"spec.network.kuberouter","text":"Element Description autoMTU Autodetection of used MTU (default: true). mtu Override MTU setting, if autoMTU must be set to false). metricsPort Kube-router metrics server port. Set to 0 to disable metrics (default: 8080). peerRouterIPs Comma-separated list of global peer addresses. peerRouterASNs Comma-separated list of global peer ASNs. hairpin Hairpin mode, supported modes Enabled: enabled cluster wide, Allowed: must be allowed per service using annotations, Disabled: doesn't work at all (default: Enabled) hairpinMode Deprecated Use hairpin instead. If both hairpin and hairpinMode are defined, this is ignored. If only hairpinMode is configured explicitly activates hairpinMode (https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode). ipMasq IP masquerade for traffic originating from the pod network, and destined outside of it (default: false)

Note: Kube-router allows many networking aspects to be configured per node, service, and pod (for more information, refer to the Kube-router user guide).

"},{"location":"configuration/#specnetworkkubeproxy","title":"spec.network.kubeProxy","text":"Element Description disabled Disable kube-proxy altogether (default: false). mode Kube proxy operating mode, supported modes iptables, ipvs, userspace (default: iptables) iptables Kube proxy iptables settings ipvs Kube proxy ipvs settings

Default kube-proxy iptables settings:

iptables:\nmasqueradeAll: false\nmasqueradeBit: null\nminSyncPeriod: 0s\nsyncPeriod: 0s\n

Default kube-proxy ipvs settings:

ipvs:\nexcludeCIDRs: null\nminSyncPeriod: 0s\nscheduler: \"\"\nstrictARP: false\nsyncPeriod: 0s\ntcpFinTimeout: 0s\ntcpTimeout: 0s\nudpTimeout: 0s\n
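
Putting these together, a sketch of switching kube-proxy to IPVS mode with strict ARP enabled (a setting some load balancer implementations require) could look like:

spec:\nnetwork:\nkubeProxy:\nmode: ipvs\nipvs:\nstrictARP: true\n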
"},{"location":"configuration/#specnetworknodelocalloadbalancing","title":"spec.network.nodeLocalLoadBalancing","text":"

Configuration options related to k0s's node-local load balancing feature.

Note: This feature is experimental! Expect instabilities and/or breaking changes.
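
A minimal sketch of enabling node-local load balancing with the default Envoy-based implementation:

spec:\nnetwork:\nnodeLocalLoadBalancing:\nenabled: true\ntype: EnvoyProxy\n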

Element Description enabled Indicates if node-local load balancing should be used to access Kubernetes API servers from worker nodes. Default: false. type The type of the node-local load balancer to deploy on worker nodes. Default: EnvoyProxy. (This is the only option for now.) envoyProxy Configuration options related to the \"EnvoyProxy\" type of load balancing."},{"location":"configuration/#specnetworknodelocalloadbalancingenvoyproxy","title":"spec.network.nodeLocalLoadBalancing.envoyProxy","text":"

Configuration options required for using Envoy as the backing implementation for node-local load balancing.

Note: This type of load balancing is not supported on ARMv7 workers.

Element Description image The OCI image that's being used for the Envoy Pod. imagePullPolicy The pull policy being used used for the Envoy Pod. Defaults to spec.images.default_pull_policy if omitted. apiServerBindPort Port number on which to bind the Envoy load balancer for the Kubernetes API server to on a worker's loopback interface. Default: 7443. konnectivityServerBindPort Port number on which to bind the Envoy load balancer for the konnectivity server to on a worker's loopback interface. Default: 7132."},{"location":"configuration/#speccontrollermanager","title":"spec.controllerManager","text":"Element Description extraArgs Map of key-values (strings) for any extra arguments you want to pass down to the Kubernetes controller manager process."},{"location":"configuration/#specscheduler","title":"spec.scheduler","text":"Element Description extraArgs Map of key-values (strings) for any extra arguments you want to pass down to Kubernetes scheduler process."},{"location":"configuration/#specworkerprofiles","title":"spec.workerProfiles","text":"

Worker profiles are used to manage worker-specific configuration in a centralized manner. A ConfigMap is generated for each worker profile. Based on the --profile argument given to the k0s worker, the configuration in the corresponding ConfigMap is picked up during startup.

The worker profiles are defined as an array. Each element has the following properties:

Property Description name String; name to use as profile selector for the worker process values Object; Kubelet configuration overrides, see below for details"},{"location":"configuration/#specworkerprofilesvalues-kubelet-configuration-overrides","title":"spec.workerProfiles[].values (Kubelet configuration overrides)","text":"

The Kubelet configuration overrides of a profile override the defaults defined by k0s.

Note that there are several fields that cannot be overridden:

  • clusterDNS
  • clusterDomain
  • apiVersion
  • kind
  • staticPodURL
"},{"location":"configuration/#specfeaturegates","title":"spec.featureGates","text":"

Available components are:

  • kube-apiserver
  • kube-controller-manager
  • kubelet
  • kube-scheduler
  • kube-proxy

If the components field is omitted, the feature gate is propagated to all of the above Kubernetes components.

Under the hood, feature gates are applied by modifying the components' extraArgs.

"},{"location":"configuration/#example","title":"Example","text":"
spec:\nfeatureGates:\n- name: feature-gate-0\nenabled: true\ncomponents: [\"kube-apiserver\", \"kube-controller-manager\", \"kubelet\", \"kube-scheduler\"]\n- name: feature-gate-1\nenabled: true\n- name: feature-gate-2\nenabled: false\n
"},{"location":"configuration/#kubelet-feature-gates-example","title":"Kubelet feature gates example","text":"

The below is an example of a k0s config with feature gates enabled:

spec:\nfeatureGates:\n- name: DevicePlugins\nenabled: true\ncomponents: [\"kubelet\"]\n- name: Accelerators\nenabled: true\ncomponents: [\"kubelet\"]\n- name: AllowExtTrafficLocalEndpoints\nenabled: false\n
"},{"location":"configuration/#configuration-examples","title":"Configuration examples","text":""},{"location":"configuration/#custom-volumeplugindir","title":"Custom volumePluginDir","text":"
spec:\nworkerProfiles:\n- name: custom-pluginDir\nvalues:\nvolumePluginDir: /var/libexec/k0s/kubelet-plugins/volume/exec\n
"},{"location":"configuration/#eviction-policy","title":"Eviction Policy","text":"
spec:\nworkerProfiles:\n- name: custom-eviction\nvalues:\nevictionHard:\nmemory.available: \"500Mi\"\nnodefs.available: \"1Gi\"\nimagefs.available: \"100Gi\"\nevictionMinimumReclaim:\nmemory.available: \"0Mi\"\nnodefs.available: \"500Mi\"\nimagefs.available: \"2Gi\"\n
"},{"location":"configuration/#unsafe-sysctls","title":"Unsafe Sysctls","text":"
spec:\nworkerProfiles:\n- name: custom-eviction\nvalues:\nallowedUnsafeSysctls:\n- fs.inotify.max_user_instances\n
"},{"location":"configuration/#specimages","title":"spec.images","text":"

Nodes under the images key all have the same basic structure:

spec:\nimages:\ncoredns:\nimage: quay.io/coredns/coredns\nversion: v1.7.0\n

If you want the list of default images and their versions to be included, use k0s config create --include-images.

"},{"location":"configuration/#available-keys","title":"Available keys","text":"
  • spec.images.konnectivity
  • spec.images.metricsserver
  • spec.images.kubeproxy
  • spec.images.coredns
  • spec.images.calico.cni
  • spec.images.calico.flexvolume
  • spec.images.calico.node
  • spec.images.calico.kubecontrollers
  • spec.images.kuberouter.cni
  • spec.images.kuberouter.cniInstaller
  • spec.images.repository\u00b9

\u00b9 If spec.images.repository is set and not empty, every image will be pulled from images.repository

If spec.images.default_pull_policy is set and not empty, it will be used as a pull policy for each bundled image.

"},{"location":"configuration/#image-example","title":"Image example","text":"
images:\nrepository: \"my.own.repo\"\nkonnectivity:\nimage: calico/kube-controllers\nversion: v3.16.2\nmetricsserver:\nimage: registry.k8s.io/metrics-server/metrics-server\nversion: v0.6.4\n

At runtime, the image names are calculated as my.own.repo/calico/kube-controllers:v3.16.2 and my.own.repo/metrics-server/metrics-server:v0.6.4. This only affects the image pull location; omitting an image specification here will not disable component deployment.

"},{"location":"configuration/#specextensionshelm","title":"spec.extensions.helm","text":"

spec.extensions.helm is the config file key in which you configure the list of Helm repositories and charts to deploy during cluster bootstrap (for more information, refer to Helm Charts).

"},{"location":"configuration/#specextensionsstorage","title":"spec.extensions.storage","text":"

spec.extensions.storage controls the bundled storage provider. With the default value external, no storage is deployed.

To enable the embedded host-local storage provider, use the following configuration:

spec:\nextensions:\nstorage:\ntype: openebs_local_storage\n
"},{"location":"configuration/#speckonnectivity","title":"spec.konnectivity","text":"

The spec.konnectivity key is the config file key in which you configure Konnectivity-related settings.

  • agentPort agent port to listen on (default 8132)
  • adminPort admin port to listen on (default 8133)
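
For example, to change both ports from their defaults (the values below are arbitrary examples):

spec:\nkonnectivity:\nagentPort: 9132\nadminPort: 9133\n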
"},{"location":"configuration/#spectelemetry","title":"spec.telemetry","text":"

To improve the end-user experience, k0s is configured by default to collect telemetry data from clusters and send it to the k0s development team. To disable the telemetry function, change the enabled setting to false.

The telemetry interval is ten minutes.

spec:\ntelemetry:\nenabled: true\n
"},{"location":"configuration/#disabling-controller-components","title":"Disabling controller components","text":"

k0s allows you to completely disable some of the system components. This lets users build a minimal Kubernetes control plane and use whatever components they need to fulfill their control plane requirements. Disabling the system components happens through a command line flag for the controller process:

--disable-components strings                     disable components (valid items: api-config,autopilot,control-api,coredns,csr-approver,endpoint-reconciler,helm,konnectivity-server,kube-controller-manager,kube-proxy,kube-scheduler,metrics-server,network-provider,node-role,system-rbac,worker-config)\n

Note: As of k0s 1.26, the kubelet-config component has been replaced by the worker-config component. k0s will issue a warning when the old component name is being used. It is scheduled for removal in k0s 1.27. Please update to the new component name.

If you use k0sctl, just add the flag for the first controller at spec.hosts.installFlags in the config file when installing the cluster, e.g.:

spec:\nhosts:\n- role: controller\ninstallFlags:\n- --disable-components=metrics-server\n

As seen from the component list, the only always-on component is the Kubernetes api-server; without it, k0s serves no purpose.

"},{"location":"conformance-testing/","title":"Kubernetes conformance testing for k0s","text":"

We run the conformance testing for the last RC build of a release. Follow the instructions in the conformance testing repository.

In a nutshell, you need to:

  • Set up k0s on some VMs/bare metal boxes
  • Download the sonobuoy tool, if you do not already have it
  • Run the conformance tests with something like sonobuoy run --mode=certified-conformance
  • Wait for a couple of hours
  • Collect the results (see the sketch below)
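
A rough sketch of checking progress and collecting the results, assuming a recent sonobuoy release:

sonobuoy status\nsonobuoy retrieve ./results\nsonobuoy results ./results/*.tar.gz\n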
"},{"location":"containerd_config/","title":"Containerd config","text":"

See runtime.

"},{"location":"custom-ca/","title":"Install using custom CA certificate","text":"

k0s generates all needed certificates automatically in the <data-dir>/pki directory (/var/lib/k0s/pki, by default).

But sometimes there is a need to have the CA certificate in advance. To make it work, just put the ca.key and ca.crt files into <data-dir>/pki:

mkdir -p /var/lib/k0s/pki\ncd /var/lib/k0s/pki\nopenssl genrsa -out ca.key 2048\nopenssl req -x509 -new -nodes -key ca.key -sha256 -days 365 -out ca.crt -subj \"/CN=Custom CA\"\n

Then you can install k0s as usual.

"},{"location":"custom-ca/#pre-generated-tokens","title":"Pre-generated tokens","text":"

It's possible to get a join token in advance without having a running cluster.

k0s token pre-shared --role worker --cert /var/lib/k0s/pki/ca.crt --url https://<controller-ip>:6443/\n

The command above generates a join token and a Secret. The Secret should be deployed to the cluster to authorize the token. For example, you can put the Secret under the manifest directory, and it will be deployed automatically.

"},{"location":"custom-cri-runtime/","title":"Custom cri runtime","text":"

See runtime.

"},{"location":"dockershim/","title":"Dockershim deprecation - what does it mean for K0s?","text":"

Back in December 2020, Kubernetes announced the deprecation of the dockershim from version 1.24 onwards. As a consequence, k0s 1.24 and above don't support the dockershim either.

"},{"location":"dockershim/#what-is-dockershim-and-why-was-it-deprecated","title":"What is dockershim and why was it deprecated?","text":"

The dockershim is a translation layer that intercepts the kubelet's container runtime calls and handles their operation through the Docker API. Early versions of Kubernetes used this shim to allow containers to run on Docker. Later versions of Kubernetes started creating containers via the CRI (Container Runtime Interface). Since the CRI has become the de-facto standard way for Kubernetes to talk to container runtimes, maintaining the dockershim turned into a heavy burden for the Kubernetes maintainers, and so the decision was made to deprecate the built-in dockershim support.

"},{"location":"dockershim/#so-whats-going-to-happen-to-dockershim","title":"So what's going to happen to dockershim?","text":"

Dockershim is not gone. It's only changed ownership. Mirantis has agreed to maintain dockershim (now called cri-dockerd). See: The Future of Dockershim is cri-dockerd.

From Kubernetes version 1.24, the built-in way to run containers is via the CRI, but if you want to continue using Docker, you are free to do so using cri-dockerd.

In order to continue to use the Docker engine with Kubernetes v1.24+, you will have to migrate all worker nodes to use cri-dockerd.

"},{"location":"dockershim/#migrating-to-cri-dockerd","title":"Migrating to CRI-Dockerd","text":"

This migration guide assumes that you've been running k0s with docker on version 1.23 and below.

The following steps need to be done on ALL k0s worker nodes or single-node controllers. Basically, any node that runs containers needs to be migrated using the process detailed below.

Please note that there are currently some pitfalls around container metrics when using CRI-dockerd.

"},{"location":"dockershim/#cordon-and-drain-the-node","title":"Cordon and drain the node","text":"

Get a list of all nodes (k0s is still at version 1.23, which still includes the dockershim):

sudo k0s kubectl get nodes -o wide\n\nNAME                                        STATUS   ROLES           AGE   VERSION       INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION    CONTAINER-RUNTIME\nip-10-0-49-188.eu-west-1.compute.internal   Ready    control-plane   52m   v1.27.5+k0s   10.0.49.188   <none>        Ubuntu 20.04.4 LTS   5.13.0-1022-aws   docker://20.10.16\nip-10-0-62-250.eu-west-1.compute.internal   Ready    <none>          12s   v1.27.5+k0s   10.0.62.250   <none>        Ubuntu 20.04.4 LTS   5.13.0-1017-aws   docker://20.10.16\n

Cordon and drain the nodes (migrate them one by one):

sudo k0s kubectl cordon ip-10-0-62-250.eu-west-1.compute.internal \nsudo k0s kubectl drain ip-10-0-62-250.eu-west-1.compute.internal --ignore-daemonsets\n
sudo k0s kubectl get nodes -o wide\n\nNAME                                        STATUS                     ROLES           AGE     VERSION       INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION    CONTAINER-RUNTIME\nip-10-0-49-188.eu-west-1.compute.internal   Ready                      control-plane   56m     v1.27.5+k0s   10.0.49.188   <none>        Ubuntu 20.04.4 LTS   5.13.0-1022-aws   docker://20.10.16\nip-10-0-62-250.eu-west-1.compute.internal   Ready,SchedulingDisabled   <none>          3m40s   v1.27.5+k0s   10.0.62.250   <none>        Ubuntu 20.04.4 LTS   5.13.0-1017-aws   docker://20.10.16\n

Stop k0s on the node:

sudo k0s stop\n
"},{"location":"dockershim/#installing-cri-dockerd","title":"Installing CRI-Dockerd","text":"

Download the latest cri-dockerd deb package:

cd /tmp\n\n# Get the deb file name for ubuntu-jammy\nOS=\"ubuntu-jammy\"\nPKG=$(curl -s https://api.github.com/repos/Mirantis/cri-dockerd/releases/latest | grep ${OS} | grep http | cut -d '\"' -f 4)\n\nwget ${PKG} -O cri-dockerd-latest.deb\n\nsudo dpkg -i cri-dockerd-latest.deb\n\nSelecting previously unselected package cri-dockerd.\n(Reading database ... 164618 files and directories currently installed.)\nPreparing to unpack cri-dockerd-latest.deb ...\nUnpacking cri-dockerd (0.2.1~3-0~ubuntu-jammy) ...\nSetting up cri-dockerd (0.2.1~3-0~ubuntu-jammy) ...\nCreated symlink /etc/systemd/system/multi-user.target.wants/cri-docker.service \u2192 /lib/systemd/system/cri-docker.service.\nCreated symlink /etc/systemd/system/sockets.target.wants/cri-docker.socket \u2192 /lib/systemd/system/cri-docker.socket.\n

Verify the correct version:

which cri-dockerd\n/usr/bin/cri-dockerd\n\ncri-dockerd --version\ncri-dockerd 0.2.1 (HEAD)\n

Make sure cri-dockerd is started:

sudo systemctl status cri-docker.service\n\u25cf cri-docker.service - CRI Interface for Docker Application Container Engine\n     Loaded: loaded (/lib/systemd/system/cri-docker.service; enabled; vendor preset: enabled)\nActive: active (running) since Wed 2022-05-25 14:27:31 UTC; 1min 23s ago\nTriggeredBy: \u25cf cri-docker.socket\n       Docs: https://docs.mirantis.com\n   Main PID: 1404151 (cri-dockerd)\nTasks: 9\nMemory: 15.3M\n     CGroup: /system.slice/cri-docker.service\n             \u2514\u25001404151 /usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=\n
"},{"location":"dockershim/#configure-k0s-to-use-dockershim","title":"Configure K0s to use dockershim","text":"

Replace the docker socket with the cri-dockerd socket in the k0s systemd unit (the step below should be run AFTER upgrading k0s to version 1.24):

sudo sed -i -e 's_--cri-socket=docker:unix:///var/run/docker.sock_--cri-socket docker:unix:///var/run/cri-dockerd.sock_' /etc/systemd/system/k0sworker.service\nsudo systemctl daemon-reload\n
"},{"location":"dockershim/#start-k0s-with-cri-dockerd","title":"Start k0s with cri-dockerd","text":"
sudo k0s start\n

Verify the running pods via docker ps:

docker ps --format \"table {{.ID}}\\t{{.Names}}\\t{{.State}}\\t{{.Status}}\\t{{.Image}}\"\n\nCONTAINER ID   NAMES                                                                                                STATE     STATUS          IMAGE\n1b9b4624ddfd   k8s_konnectivity-agent_konnectivity-agent-5jpd7_kube-system_1b3101ea-baeb-4a22-99a2-088d7ca5be85_1   running   Up 51 minutes   quay.io/k0sproject/apiserver-network-proxy-agent\n414758a8a951   k8s_kube-router_kube-router-qlkgg_kube-system_9a1b67bf-5347-4acd-98ac-f9a67f2db730_1                 running   Up 51 minutes   3a67679337a5\nb81960bb304c   k8s_kube-proxy_kube-proxy-tv95n_kube-system_164dc9f8-f47c-4f6c-acb7-ede5dbcd63cd_1                   running   Up 51 minutes   quay.io/k0sproject/kube-proxy\nfb888cbc5ae0   k8s_POD_kube-router-qlkgg_kube-system_9a1b67bf-5347-4acd-98ac-f9a67f2db730_0                         running   Up 51 minutes   registry.k8s.io/pause:3.1\n382d0a938c9d   k8s_POD_konnectivity-agent-5jpd7_kube-system_1b3101ea-baeb-4a22-99a2-088d7ca5be85_0                  running   Up 51 minutes   registry.k8s.io/pause:3.1\n72d4a47b5609   k8s_POD_kube-proxy-tv95n_kube-system_164dc9f8-f47c-4f6c-acb7-ede5dbcd63cd_0                          running   Up 51 minutes   registry.k8s.io/pause:3.1\n

On the controller, you'll be able to see the worker started with the new docker container runtime:

sudo k0s kubectl get nodes -o wide\n\nNAME                                        STATUS                     ROLES           AGE    VERSION       INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION    CONTAINER-RUNTIME\nip-10-0-49-188.eu-west-1.compute.internal   Ready                      control-plane   117m   v1.27.5+k0s   10.0.49.188   <none>        Ubuntu 20.04.4 LTS   5.13.0-1022-aws   docker://20.10.16\nip-10-0-62-250.eu-west-1.compute.internal   Ready,SchedulingDisabled   <none>          64m    v1.27.5+k0s   10.0.62.250   <none>        Ubuntu 20.04.4 LTS   5.13.0-1017-aws   docker://20.10.16\n
"},{"location":"dockershim/#uncordon-the-node","title":"Uncordon the Node","text":"
sudo k0s kubectl uncordon ip-10-0-62-250.eu-west-1.compute.internal\n\nnode/ip-10-0-62-250.eu-west-1.compute.internal uncordoned\n

You should now see the node Ready for scheduling with the Docker runtime:

sudo k0s kubectl get nodes -o wide\n\nNAME                                        STATUS   ROLES           AGE    VERSION       INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION    CONTAINER-RUNTIME\nip-10-0-49-188.eu-west-1.compute.internal   Ready    control-plane   119m   v1.27.5+k0s   10.0.49.188   <none>        Ubuntu 20.04.4 LTS   5.13.0-1022-aws   docker://20.10.16\nip-10-0-62-250.eu-west-1.compute.internal   Ready    <none>          66m    v1.27.5+k0s   10.0.62.250   <none>        Ubuntu 20.04.4 LTS   5.13.0-1017-aws   docker://20.10.16\n
"},{"location":"dual-stack/","title":"Dual-stack Networking","text":"

Note: Dual stack networking setup requires that you configure Calico or a custom CNI as the CNI provider.

Use the following k0s.yaml as a template to enable dual-stack networking. This configuration will set up the bundled Calico CNI, enable feature gates for the Kubernetes components, and set up the kube-controller-manager.

spec:\nnetwork:\npodCIDR: \"10.244.0.0/16\"\nserviceCIDR: \"10.96.0.0/12\"\nprovider: calico\ncalico:\nmode: \"bird\"\ndualStack:\nenabled: true\nIPv6podCIDR: \"fd00::/108\"\nIPv6serviceCIDR: \"fd01::/108\"\n
"},{"location":"dual-stack/#cni-settings-calico","title":"CNI Settings: Calico","text":"

For cross-pod connectivity, use BIRD as the backend. Calico does not support tunneling for IPv6, and thus the VXLAN and IPIP backends do not work.

Note: With any other Calico backend, pods can only reach pods on the same node.

"},{"location":"dual-stack/#cni-settings-external-cni","title":"CNI Settings: External CNI","text":"

Although the k0s.yaml dualStack section enables all of the necessary feature gates for the Kubernetes components, an external CNI must itself be set up to support IPv6.

"},{"location":"dual-stack/#additional-resources","title":"Additional Resources","text":"
  • https://kubernetes.io/docs/concepts/services-networking/dual-stack/
  • https://kubernetes.io/docs/tasks/network/validate-dual-stack/
  • https://www.projectcalico.org/dual-stack-operation-with-calico-on-kubernetes/
  • https://docs.projectcalico.org/networking/ipv6
"},{"location":"dynamic-configuration/","title":"Dynamic configuration","text":"

k0s comes with the option to enable dynamic configuration for cluster level components. This covers all the components other than etcd (or sqlite) and the Kubernetes api-server. This option enables k0s configuration directly via Kubernetes API as opposed to using a configuration file for all cluster configuration.

This feature has to be enabled for every controller in the cluster using the --enable-dynamic-config flag in k0s controller or k0s install controller commands. Having both types of controllers in the same cluster will cause a conflict.

"},{"location":"dynamic-configuration/#dynamic-vs-static-configuration","title":"Dynamic vs. static configuration","text":"

The existing and enabled-by-default method is what we call static configuration. In this mode, the k0s process reads the config from the given YAML file (or uses the default config if no config is given by the user) and configures every component accordingly. This means that for any configuration change, the cluster admin has to restart all controllers in the cluster and keep matching configs on each controller node.

In dynamic configuration mode, the first controller to boot up when the cluster is created will use the given config YAML as a bootstrap configuration and store it in the Kubernetes API. All the other controllers will find the config in the API and use it as the source of truth for configuring all the components except for etcd and kube-apiserver. After the initial cluster bootstrap, the source of truth for all controllers is the configuration object in the Kubernetes API.

"},{"location":"dynamic-configuration/#cluster-configuration-vs-controller-node-configuration","title":"Cluster configuration vs. controller node configuration","text":"

In the k0s configuration options there are some options that are cluster-wide and some that are specific to each controller node in the cluster. The following list outlines which options are controller node specific and have to be configured only via the local file:

  • spec.api - these options configure how the local Kubernetes API server is setup
  • spec.storage - these options configure how the local storage (etcd or sqlite) is setup

In case of an HA control plane, all the controllers will need this part of the configuration, as otherwise they will not be able to get the storage and Kubernetes API server running.

"},{"location":"dynamic-configuration/#configuration-location","title":"Configuration location","text":"

The cluster-wide configuration is stored in the Kubernetes API as a custom resource called clusterconfig. There's currently only one instance, named k0s. You can edit the configuration with whatever means you prefer, for example with:

k0s config edit\n

This will open the configuration object for editing in your system's default editor.

"},{"location":"dynamic-configuration/#configuration-reconciliation","title":"Configuration reconciliation","text":"

The dynamic configuration follows the typical operator pattern. The k0s controller detects when the object changes and reconciles the changes into the configuration of the different components. For example, to change the MTU setting for kube-router CNI networking, you'd change the config to contain:

    kuberouter:\nmtu: 1350\nautoMTU: false\n

This will change the kube-router related ConfigMap and thus make kube-router use a different MTU setting for new pods.

"},{"location":"dynamic-configuration/#configuration-options","title":"Configuration options","text":"

The configuration object is a 1-to-1 mapping with the existing configuration YAML. All the configuration options EXCEPT options under spec.api and spec.storage are dynamically reconciled.

As with any Kubernetes cluster, there are certain things that just cannot be changed on the fly. This is the list of non-changeable options:

  • network.podCIDR
  • network.serviceCIDR
  • network.provider
"},{"location":"dynamic-configuration/#configuration-status","title":"Configuration status","text":"

The dynamic configuration reconciler operator will write status events for all the changes it detects. To see all dynamic config related events, use:

k0s config status\n
LAST SEEN   TYPE      REASON                OBJECT              MESSAGE\n64s         Warning   FailedReconciling     clusterconfig/k0s   failed to validate config: [invalid pod CIDR invalid ip address]\n59s         Normal    SuccessfulReconcile   clusterconfig/k0s   Succesfully reconciler cluster config\n69s         Warning   FailedReconciling     clusterconfig/k0s   cannot change CNI provider from kuberouter to calico\n
"},{"location":"environment-variables/","title":"Environment variables","text":"

k0s install does not support environment variables.

Setting environment variables for components used by k0s depends on the init system in use. The environment variables set in the k0scontroller or k0sworker service will be inherited by the k0s components, such as etcd, containerd, konnectivity, etc.

Component specific environment variables can be set in k0scontroller or k0sworker service. For example: for CONTAINERD_HTTPS_PROXY, the prefix CONTAINERD_ will be stripped and converted to HTTPS_PROXY in the containerd process.

Components that have their own env prefix convention, such as ETCD_xxx, are handled specially, i.e. the prefix will not be stripped. For example, ETCD_MAX_WALS will still be ETCD_MAX_WALS in the etcd process.

The proxy envs HTTP_PROXY, HTTPS_PROXY, and NO_PROXY are always overridden by component-specific environment variables, so ETCD_HTTPS_PROXY will still be converted to HTTPS_PROXY in the etcd process.

"},{"location":"environment-variables/#systemd","title":"SystemD","text":"

Create a drop-in directory and add a config file with the desired environment variable:

mkdir -p /etc/systemd/system/k0scontroller.service.d\ntee -a /etc/systemd/system/k0scontroller.service.d/http-proxy.conf <<EOT\n[Service]\nEnvironment=HTTP_PROXY=192.168.33.10:3128\nEOT\n
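
Building on the same drop-in mechanism, the component-prefixed variables mentioned above can be set as well (a sketch; the file name and values are examples only):

tee -a /etc/systemd/system/k0scontroller.service.d/component-env.conf <<EOT\n[Service]\nEnvironment=CONTAINERD_HTTPS_PROXY=192.168.33.10:3128\nEnvironment=ETCD_MAX_WALS=5\nEOT\n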
"},{"location":"environment-variables/#openrc","title":"OpenRC","text":"

Export the desired environment variable by overriding the service configuration in the /etc/conf.d directory:

echo 'export HTTP_PROXY=\"192.168.33.10:3128\"' > /etc/conf.d/k0scontroller\n
"},{"location":"experimental-windows/","title":"Run k0s worker nodes in Windows","text":"

IMPORTANT: Windows support for k0s is under active development and must be considered experimental.

"},{"location":"experimental-windows/#prerequisites","title":"Prerequisites","text":"

The cluster must be running the control plane and at least one worker node on Linux. You can use Windows to run additional worker nodes.

"},{"location":"experimental-windows/#run-k0s","title":"Run k0s","text":"

Note: The k0s.exe supervises kubelet.exe and kube-proxy.exe.

During the first run, the Calico install script is created as C:\\bootstrap.ps1. This bootstrap script downloads the Calico binaries, builds the pause container, and sets up the vSwitch settings.

Install Mirantis Container Runtime on the Windows node(s), as it is required for the initial Calico setup.

k0s worker --cri-socket=docker:tcp://127.0.0.1:2375 --cidr-range=<cidr_range> --cluster-dns=<clusterdns> --api-server=<k0s api> <token>\n

You must initiate the cluster control plane with the correct config.

"},{"location":"experimental-windows/#configuration","title":"Configuration","text":""},{"location":"experimental-windows/#strict-affinity","title":"Strict-affinity","text":"

You must enable strict affinity to run the Windows node.

If the spec.network.calico.withWindowsNodes field is set to true (it is set to false by default) the additional calico related manifest /var/lib/k0s/manifests/calico/calico-IPAMConfig-ipamconfig.yaml is created with the following values:

---\napiVersion: crd.projectcalico.org/v1\nkind: IPAMConfig\nmetadata:\nname: default\nspec:\nstrictAffinity: true\n

Alternatively, you can manually execute calicoctl:

calicoctl ipam configure --strictaffinity=true\n
"},{"location":"experimental-windows/#network-connectivity-in-aws","title":"Network connectivity in AWS","text":"

Disable the Change Source/Dest. Check option for the network interface attached to your EC2 instance. In AWS, the console option for the network interface is in the Actions menu.

"},{"location":"experimental-windows/#hacks","title":"Hacks","text":"

k0s offers the following CLI arguments in lieu of a formal means of passing cluster settings from the control plane to workers:

  • cidr-range
  • cluster-dns
  • api-server
"},{"location":"experimental-windows/#useful-commands","title":"Useful commands","text":""},{"location":"experimental-windows/#run-pod-with-cmdexe-shell","title":"Run pod with cmd.exe shell","text":"
kubectl run win --image=hello-world:nanoserver --command=true -i --attach=true -- cmd.exe\n
"},{"location":"experimental-windows/#manifest-for-pod-with-iis-web-server","title":"Manifest for pod with IIS web-server","text":"
apiVersion: v1\nkind: Pod\nmetadata:\nname: iis\nspec:\ncontainers:\n- name: iis\nimage: mcr.microsoft.com/windows/servercore/iis\nimagePullPolicy: IfNotPresent\n
"},{"location":"extensions/","title":"Cluster extensions","text":"

k0s allows users to use extensions to extend cluster functionality.

At the moment, the only supported type of extension is Helm-based charts.

The default configuration has no extensions.

"},{"location":"extensions/#helm-based-extensions","title":"Helm based extensions","text":""},{"location":"extensions/#configuration-example","title":"Configuration example","text":"
helm:\nrepositories:\n- name: stable\nurl: https://charts.helm.sh/stable\n- name: prometheus-community\nurl: https://prometheus-community.github.io/helm-charts\ncharts:\n- name: prometheus-stack\nchartname: prometheus-community/prometheus\nversion: \"11.16.8\"\nvalues: |\nstorageSpec:\nemptyDir:\nmedium: Memory\nnamespace: default\n# We don't need to specify the repo in the repositories section for OCI charts\n- name: oci-chart\nchartname: oci://registry:8080/chart\nversion: \"0.0.1\"\nvalues: \"\"\nnamespace: default\n# Other way is to use local tgz file with chart\n# the file must exist all controller nodes\n- name: tgz-chart\nchartname: /tmp/chart.tgz\nversion: \"0.0.1\"\nvalues: \"\"\nnamespace: default\n

By using the configuration above, the cluster would:

  • add stable and prometheus-community chart repositories
  • install the prometheus-community/prometheus chart of the specified version to the default namespace.

The chart installation is implemented using the helm.k0sproject.io/Chart CRD. For every given Helm extension, the cluster creates a Chart CRD instance. The cluster has a controller which monitors the Chart CRDs and supports the following operations:

  • install
  • upgrade
  • delete

For security reasons, the cluster operates only on Chart CRDs instantiated in the kube-system namespace; however, the target namespace can be any namespace.

"},{"location":"extensions/#crd-definition","title":"CRD definition","text":"
apiVersion: helm.k0sproject.io/v1beta1\nkind: Chart\nmetadata:\ncreationTimestamp: \"2020-11-10T14:17:53Z\"\ngeneration: 2\nlabels:\nk0s.k0sproject.io/stack: helm\nname: k0s-addon-chart-test-addon\nnamespace: kube-system\nresourceVersion: \"627\"\nselfLink: /apis/helm.k0sproject.io/v1beta1/namespaces/kube-system/charts/k0s-addon-chart-test-addon\nuid: ebe59ed4-1ff8-4d41-8e33-005b183651ed\nspec:\nchartName: prometheus-community/prometheus\nnamespace: default\nvalues: |\nstorageSpec:\nemptyDir:\nmedium: Memory\nversion: 11.16.8\nstatus:\nappVersion: 2.21.0\nnamespace: default\nreleaseName: prometheus-1605017878\nrevision: 2\nupdated: 2020-11-10 14:18:08.235656 +0000 UTC m=+41.871656901\nversion: 11.16.8\n

The Chart.spec defines the chart information.

The Chart.status keeps the information about the last operation performed by the operator.

"},{"location":"external-runtime-deps/","title":"External runtime dependencies","text":"

k0s is packaged as a single binary, which includes all the needed components. All the binaries are statically linked which means that in typical use cases there's an absolute minimum of external runtime dependencies.

However, depending on the node role and cluster configuration, some of the underlying components may have specific dependencies, like OS level tools, packages and libraries. This page aims to provide a comprehensive overview.

The following command checks for known requirements on a host (currently only available on Linux):

k0s sysinfo\n
"},{"location":"external-runtime-deps/#a-unique-machine-id-for-multi-node-setups","title":"A unique machine ID for multi-node setups","text":"

Whenever k0s is run in a multi-node setup (i.e. the --single command line flag isn't used), k0s requires a machine ID: a unique host identifier that is somewhat stable across reboots. For Linux, this ID is read from the files /var/lib/dbus/machine-id or /etc/machine-id. For Windows, it's taken from the registry key HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Cryptography\\MachineGuid. If neither of the OS specific sources yield a result, k0s will fallback to use a machine ID based on the hostname.

When running k0s on top of virtualized or containerized environments, you need to ensure that hosts get their own unique IDs, even if they have been created from the same image.
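
On Linux you can check the current ID, and on systemd-based hosts regenerate it after cloning an image, roughly like this (a sketch; the exact procedure depends on your distribution):

cat /etc/machine-id\nrm -f /etc/machine-id\nsystemd-machine-id-setup\n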

"},{"location":"external-runtime-deps/#linux-specific","title":"Linux specific","text":""},{"location":"external-runtime-deps/#linux-kernel-configuration","title":"Linux kernel configuration","text":"

As k0s operates Kubernetes worker nodes, a certain number of Linux kernel modules and configuration options are needed in the system. This basically stems from the need to run containers and to set up networking for them.

The needed kernel configuration items are listed below. All of them are available in kernel versions 4.3 and above. If you are running an older kernel, check whether the distro in use has backported some features; it might still meet the requirements. k0s will check the Linux kernel release as part of its pre-flight checks and issue a warning if it's below 3.10.

The list covers ONLY the k0s/kubernetes components\u2019 needs on worker nodes. Your own workloads may require more.

  • CONFIG_CGROUPS: Control Group support
    • CONFIG_CGROUP_FREEZER: Freezer cgroup subsystem
    • CONFIG_CGROUP_PIDS: PIDs cgroup subsystem kubernetes/kubeadm#2335 (comment)
    • CONFIG_CGROUP_DEVICE: Device controller for cgroups
    • CONFIG_CPUSETS: Cpuset support
    • CONFIG_CGROUP_CPUACCT: Simple CPU accounting cgroup subsystem
    • CONFIG_MEMCG: Memory Resource Controller for Control Groups
    • (optional) CONFIG_CGROUP_HUGETLB: HugeTLB Resource Controller for Control Groups kubernetes/kubeadm#2335 (comment)
    • CONFIG_CGROUP_SCHED: Group CPU scheduler
      • CONFIG_FAIR_GROUP_SCHED: Group scheduling for SCHED_OTHER kubernetes/kubeadm#2335 (comment)
        • (optional) CONFIG_CFS_BANDWIDTH: CPU bandwidth provisioning for FAIR_GROUP_SCHED Required if CPU CFS quota enforcement is enabled for containers that specify CPU limits (--cpu-cfs-quota).
    • (optional) CONFIG_BLK_CGROUP: Block IO controller kubernetes/kubernetes#92287 (comment)
  • CONFIG_NAMESPACES: Namespaces support
    • CONFIG_UTS_NS: UTS namespace
    • CONFIG_IPC_NS: IPC namespace
    • CONFIG_PID_NS: PID namespace
    • CONFIG_NET_NS: Network namespace
  • CONFIG_NET: Networking support
    • CONFIG_INET: TCP/IP networking
    • CONFIG_NETFILTER: Network packet filtering framework (Netfilter)
      • (optional) CONFIG_NETFILTER_ADVANCED: Advanced netfilter configuration
      • CONFIG_NETFILTER_XTABLES: Netfilter Xtables support
        • CONFIG_NETFILTER_XT_TARGET_REDIRECT: REDIRECT target support
        • CONFIG_NETFILTER_XT_MATCH_COMMENT: \"comment\" match support
  • CONFIG_EXT4_FS: The Extended 4 (ext4) filesystem
  • CONFIG_PROC_FS: /proc file system support

Note: As part of its pre-flight checks, k0s will try to inspect and validate the kernel configuration. In order for that to succeed, the configuration needs to be accessible at runtime. There are some typical places that k0s will check. A bullet-proof way to ensure the accessibility is to enable CONFIG_IKCONFIG_PROC, and, if enabled as a module, to load the configs module: modprobe configs.
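
A sketch of inspecting the kernel configuration manually, assuming CONFIG_IKCONFIG_PROC is available:

modprobe configs\nzcat /proc/config.gz | grep -E 'CONFIG_CGROUPS|CONFIG_NAMESPACES|CONFIG_NETFILTER'\n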

"},{"location":"external-runtime-deps/#control-groups-cgroups","title":"Control Groups (cgroups)","text":"

Both cgroup v1 and cgroup v2 are supported.

Required cgroup controllers:

  • cpu
  • cpuacct
  • cpuset
  • memory
  • devices
  • freezer
  • pids

Optional cgroup controllers:

  • hugetlb (kubernetes/kubeadm#2335 (comment))
  • blkio (kubernetes/kubernetes#92287 (comment)) containerd and cri-o will use blkio to track disk I/O and throttling in both cgroup v1 and v2.
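
To see which controllers are actually enabled on a host, you can inspect the cgroup filesystem (a sketch; the relevant file depends on whether cgroup v1 or v2 is in use):

# cgroup v2\ncat /sys/fs/cgroup/cgroup.controllers\n# cgroup v1\ncat /proc/cgroups\n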
"},{"location":"external-runtime-deps/#external-soft-dependencies","title":"External soft dependencies","text":"

There are a few external tools that may be needed or used under specific circumstances:

"},{"location":"external-runtime-deps/#containerd-and-apparmor","title":"containerd and AppArmor","text":"

In order to use containerd in conjunction with AppArmor, it must be enabled in the kernel and the /sbin/apparmor_parser executable must be installed on the host, otherwise containerd will disable AppArmor support.

"},{"location":"external-runtime-deps/#iptables","title":"iptables","text":"

iptables may be executed to detect if there are any existing iptables rules and whether those are in legacy or nft mode. If iptables is not found, k0s will assume that there are no pre-existing iptables rules.
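
To check which mode the host's iptables binary is in, the version output usually tells (a sketch):

iptables --version\n# e.g. iptables v1.8.7 (nf_tables) or iptables v1.8.7 (legacy)\n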

"},{"location":"external-runtime-deps/#useradd-adduser","title":"useradd / adduser","text":"

During k0s install, the external tool useradd will be used on the controllers to create system user accounts for k0s. If it does not exist, k0s will fall back to busybox's adduser.

"},{"location":"external-runtime-deps/#userdel-deluser","title":"userdel / deluser","text":"

k0s reset will execute either userdel or deluser to clean up system user accounts.

"},{"location":"external-runtime-deps/#modprobe","title":"modprobe","text":"

On k0s workers, modprobe will be executed to load missing kernel modules if they are not detected.

"},{"location":"external-runtime-deps/#id","title":"id","text":"

The external /usr/bin/id will be executed as a fallback if the local user lookup fails, in case NSS is used.

"},{"location":"external-runtime-deps/#other-dependencies-in-previous-versions-of-k0s","title":"Other dependencies in previous versions of k0s","text":"
  • up until k0s v1.21.9+k0s.0: iptables Required for worker nodes. Resolved by @ncopa in #1046 by adding iptables and friends to k0s's embedded binaries.
  • up until k0s v1.21.7+k0s.0: find, du and nice Required for worker nodes. Resolved upstream by @ncopa in kubernetes/kubernetes#96115, contained in Kubernetes 1.21.8 (5b13c8f68d4) and 1.22.0 (d45ba645a8f).
"},{"location":"external-runtime-deps/#windows-specific","title":"Windows specific","text":"

TBD.

"},{"location":"helm-charts/","title":"Helm Charts","text":"

Defining your extensions as Helm charts is one of two methods you can use to run k0s with your preferred extensions (the other being through the use of Manifest Deployer).

k0s supports two methods for deploying applications using Helm charts:

  • Use the Helm command at runtime to install applications. Refer to the Helm Quickstart Guide for more information.
  • Insert Helm charts directly into the k0s configuration file, k0s.yaml. This method does not require a separate installation of the helm tool, and the charts deploy automatically at the k0s bootstrap phase.
"},{"location":"helm-charts/#helm-charts-in-k0s-configuration","title":"Helm charts in k0s configuration","text":"

Adding Helm charts into the k0s configuration file gives you a declarative way to configure the cluster. The k0s controller manages the setup of Helm charts that are defined as extensions in the k0s configuration file.

"},{"location":"helm-charts/#wait-for-install","title":"Wait for install","text":"

Each chart is processed the same way the Helm CLI tool would process it, with the following options:

  • --wait
  • --wait-for-jobs
  • --timeout 10m

It is possible to customize the timeout by using the .Timeout field.

"},{"location":"helm-charts/#chart-configuration","title":"Chart configuration","text":"Field Default value Description name - Release name chartname - chartname in form \"repository/chartname\" or path to tgz file version - version to install timeout 10m timeout to wait for release install values - yaml as a string, custom chart values namespace - namespace to install chart into order 0 order to apply manifest. For equal values, alphanum ordering is used"},{"location":"helm-charts/#example","title":"Example","text":"

In the example, Prometheus is configured from the \"stable\" Helm chart repository. Add the following to k0s.yaml and restart k0s, after which Prometheus should start automatically with k0s.

spec:\nextensions:\nhelm:\nconcurrencyLevel: 5\nrepositories:\n- name: stable\nurl: https://charts.helm.sh/stable\n- name: prometheus-community\nurl: https://prometheus-community.github.io/helm-charts\ncharts:\n- name: prometheus-stack\nchartname: prometheus-community/prometheus\nversion: \"14.6.1\"\ntimeout: 20m\norder: 1\nvalues: |\nalertmanager:\npersistentVolume:\nenabled: false\nserver:\npersistentVolume:\nenabled: false\nnamespace: default\n# We don't need to specify the repo in the repositories section for OCI charts\n- name: oci-chart\nchartname: oci://registry:8080/chart\nversion: \"0.0.1\"\norder: 2\nvalues: \"\"\nnamespace: default\n# Other way is to use local tgz file with chart\n# the file must exist on all controller nodes\n- name: tgz-chart\nchartname: /tmp/chart.tgz\nversion: \"0.0.1\"\norder: 2\nvalues: \"\"\nnamespace: default\n

Example extensions that you can use with Helm charts include:

  • Ingress controllers: Nginx ingress, Traefik ingress (refer to the k0s documentation for Installing the Traefik Ingress Controller)
  • Volume storage providers: OpenEBS, Rook, Longhorn
  • Monitoring: Prometheus, Grafana
"},{"location":"helm-charts/#helm-debug-logging","title":"Helm debug logging","text":"

Running k0s controller with --debug=true enables helm debug logging.

"},{"location":"high-availability/","title":"Control Plane High Availability","text":"

You can create high availability for the control plane by distributing the control plane across multiple nodes and installing a load balancer on top. Etcd can be colocated with the controller nodes (the default in k0s) to achieve a highly available datastore at the same time.

Note: In this context, even a two-node control plane is considered HA, even though it is not really HA from an etcd point of view. The same load balancer requirement still applies.

"},{"location":"high-availability/#network-considerations","title":"Network considerations","text":"

You should plan to allocate the control plane nodes to different zones. This protects the control plane against the failure of a single zone.

For etcd high availability it's recommended to configure 3 or 5 controller nodes. For more information, refer to the etcd documentation.

"},{"location":"high-availability/#load-balancer","title":"Load Balancer","text":"

Control plane high availability requires a TCP load balancer, which acts as a single point of contact to access the controllers. The load balancer needs to allow and route traffic to each controller through the following ports:

  • 6443 (for Kubernetes API)
  • 8132 (for Konnectivity)
  • 9443 (for controller join API)

The load balancer can be implemented in many different ways and k0s doesn't have any additional requirements. You can use for example HAProxy, NGINX or your cloud provider's load balancer.
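
As a brief sketch, an NGINX-based TCP load balancer using the stream module could look like the following for the Kubernetes API port. The controller addresses are placeholders, and the same pattern would be repeated for ports 8132 and 9443:

stream {\n    upstream kubeAPI_backend {\n        server <ip-address1>:6443;\n        server <ip-address2>:6443;\n        server <ip-address3>:6443;\n    }\n    server {\n        listen 6443;\n        proxy_pass kubeAPI_backend;\n    }\n}\n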

"},{"location":"high-availability/#example-configuration-haproxy","title":"Example configuration: HAProxy","text":"

Add the following lines to the end of the haproxy.cfg:

frontend kubeAPI\n    bind :6443\n    mode tcp\n    default_backend kubeAPI_backend\nfrontend konnectivity\n    bind :8132\n    mode tcp\n    default_backend konnectivity_backend\nfrontend controllerJoinAPI\n    bind :9443\n    mode tcp\n    default_backend controllerJoinAPI_backend\n\nbackend kubeAPI_backend\n    mode tcp\n    server k0s-controller1 <ip-address1>:6443 check check-ssl verify none\n    server k0s-controller2 <ip-address2>:6443 check check-ssl verify none\n    server k0s-controller3 <ip-address3>:6443 check check-ssl verify none\nbackend konnectivity_backend\n    mode tcp\n    server k0s-controller1 <ip-address1>:8132 check check-ssl verify none\n    server k0s-controller2 <ip-address2>:8132 check check-ssl verify none\n    server k0s-controller3 <ip-address3>:8132 check check-ssl verify none\nbackend controllerJoinAPI_backend\n    mode tcp\n    server k0s-controller1 <ip-address1>:9443 check check-ssl verify none\n    server k0s-controller2 <ip-address2>:9443 check check-ssl verify none\n    server k0s-controller3 <ip-address3>:9443 check check-ssl verify none\n\nlisten stats\n   bind *:9000\n   mode http\n   stats enable\n   stats uri /\n

The last block \"listen stats\" is optional, but can be helpful. It enables HAProxy statistics with a separate dashboard to monitor for example the health of each backend server. You can access it using a web browser:

http://<ip-addr>:9000\n

Restart HAProxy to apply the configuration changes.

"},{"location":"high-availability/#k0s-configuration","title":"k0s configuration","text":"

The load balancer address must be configured to k0s either by using k0s.yaml or by using k0sctl to automatically deploy all controllers with the same configuration:

"},{"location":"high-availability/#configuration-using-k0syaml-for-each-controller","title":"Configuration using k0s.yaml (for each controller)","text":"

Note that you need to insert your load balancer's public IP address in two places.

spec:\napi:\nexternalAddress: <load balancer public ip address>\nsans:\n- <load balancer public ip address>\n
"},{"location":"high-availability/#configuration-using-k0sctlyaml-for-k0sctl","title":"Configuration using k0sctl.yaml (for k0sctl)","text":"

Add the following lines to the end of the k0sctl.yaml. Note that you need to insert your load balancer's public IP address in two places.

  k0s:\nconfig:\nspec:\napi:\nexternalAddress: <load balancer public ip address>\nsans:\n- <load balancer public ip address>\n

For greater detail about k0s configuration, refer to the Full configuration file reference.

"},{"location":"install/","title":"Quick Start Guide","text":"

On completion of the Quick Start you will have a full Kubernetes cluster with a single node that includes both the controller and the worker. Such a setup is ideal for environments that do not require high availability or multiple nodes.

"},{"location":"install/#prerequisites","title":"Prerequisites","text":"

Note: Before proceeding, make sure to review the System Requirements.

Though the Quick Start material is written for Debian/Ubuntu, you can use it for any Linux distro that is running either a Systemd or OpenRC init system.

"},{"location":"install/#install-k0s","title":"Install k0s","text":"
  1. Download k0s

    Run the k0s download script to download the latest stable version of k0s and make it executable from /usr/bin/k0s.

    curl -sSLf https://get.k0s.sh | sudo sh\n
  2. Install k0s as a service

    The k0s install sub-command installs k0s as a system service on the local host that is running one of the supported init systems: Systemd or OpenRC. You can execute the install for workers, controllers or single node (controller+worker) instances.

    Run the following command to install a single node k0s that includes the controller and worker functions with the default configuration:

    sudo k0s install controller --single\n

    The k0s install controller sub-command accepts the same flags and parameters as the k0s controller. Refer to manual install for a custom config file example.

    It is possible to set environment variables with the install command:

    sudo k0s install controller -e ETCD_UNSUPPORTED_ARCH=arm\n

    The system service can be reinstalled with the --force flag:

    sudo k0s install controller --single --force\nsudo systemctl daemon-reload\n
  3. Start k0s as a service

    To start the k0s service, run:

    sudo k0s start\n

    The k0s service will start automatically after the node restart.

    A minute or two typically passes before the node is ready to deploy applications.

  4. Check service, logs and k0s status

    To get general information about your k0s instance's status, run:

    $ sudo k0s status\nVersion: v1.27.5+k0s.0\nProcess ID: 436\nRole: controller\nWorkloads: true\nInit System: linux-systemd\n
  5. Access your cluster using kubectl

    Note: k0s includes the Kubernetes command-line tool kubectl.

    Use kubectl to deploy your application or to check your node status:

    $ sudo k0s kubectl get nodes\nNAME   STATUS   ROLES    AGE    VERSION\nk0s    Ready    <none>   4m6s   v1.27.5+k0s\n
"},{"location":"install/#uninstall-k0s","title":"Uninstall k0s","text":"

The removal of k0s is a three-step process.

  1. Stop the service.

    sudo k0s stop\n
  2. Execute the k0s reset command.

    The k0s reset command cleans up the installed system service, data directories, containers, mounts and network namespaces.

    sudo k0s reset\n
  3. Reboot the system.

    A few small k0s fragments persist even after the reset (for example, iptables rules). As such, you should initiate a reboot after running the k0s reset command.

"},{"location":"install/#next-steps","title":"Next Steps","text":"
  • Install using k0sctl: Deploy multi-node clusters using just one command
  • Manual Install: (Advanced) Manually deploy multi-node clusters
  • Control plane configuration options: Networking and datastore configuration
  • Worker node configuration options: Node labels and kubelet arguments
  • Support for cloud providers: Load balancer or storage configuration
  • Installing the Traefik Ingress Controller: Ingress deployment information
  • Airgap/Offline installation: Airgap deployment
"},{"location":"k0s-in-docker/","title":"Run k0s in Docker","text":"

You can create a k0s cluster on top of docker. In such a scenario, by default, both controller and worker nodes are run in the same container to provide an easy local testing \"cluster\".

"},{"location":"k0s-in-docker/#prerequisites","title":"Prerequisites","text":"

You will require a Docker environment running on a Mac, Windows, or Linux system.

"},{"location":"k0s-in-docker/#container-images","title":"Container images","text":"

The k0s containers are published both on Docker Hub and GitHub. For simplicity, the examples given here use Docker Hub (GitHub requires separate authentication, which is not covered here). Alternative links include:

  • docker.io/k0sproject/k0s:1.27.5-k0s.0
  • ghcr.io/k0sproject/k0s:1.27.5-k0s.0

Note: Due to the Docker Hub tag validation scheme, we have to use - as the k0s version separator instead of the usual +. For example, k0s version v1.27.5+k0s.0 is tagged as docker.io/k0sproject/k0s:v1.27.5-k0s.0.

"},{"location":"k0s-in-docker/#start-k0s","title":"Start k0s","text":""},{"location":"k0s-in-docker/#1-initiate-k0s","title":"1. Initiate k0s","text":"

You can run your own k0s in Docker:

docker run -d --name k0s --hostname k0s --privileged -v /var/lib/k0s -p 6443:6443 docker.io/k0sproject/k0s:latest\n

Note: If you are using Docker Desktop as the runtime, starting from version 4.3.0 it uses cgroups v2 in the VM that runs the engine. This means you have to add some extra flags to the above command to get kubelet and containerd to work properly with cgroups v2:

--cgroupns=host -v /sys/fs/cgroup:/sys/fs/cgroup:rw\n
"},{"location":"k0s-in-docker/#2-optional-create-additional-workers","title":"2. (Optional) Create additional workers","text":"

You can attach multiple worker nodes to the cluster and then distribute your application containers across separate workers.

For each required worker:

  1. Acquire a join token for the worker:

    token=$(docker exec -t -i k0s k0s token create --role=worker)\n
  2. Run the container to create and join the new worker:

    docker run -d --name k0s-worker1 --hostname k0s-worker1 --privileged -v /var/lib/k0s docker.io/k0sproject/k0s:latest k0s worker $token\n
"},{"location":"k0s-in-docker/#3-access-your-cluster","title":"3. Access your cluster","text":"

Access your cluster using kubectl:

docker exec k0s kubectl get nodes\n

Alternatively, grab the kubeconfig file with docker exec k0s cat /var/lib/k0s/pki/admin.conf and paste it into Lens.

"},{"location":"k0s-in-docker/#use-docker-compose-alternative","title":"Use Docker Compose (alternative)","text":"

As an alternative you can run k0s using Docker Compose:

version: \"3.9\"\nservices:\nk0s:\ncontainer_name: k0s\nimage: docker.io/k0sproject/k0s:latest\ncommand: k0s controller --config=/etc/k0s/config.yaml --enable-worker\nhostname: k0s\nprivileged: true\nvolumes:\n- \"/var/lib/k0s\"\ntmpfs:\n- /run\n- /var/run\nports:\n- \"6443:6443\"\nnetwork_mode: \"bridge\"\nenvironment:\nK0S_CONFIG: |-\napiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s\n# Any additional configuration goes here ...\n
"},{"location":"k0s-in-docker/#known-limitations","title":"Known limitations","text":""},{"location":"k0s-in-docker/#no-custom-docker-networks","title":"No custom Docker networks","text":"

Currently, k0s nodes cannot be run if the containers are configured to use custom networks (for example, with --net my-net). This is because Docker sets up a custom DNS service within the network, which creates issues with CoreDNS. No completely reliable workarounds are available; however, no issues should arise from running k0s cluster(s) on a bridge network.

"},{"location":"k0s-in-docker/#next-steps","title":"Next Steps","text":"
  • Install using k0sctl: Deploy multi-node clusters using just one command
  • Control plane configuration options: Networking and datastore configuration
  • Worker node configuration options: Node labels and kubelet arguments
  • Support for cloud providers: Load balancer or storage configuration
  • Installing the Traefik Ingress Controller: Ingress deployment information
"},{"location":"k0s-multi-node/","title":"Manual Install (Advanced)","text":"

You can manually set up k0s nodes by creating a multi-node cluster that is locally managed on each node. This involves several steps: first install each node separately, and then connect the nodes together using access tokens.

"},{"location":"k0s-multi-node/#prerequisites","title":"Prerequisites","text":"

Note: Before proceeding, make sure to review the System Requirements.

Though the Manual Install material is written for Debian/Ubuntu, you can use it for any Linux distro that is running either a Systemd or OpenRC init system.

You can speed up the use of the k0s command by enabling shell completion.
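
For example, for bash, a sketch along these lines should work (the target path may differ on your distribution):

k0s completion bash | sudo tee /etc/bash_completion.d/k0s > /dev/null\n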

"},{"location":"k0s-multi-node/#install-k0s","title":"Install k0s","text":""},{"location":"k0s-multi-node/#1-download-k0s","title":"1. Download k0s","text":"

Run the k0s download script to download the latest stable version of k0s and make it executable from /usr/bin/k0s.

curl -sSLf https://get.k0s.sh | sudo sh\n

The download script accepts the following environment variables:

  • K0S_VERSION=v1.27.5+k0s.0: Select the version of k0s to be installed
  • DEBUG=true: Output commands and their arguments at execution

Note: If you require environment variables and use sudo, you can do:

curl -sSLf https://get.k0s.sh | sudo K0S_VERSION=v1.27.5+k0s.0 sh\n
"},{"location":"k0s-multi-node/#2-bootstrap-a-controller-node","title":"2. Bootstrap a controller node","text":"

Create a configuration file:

mkdir -p /etc/k0s\nk0s config create > /etc/k0s/k0s.yaml\n

Note: For information on settings modification, refer to the configuration documentation.

sudo k0s install controller -c /etc/k0s/k0s.yaml\n
sudo k0s start\n

The k0s process acts as a \"supervisor\" for all of the control plane components. Within moments, the control plane will be up and running.

"},{"location":"k0s-multi-node/#3-create-a-join-token","title":"3. Create a join token","text":"

You need a token to join workers to the cluster. The token embeds information that enables mutual trust between the worker and controller(s) and allows the node to join the cluster as a worker.

To get a token, run the following command on one of the existing controller nodes:

sudo k0s token create --role=worker\n

The resulting output is a long token string, which you can use to add a worker to the cluster.

For enhanced security, run the following command to set an expiration time for the token:

sudo k0s token create --role=worker --expiry=100h > token-file\n
"},{"location":"k0s-multi-node/#4-add-workers-to-the-cluster","title":"4. Add workers to the cluster","text":"

To join the worker, run k0s in the worker mode with the join token you created:

sudo k0s install worker --token-file /path/to/token/file\n
sudo k0s start\n
"},{"location":"k0s-multi-node/#about-tokens","title":"About tokens","text":"

The join tokens are base64-encoded kubeconfigs for several reasons:

  • Well-defined structure
  • Capable of direct use as bootstrap auth configs for kubelet
  • Embedding of CA info for mutual trust

The bearer token embedded in the kubeconfig is a bootstrap token. For controller join tokens and worker join tokens k0s uses different usage attributes to ensure that k0s can validate the token role on the controller side.
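
To illustrate, and assuming the token content is gzip-compressed before being base64-encoded, you could inspect a saved token like this (a sketch for curiosity only; it is not needed to join nodes):

base64 -d < token-file | gunzip | head -n 5\n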

"},{"location":"k0s-multi-node/#5-add-controllers-to-the-cluster","title":"5. Add controllers to the cluster","text":"

Note: Either etcd or an external data store (MySQL or Postgres) via kine must be in use to add new controller nodes to the cluster. Pay strict attention to the high availability configuration and make sure the configuration is identical for all controller nodes.

To create a join token for the new controller, run the following command on an existing controller:

sudo k0s token create --role=controller --expiry=1h > token-file\n

On the new controller, run:

sudo k0s install controller --token-file /path/to/token/file -c /etc/k0s/k0s.yaml\n

An important note here is that each controller in the cluster must have a k0s.yaml, otherwise some cluster nodes will use default configuration values, which will lead to inconsistent behavior. If your configuration file includes IP addresses (node address, sans, etcd peerAddress), remember to update them accordingly for this specific controller node.

sudo k0s start\n
"},{"location":"k0s-multi-node/#6-check-k0s-status","title":"6. Check k0s status","text":"

To get general information about your k0s instance's status:

 sudo k0s status\n
Version: v1.27.5+k0s.0\nProcess ID: 2769\nParent Process ID: 1\nRole: controller\nInit System: linux-systemd\nService file: /etc/systemd/system/k0scontroller.service\n
"},{"location":"k0s-multi-node/#7-access-your-cluster","title":"7. Access your cluster","text":"

Use the Kubernetes 'kubectl' command-line tool that comes with the k0s binary to deploy your application or check your node status:

sudo k0s kubectl get nodes\n
NAME   STATUS   ROLES    AGE    VERSION\nk0s    Ready    <none>   4m6s   v1.27.5+k0s\n

You can also access your cluster easily with Lens, simply by copying the kubeconfig and pasting it to Lens:

sudo cat /var/lib/k0s/pki/admin.conf\n

Note: To access the cluster from an external network you must replace localhost in the kubeconfig with the host IP address of your controller.

"},{"location":"k0s-multi-node/#next-steps","title":"Next Steps","text":"
  • Install using k0sctl: Deploy multi-node clusters using just one command
  • Control plane configuration options: Networking and datastore configuration
  • Worker node configuration options: Node labels and kubelet arguments
  • Support for cloud providers: Load balancer or storage configuration
  • Installing the Traefik Ingress Controller: Ingress deployment information
"},{"location":"k0s-single-node/","title":"K0s single node","text":"

See the Quick Start Guide.

"},{"location":"k0sctl-install/","title":"Install using k0sctl","text":"

k0sctl is a command-line tool for bootstrapping and managing k0s clusters. k0sctl connects to the provided hosts using SSH and gathers information on the hosts, with which it forms a cluster by configuring the hosts, deploying k0s, and then connecting the k0s nodes together.

With k0sctl, you can create multi-node clusters in a manner that is automatic and easily repeatable. This method is recommended for production cluster installation.

Note: The k0sctl install method is necessary for automatic upgrade.

"},{"location":"k0sctl-install/#prerequisites","title":"Prerequisites","text":"

You can execute k0sctl on any system that supports the Go language. Pre-compiled k0sctl binaries are available on the k0sctl releases page.

Note: For target host prerequisites information, refer to the k0s System Requirements.

"},{"location":"k0sctl-install/#install-k0s","title":"Install k0s","text":""},{"location":"k0sctl-install/#1-install-k0sctl-tool","title":"1. Install k0sctl tool","text":"

k0sctl is a single binary; instructions for downloading and installing it are available in the k0sctl GitHub repository.

"},{"location":"k0sctl-install/#2-configure-the-cluster","title":"2. Configure the cluster","text":"
  1. Run the following command to create a k0sctl configuration file:

    k0sctl init > k0sctl.yaml\n

    This action creates a k0sctl.yaml file in the current directory:

    apiVersion: k0sctl.k0sproject.io/v1beta1\nkind: Cluster\nmetadata:\nname: k0s-cluster\nspec:\nhosts:\n- role: controller\nssh:\naddress: 10.0.0.1 # replace with the controller's IP address\nuser: root\nkeyPath: ~/.ssh/id_rsa\n- role: worker\nssh:\naddress: 10.0.0.2 # replace with the worker's IP address\nuser: root\nkeyPath: ~/.ssh/id_rsa\n
  2. Provide each host with a valid IP address that is reachable by k0sctl, and the connection details for an SSH connection.

Note: Refer to the k0sctl documentation for k0sctl configuration specifications.

"},{"location":"k0sctl-install/#3-deploy-the-cluster","title":"3. Deploy the cluster","text":"

Run k0sctl apply to perform the cluster deployment:

k0sctl apply --config k0sctl.yaml\n
\u2800\u28ff\u28ff\u2847\u2800\u2800\u2880\u28f4\u28fe\u28ff\u281f\u2801\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u287f\u281b\u2801\u2800\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u2800\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588\n\u2800\u28ff\u28ff\u2847\u28e0\u28f6\u28ff\u287f\u280b\u2800\u2800\u2800\u28b8\u28ff\u2847\u2800\u2800\u2800\u28e0\u2800\u2800\u2880\u28e0\u2846\u28b8\u28ff\u28ff\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2588\u2588\u2588          \u2588\u2588\u2588    \u2588\u2588\u2588\n\u2800\u28ff\u28ff\u28ff\u28ff\u28df\u280b\u2800\u2800\u2800\u2800\u2800\u28b8\u28ff\u2847\u2800\u28b0\u28fe\u28ff\u2800\u2800\u28ff\u28ff\u2847\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u2800\u2588\u2588\u2588          \u2588\u2588\u2588    \u2588\u2588\u2588\n\u2800\u28ff\u28ff\u284f\u283b\u28ff\u28f7\u28e4\u2840\u2800\u2800\u2800\u2838\u281b\u2801\u2800\u2838\u280b\u2801\u2800\u2800\u28ff\u28ff\u2847\u2808\u2809\u2809\u2809\u2809\u2809\u2809\u2809\u2809\u28b9\u28ff\u28ff\u2800\u2588\u2588\u2588          \u2588\u2588\u2588    \u2588\u2588\u2588\n\u2800\u28ff\u28ff\u2847\u2800\u2800\u2819\u28bf\u28ff\u28e6\u28c0\u2800\u2800\u2800\u28e0\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28ff\u28ff\u2847\u28b0\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28fe\u28ff\u28ff\u2800\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588    \u2588\u2588\u2588    \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\n\nINFO k0sctl 0.0.0 Copyright 2021, Mirantis Inc.\nINFO Anonymized telemetry will be sent to Mirantis.\nINFO By continuing to use k0sctl you agree to these terms:\nINFO https://k0sproject.io/licenses/eula\nINFO ==> Running phase: Connect to hosts\nINFO [ssh] 10.0.0.1:22: connected\nINFO [ssh] 10.0.0.2:22: connected\nINFO ==> Running phase: Detect host operating systems\nINFO [ssh] 10.0.0.1:22: is running Ubuntu 20.10\nINFO [ssh] 10.0.0.2:22: is running Ubuntu 20.10\nINFO ==> Running phase: Prepare hosts\nINFO [ssh] 10.0.0.1:22: installing kubectl\nINFO ==> Running phase: Gather host facts\nINFO [ssh] 10.0.0.1:22: discovered 10.12.18.133 as private address\nINFO ==> Running phase: Validate hosts\nINFO ==> Running phase: Gather k0s facts\nINFO ==> Running phase: Download K0s on the hosts\nINFO [ssh] 10.0.0.2:22: downloading k0s 0.11.0\nINFO [ssh] 10.0.0.1:22: downloading k0s 0.11.0\nINFO ==> Running phase: Configure K0s\nWARN [ssh] 10.0.0.1:22: generating default configuration\nINFO [ssh] 10.0.0.1:22: validating configuration\nINFO [ssh] 10.0.0.1:22: configuration was changed\nINFO ==> Running phase: Initialize K0s Cluster\nINFO [ssh] 10.0.0.1:22: installing k0s controller\nINFO [ssh] 10.0.0.1:22: waiting for the k0s service to start\nINFO [ssh] 10.0.0.1:22: waiting for kubernetes api to respond\nINFO ==> Running phase: Install workers\nINFO [ssh] 10.0.0.1:22: generating token\nINFO [ssh] 10.0.0.2:22: writing join token\nINFO [ssh] 10.0.0.2:22: installing k0s worker\nINFO [ssh] 10.0.0.2:22: starting service\nINFO [ssh] 10.0.0.2:22: waiting for node to become ready\nINFO ==> Running phase: Disconnect from hosts\nINFO ==> Finished in 2m2s\nINFO k0s cluster version 0.11.0 is now installed\nINFO Tip: To access the cluster you can now fetch the admin kubeconfig using:\nINFO      k0sctl kubeconfig\n
"},{"location":"k0sctl-install/#4-access-the-cluster","title":"4. Access the cluster","text":"

To access your k0s cluster, use k0sctl to generate a kubeconfig for the purpose.

k0sctl kubeconfig > kubeconfig\n

With the kubeconfig, you can access your cluster using either kubectl or Lens.

kubectl get pods --kubeconfig kubeconfig -A\n
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE\nkube-system   calico-kube-controllers-5f6546844f-w8x27   1/1     Running   0          3m50s\nkube-system   calico-node-vd7lx                          1/1     Running   0          3m44s\nkube-system   coredns-5c98d7d4d8-tmrwv                   1/1     Running   0          4m10s\nkube-system   konnectivity-agent-d9xv2                   1/1     Running   0          3m31s\nkube-system   kube-proxy-xp9r9                           1/1     Running   0          4m4s\nkube-system   metrics-server-6fbcd86f7b-5frtn            1/1     Running   0          3m51s\n
"},{"location":"k0sctl-install/#known-limitations","title":"Known limitations","text":"
  • k0sctl does not perform any discovery of hosts, and thus it only operates on the hosts listed in the provided configuration.
  • k0sctl can only add more nodes to the cluster. It cannot remove existing nodes.
"},{"location":"k0sctl-install/#next-steps","title":"Next Steps","text":"
  • Control plane configuration options: Networking and datastore configuration
  • Worker node configuration options: Node labels and kubelet arguments
  • Support for cloud providers: Load balancer or storage configuration
  • Installing the Traefik Ingress Controller: Ingress deployment information
"},{"location":"manifests/","title":"Manifest Deployer","text":"

Included with k0s, Manifest Deployer is one of two methods you can use to run k0s with your preferred extensions (the other being by defining your extensions as Helm charts).

"},{"location":"manifests/#overview","title":"Overview","text":"

Manifest Deployer runs on the controller nodes and provides an easy way to automatically deploy manifests at runtime.

By default, k0s reads all manifests under /var/lib/k0s/manifests and ensures that their state matches the cluster state. Moreover, on removal of a manifest file, k0s will automatically prune all of its associated resources.

The use of Manifest Deployer is quite similar to the use of the kubectl apply command. The main difference between the two is that Manifest Deployer constantly monitors the directory for changes, and thus you do not need to manually apply changes that are made to the manifest files.

"},{"location":"manifests/#note","title":"Note","text":"
  • Each directory that is a direct descendant of /var/lib/k0s/manifests is considered to be its own \"stack\". Nested directories (further subfolders), however, are excluded from the stack mechanism and thus are not automatically deployed by the Manifest Deployer.
  • k0s uses the independent stack mechanism for some of its internal in-cluster components, as well as for other resources. Be sure to only touch the manifests that are not managed by k0s.
  • Explicitly define the namespace in the manifests (Manifest Deployer does not have a default namespace).
"},{"location":"manifests/#example","title":"Example","text":"

To try Manifest Deployer, create a new folder under /var/lib/k0s/manifests and then create a manifest file (such as nginx.yaml) with the following content:

apiVersion: v1\nkind: Namespace\nmetadata:\nname: nginx\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: nginx-deployment\nnamespace: nginx\nspec:\nselector:\nmatchLabels:\napp: nginx\nreplicas: 3\ntemplate:\nmetadata:\nlabels:\napp: nginx\nspec:\ncontainers:\n- name: nginx\nimage: nginx:latest\nports:\n- containerPort: 80\n

New pods will appear soon thereafter.

sudo k0s kubectl get pods --namespace nginx\n
NAME                                READY   STATUS    RESTARTS   AGE\nnginx-deployment-66b6c48dd5-8zq7d   1/1     Running   0          10m\nnginx-deployment-66b6c48dd5-br4jv   1/1     Running   0          10m\nnginx-deployment-66b6c48dd5-sqvhb   1/1     Running   0          10m\n
"},{"location":"networking/","title":"Networking","text":""},{"location":"networking/#in-cluster-networking","title":"In-cluster networking","text":"

k0s supports two Container Network Interface (CNI) providers out of the box, Kube-router and Calico. In addition, k0s can support your own CNI configuration.

"},{"location":"networking/#notes","title":"Notes","text":"
  • When deploying k0s with the default settings, all pods on a node can communicate with all pods on all nodes. No configuration changes are needed to get started.
  • Once you initialize the cluster with a network provider, the only way to change providers is through a full cluster redeployment.
"},{"location":"networking/#kube-router","title":"Kube-router","text":"

Kube-router is built into k0s, and so by default the distribution uses it for network provision. Kube-router uses the standard Linux networking stack and toolset, and you can set up CNI networking without any overlays by using BGP as the main mechanism for in-cluster networking.

  • Supports armv7 (among many other archs)
  • Uses slightly fewer resources (~15%)
  • Does NOT support dual-stack (IPv4/IPv6) networking
  • Does NOT support Windows nodes
  • Does NOT activate hairpin mode by default
"},{"location":"networking/#calico","title":"Calico","text":"

In addition to Kube-router, k0s also offers Calico as an alternative, built-in network provider. Calico is a layer 3 container networking solution that routes packets to pods. It supports, for example, pod-specific network policies that help to secure kubernetes clusters in demanding use cases. Calico uses the vxlan overlay network by default, and you can configure it to support ipip (IP-in-IP).

  • Does NOT support armv7
  • Uses slightly more resources
  • Supports dual-stack (IPv4/IPv6) networking
  • Supports Windows nodes
"},{"location":"networking/#custom-cni-configuration","title":"Custom CNI configuration","text":"

You can opt out of having k0s manage the network setup and choose instead to use any network plugin that adheres to the CNI specification. To do so, configure custom as the network provider in the k0s configuration file (k0s.yaml). You can do this, for example, by pushing network provider manifests into /var/lib/k0s/manifests, from where k0s controllers will collect them for deployment into the cluster (for more information, refer to Manifest Deployer).
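
As a minimal sketch, the relevant part of k0s.yaml would look like this:

spec:\n  network:\n    provider: custom\n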

"},{"location":"networking/#controller-worker-communication","title":"Controller-Worker communication","text":"

One goal of k0s is to allow for the deployment of an isolated control plane, which may prevent the establishment of an IP route between controller nodes and the pod network. Thus, to enable this communication path (which is mandated by conformance tests), k0s deploys Konnectivity service to proxy traffic from the API server (control plane) into the worker nodes. This ensures that we can always fulfill all the Kubernetes API functionalities, but still operate the control plane in total isolation from the workers.

Note: To allow Konnectivity agents running on the worker nodes to establish the connection, configure your firewalls for outbound access, port 8132. Moreover, configure your firewalls for outbound access, port 6443, in order to access Kube-API from the worker nodes.

"},{"location":"networking/#required-ports-and-protocols","title":"Required ports and protocols","text":"Protocol Port Service Direction Notes TCP 2380 etcd peers controller <-> controller TCP 6443 kube-apiserver Worker, CLI => controller Authenticated Kube API using Kube TLS client certs, ServiceAccount tokens with RBAC TCP 179 kube-router worker <-> worker BGP routing sessions between peers UDP 4789 Calico worker <-> worker Calico VXLAN overlay TCP 10250 kubelet Master, Worker => Host * Authenticated kubelet API for the master node kube-apiserver (and heapster/metrics-server addons) using TLS client certs TCP 9443 k0s-api controller <-> controller k0s controller join API, TLS with token auth TCP 8132 konnectivity worker <-> controller Konnectivity is used as \"reverse\" tunnel between kube-apiserver and worker kubelets"},{"location":"networking/#iptables","title":"iptables","text":"

iptables can work in two distinct modes, legacy and nftables. k0s autodetects the mode and prefers nftables. To check which mode k0s is configured with, check ls -lah /var/lib/k0s/bin/. The iptables link target reveals the mode that k0s selected. k0s uses the same logic as other Kubernetes components, but to ensure that all components have picked up the same mode, you can check via:

  • kube-proxy: nsenter -t $(pidof kube-proxy) -m iptables -V
  • kube-router: nsenter -t $(pidof kube-router) -m /sbin/iptables -V
  • calico: nsenter -t $(pidof -s calico-node) -m iptables -V

There are known version incompatibility issues between iptables versions. k0s ships (in /var/lib/k0s/bin) a version of iptables that is tested to interoperate with all other Kubernetes components it ships with. However, if other tooling (firewalls etc.) on your hosts uses iptables and the host's iptables version differs from the one k0s (and other Kubernetes components) ships with, it may cause networking issues. This is because iptables, being user-space tooling, does not provide any strong version compatibility guarantees.

"},{"location":"networking/#firewalld-k0s","title":"Firewalld & k0s","text":"

If you are using firewalld on your hosts you need to ensure it is configured to use the same FirewallBackend as k0s and other Kubernetes components use. Otherwise networking will be broken in various ways.

Here's an example configuration for a tested working networking setup:

[root@rhel-test ~]# firewall-cmd --list-all\npublic (active)\ntarget: default\n  icmp-block-inversion: no\n  interfaces: eth0\n  sources: 10.244.0.0/16 10.96.0.0/12\n  services: cockpit dhcpv6-client ssh\n  ports: 80/tcp 6443/tcp 8132/tcp 10250/tcp 179/tcp 179/udp\n  protocols: forward: no\n  masquerade: yes\n  forward-ports: source-ports: icmp-blocks: rich rules:\n
"},{"location":"nllb/","title":"Node-local load balancing","text":"

Note: This feature is experimental! Expect instabilities and/or breaking changes.

For clusters that don't have an externally managed load balancer for the k0s control plane, there is another option to get a highly available control plane, at least from within the cluster. K0s calls this \"node-local load balancing\". In contrast to an externally managed load balancer, node-local load balancing takes place exclusively on the worker nodes. It does not contribute to making the control plane highly available to the outside world (e.g. humans interacting with the cluster using management tools such as Lens or kubectl), but rather makes the cluster itself internally resilient to controller node outages.

"},{"location":"nllb/#technical-functionality","title":"Technical functionality","text":"

The k0s worker process manages a load balancer on each worker node's loopback interface and configures the relevant components to use that load balancer. This allows for requests from worker components to the control plane to be distributed among all currently available controller nodes, rather than being directed to the controller node that has been used to join a particular worker into the cluster. This improves the reliability and fault tolerance of the cluster in case a controller node becomes unhealthy.

Envoy is the only load balancer that is supported so far. Please note that Envoy is not available on ARMv7, so node-local load balancing is currently unavailable on that platform.

"},{"location":"nllb/#enabling-in-a-cluster","title":"Enabling in a cluster","text":"

In order to use node-local load balancing, the cluster needs to comply with the following:

  • The cluster doesn't use an externally managed load balancer, i.e. the cluster configuration doesn't specify a non-empty spec.api.externalAddress.
  • The cluster doesn't use tunneled networking mode, i.e. the cluster configuration doesn't specify spec.api.tunneledNetworkingMode as true.
  • K0s isn't running as a single node, i.e. it isn't started using the --single flag.
  • The cluster should have multiple controller nodes. Node-local load balancing also works with a single controller node, but is only useful in conjunction with a highly available control plane.

Add the following to the cluster configuration (k0s.yaml):

spec:\nnetwork:\nnodeLocalLoadBalancing:\nenabled: true\ntype: EnvoyProxy\n

Or alternatively, if using k0sctl, add the following to the k0sctl configuration (k0sctl.yaml):

spec:\nk0s:\nconfig:\nspec:\nnetwork:\nnodeLocalLoadBalancing:\nenabled: true\ntype: EnvoyProxy\n

All newly added worker nodes will then use node-local load balancing. The k0s worker process on worker nodes that are already running must be restarted for the new configuration to take effect.
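
For example, on an already-running worker node this could be as simple as:

sudo k0s stop\nsudo k0s start\n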

"},{"location":"nllb/#full-example-using-k0sctl","title":"Full example using k0sctl","text":"

The following example shows a full k0sctl configuration file featuring three controllers and two workers with node-local load balancing enabled:

apiVersion: k0sctl.k0sproject.io/v1beta1\nkind: Cluster\nmetadata:\nname: k0s-cluster\nspec:\nk0s:\nversion: v1.27.5+k0s.0\nconfig:\nspec:\nnetwork:\nnodeLocalLoadBalancing:\nenabled: true\ntype: EnvoyProxy\nhosts:\n- role: controller\nssh:\naddress: 10.81.146.254\nkeyPath: k0s-ssh-private-key.pem\nport: 22\nuser: k0s\n- role: controller\nssh:\naddress: 10.81.146.184\nkeyPath: k0s-ssh-private-key.pem\nport: 22\nuser: k0s\n- role: controller\nssh:\naddress: 10.81.146.113\nkeyPath: k0s-ssh-private-key.pem\nport: 22\nuser: k0s\n- role: worker\nssh:\naddress: 10.81.146.198\nkeyPath: k0s-ssh-private-key.pem\nport: 22\nuser: k0s\n- role: worker\nssh:\naddress: 10.81.146.51\nkeyPath: k0s-ssh-private-key.pem\nport: 22\nuser: k0s\n

Save the above configuration into a file called k0sctl.yaml and apply it in order to bootstrap the cluster:

$ k0sctl apply\n\u28ff\u28ff\u2847\u2800\u2800\u2880\u28f4\u28fe\u28ff\u281f\u2801\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u287f\u281b\u2801\u2800\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u2800\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588\n\u28ff\u28ff\u2847\u28e0\u28f6\u28ff\u287f\u280b\u2800\u2800\u2800\u28b8\u28ff\u2847\u2800\u2800\u2800\u28e0\u2800\u2800\u2880\u28e0\u2846\u28b8\u28ff\u28ff\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2588\u2588\u2588          \u2588\u2588\u2588    \u2588\u2588\u2588\n\u28ff\u28ff\u28ff\u28ff\u28df\u280b\u2800\u2800\u2800\u2800\u2800\u28b8\u28ff\u2847\u2800\u28b0\u28fe\u28ff\u2800\u2800\u28ff\u28ff\u2847\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u2800\u2588\u2588\u2588          \u2588\u2588\u2588    \u2588\u2588\u2588\n\u28ff\u28ff\u284f\u283b\u28ff\u28f7\u28e4\u2840\u2800\u2800\u2800\u2838\u281b\u2801\u2800\u2838\u280b\u2801\u2800\u2800\u28ff\u28ff\u2847\u2808\u2809\u2809\u2809\u2809\u2809\u2809\u2809\u2809\u28b9\u28ff\u28ff\u2800\u2588\u2588\u2588          \u2588\u2588\u2588    \u2588\u2588\u2588\n\u28ff\u28ff\u2847\u2800\u2800\u2819\u28bf\u28ff\u28e6\u28c0\u2800\u2800\u2800\u28e0\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28ff\u28ff\u2847\u28b0\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28fe\u28ff\u28ff\u2800\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588    \u2588\u2588\u2588    \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\nk0sctl 0.15.0 Copyright 2022, k0sctl authors.\nBy continuing to use k0sctl you agree to these terms:\nhttps://k0sproject.io/licenses/eula\nlevel=info msg=\"==> Running phase: Connect to hosts\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: connected\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: connected\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: connected\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: connected\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: connected\"\nlevel=info msg=\"==> Running phase: Detect host operating systems\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: is running Alpine Linux v3.17\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: is running Alpine Linux v3.17\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: is running Alpine Linux v3.17\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: is running Alpine Linux v3.17\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: is running Alpine Linux v3.17\"\nlevel=info msg=\"==> Running phase: Acquire exclusive host lock\"\nlevel=info msg=\"==> Running phase: Prepare hosts\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: installing packages (curl)\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: installing packages (curl, iptables)\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: installing packages (curl)\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: installing packages (curl, iptables)\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: installing packages (curl)\"\nlevel=info msg=\"==> Running phase: Gather host facts\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: using k0s-controller-1 as hostname\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: using k0s-worker-1 as hostname\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: using k0s-worker-0 as hostname\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: using k0s-controller-2 as hostname\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: using k0s-controller-0 as hostname\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: discovered eth0 as private interface\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: discovered eth0 
as private interface\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: discovered eth0 as private interface\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: discovered eth0 as private interface\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: discovered eth0 as private interface\"\nlevel=info msg=\"==> Running phase: Download k0s binaries to local host\"\nlevel=info msg=\"==> Running phase: Validate hosts\"\nlevel=info msg=\"==> Running phase: Gather k0s facts\"\nlevel=info msg=\"==> Running phase: Validate facts\"\nlevel=info msg=\"==> Running phase: Upload k0s binaries to hosts\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.27.5+k0s.0\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.27.5+k0s.0\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.27.5+k0s.0\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.27.5+k0s.0\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.27.5+k0s.0\"\nlevel=info msg=\"==> Running phase: Configure k0s\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: validating configuration\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: validating configuration\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: validating configuration\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: configuration was changed\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: configuration was changed\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: configuration was changed\"\nlevel=info msg=\"==> Running phase: Initialize the k0s cluster\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: installing k0s controller\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: waiting for the k0s service to start\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: waiting for kubernetes api to respond\"\nlevel=info msg=\"==> Running phase: Install controllers\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: generating token\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: writing join token\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: installing k0s controller\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: starting service\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: waiting for the k0s service to start\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: waiting for kubernetes api to respond\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: generating token\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: writing join token\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: installing k0s controller\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: starting service\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: waiting for the k0s service to start\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: waiting for kubernetes api to respond\"\nlevel=info msg=\"==> Running phase: Install workers\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: validating api connection to https://10.81.146.254:6443\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: validating api connection to https://10.81.146.254:6443\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: generating token\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: writing join token\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: writing join token\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: installing k0s worker\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: installing k0s worker\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: starting service\"\nlevel=info msg=\"[ssh] 
10.81.146.51:22: starting service\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: waiting for node to become ready\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: waiting for node to become ready\"\nlevel=info msg=\"==> Running phase: Release exclusive host lock\"\nlevel=info msg=\"==> Running phase: Disconnect from hosts\"\nlevel=info msg=\"==> Finished in 3m30s\"\nlevel=info msg=\"k0s cluster version v1.27.5+k0s.0 is now installed\"\nlevel=info msg=\"Tip: To access the cluster you can now fetch the admin kubeconfig using:\"\nlevel=info msg=\"     k0sctl kubeconfig\"\n

The cluster with its two worker nodes should be available by now. Set up the kubeconfig file in order to interact with it:

k0sctl kubeconfig > k0s-kubeconfig\nexport KUBECONFIG=$(pwd)/k0s-kubeconfig\n

The three controllers are available and provide API Server endpoints:

$ kubectl -n kube-node-lease get \\\nlease/k0s-ctrl-k0s-controller-0 \\\nlease/k0s-ctrl-k0s-controller-1 \\\nlease/k0s-ctrl-k0s-controller-2 \\\nlease/k0s-endpoint-reconciler\nNAME                        HOLDER                                                             AGE\nk0s-ctrl-k0s-controller-0   9ec2b221890e5ed6f4cc70377bfe809fef5be541a2774dc5de81db7acb2786f1   2m37s\nk0s-ctrl-k0s-controller-1   fe45284924abb1bfce674e5a9aa8d647f17c81e53bbab17cf28288f13d5e8f97   2m18s\nk0s-ctrl-k0s-controller-2   5ab43278e63fc863b2a7f0fe1aab37316a6db40c5a3d8a17b9d35b5346e23b3d   2m9s\nk0s-endpoint-reconciler     9ec2b221890e5ed6f4cc70377bfe809fef5be541a2774dc5de81db7acb2786f1   2m37s\n\n$ kubectl -n default get endpoints\nNAME         ENDPOINTS                                                  AGE\nkubernetes   10.81.146.113:6443,10.81.146.184:6443,10.81.146.254:6443   2m49s\n

The first controller is the current k0s leader. The two worker nodes can be listed, too:

$ kubectl get nodes -owide\nNAME           STATUS   ROLES    AGE     VERSION       INTERNAL-IP     EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION   CONTAINER-RUNTIME\nk0s-worker-0   Ready    <none>   2m16s   v1.27.5+k0s   10.81.146.198   <none>        Alpine Linux v3.17   5.15.83-0-virt   containerd://1.7.1\nk0s-worker-1   Ready    <none>   2m15s   v1.27.5+k0s   10.81.146.51    <none>        Alpine Linux v3.17   5.15.83-0-virt   containerd://1.7.1\n

There is one node-local load balancer pod running for each worker node:

$ kubectl -n kube-system get pod -owide -l app.kubernetes.io/managed-by=k0s,app.kubernetes.io/component=nllb\nNAME                READY   STATUS    RESTARTS   AGE   IP              NODE           NOMINATED NODE   READINESS GATES\nnllb-k0s-worker-0   1/1     Running   0          81s   10.81.146.198   k0s-worker-0   <none>           <none>\nnllb-k0s-worker-1   1/1     Running   0          85s   10.81.146.51    k0s-worker-1   <none>           <none>\n

The cluster is using node-local load balancing and is able to tolerate the outage of one controller node. Shut down the first controller to simulate a failure condition:

$ ssh -i k0s-ssh-private-key.pem k0s@10.81.146.254 'echo \"Powering off $(hostname) ...\" && sudo poweroff'\nPowering off k0s-controller-0 ...\n

Node-local load balancing provides high availability from within the cluster, not from the outside. The generated kubeconfig file lists the first controller's IP as the Kubernetes API server address by default. As this controller is gone by now, a subsequent call to kubectl will fail:

$ kubectl get nodes\nUnable to connect to the server: dial tcp 10.81.146.254:6443: connect: no route to host\n

Changing the server address in k0s-kubeconfig from the first controller to another one makes the cluster accessible again. Pick one of the other controller IP addresses and put that into the kubeconfig file. The addresses are listed both in k0sctl.yaml as well as in the output of kubectl -n default get endpoints above.

$ ssh -i k0s-ssh-private-key.pem k0s@10.81.146.184 hostname\nk0s-controller-1\n\n$ sed -i s#https://10\\\\.81\\\\.146\\\\.254:6443#https://10.81.146.184:6443#g k0s-kubeconfig\n\n$ kubectl get nodes -owide\nNAME           STATUS   ROLES    AGE     VERSION       INTERNAL-IP     EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION   CONTAINER-RUNTIME\nk0s-worker-0   Ready    <none>   3m35s   v1.27.5+k0s   10.81.146.198   <none>        Alpine Linux v3.17   5.15.83-0-virt   containerd://1.7.1\nk0s-worker-1   Ready    <none>   3m34s   v1.27.5+k0s   10.81.146.51    <none>        Alpine Linux v3.17   5.15.83-0-virt   containerd://1.7.1\n\n$ kubectl -n kube-system get pods -owide -l app.kubernetes.io/managed-by=k0s,app.kubernetes.io/component=nllb\nNAME                READY   STATUS    RESTARTS   AGE     IP              NODE           NOMINATED NODE   READINESS GATES\nnllb-k0s-worker-0   1/1     Running   0          2m31s   10.81.146.198   k0s-worker-0   <none>           <none>\nnllb-k0s-worker-1   1/1     Running   0          2m35s   10.81.146.51    k0s-worker-1   <none>           <none>\n

The first controller is no longer active. Its IP address is not listed in the default/kubernetes Endpoints resource and its k0s controller lease is orphaned:

$ kubectl -n default get endpoints\nNAME         ENDPOINTS                               AGE\nkubernetes   10.81.146.113:6443,10.81.146.184:6443   3m56s\n\n$ kubectl -n kube-node-lease get \\\nlease/k0s-ctrl-k0s-controller-0 \\\nlease/k0s-ctrl-k0s-controller-1 \\\nlease/k0s-ctrl-k0s-controller-2 \\\nlease/k0s-endpoint-reconciler\nNAME                        HOLDER                                                             AGE\nk0s-ctrl-k0s-controller-0                                                                      4m47s\nk0s-ctrl-k0s-controller-1   fe45284924abb1bfce674e5a9aa8d647f17c81e53bbab17cf28288f13d5e8f97   4m28s\nk0s-ctrl-k0s-controller-2   5ab43278e63fc863b2a7f0fe1aab37316a6db40c5a3d8a17b9d35b5346e23b3d   4m19s\nk0s-endpoint-reconciler     5ab43278e63fc863b2a7f0fe1aab37316a6db40c5a3d8a17b9d35b5346e23b3d   4m47s\n

Despite that controller being unavailable, the cluster remains operational. The third controller has become the new k0s leader. Workloads will run just fine:

$ kubectl -n default run nginx --image=nginx\npod/nginx created\n\n$ kubectl -n default get pods -owide\nNAME    READY   STATUS    RESTARTS   AGE   IP           NODE           NOMINATED NODE   READINESS GATES\nnginx   1/1     Running   0          16s   10.244.0.5   k0s-worker-1   <none>           <none>\n\n$ kubectl -n default logs nginx\n/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration\n/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/\n/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh\n10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf\n10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf\n/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh\n/docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh\n/docker-entrypoint.sh: Configuration complete; ready for start up\n[notice] 1#1: using the \"epoll\" event method\n[notice] 1#1: nginx/1.23.3\n[notice] 1#1: built by gcc 10.2.1 20210110 (Debian 10.2.1-6)\n[notice] 1#1: OS: Linux 5.15.83-0-virt\n[notice] 1#1: getrlimit(RLIMIT_NOFILE): 1048576:1048576\n[notice] 1#1: start worker processes\n[notice] 1#1: start worker process 28\n
"},{"location":"podsecurity/","title":"Pod Security Standards","text":"

Since Pod Security Policies have been removed in Kubernetes v1.25, Kubernetes offers Pod Security Standards \u2013 a new way to enhance cluster security.

To enable PSS in k0s you need to create an admission controller config file:

```yaml\napiVersion: apiserver.config.k8s.io/v1\nkind: AdmissionConfiguration\nplugins:\n- name: PodSecurity\n  configuration:\n    apiVersion: pod-security.admission.config.k8s.io/v1beta1\n    kind: PodSecurityConfiguration\n    # Defaults applied when a mode label is not set.\n    defaults:\n      enforce: \"privileged\"\n      enforce-version: \"latest\"\n    exemptions:\n      # Don't forget to exempt namespaces or users that are responsible for deploying\n      # cluster components, because they need to run privileged containers\n      usernames: [\"admin\"]\n      namespaces: [\"kube-system\"]\n```\n

Add these extra arguments to the k0s configuration:

```yaml\napiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nspec:\n  api:\n    extraArgs:\n      admission-control-config-file: /path/to/admission/control/config.yaml\n```\n
"},{"location":"raspberry-pi4/","title":"Create a Raspberry Pi 4 cluster","text":""},{"location":"raspberry-pi4/#prerequisites","title":"Prerequisites","text":"

This guide assumes that you use a Raspberry Pi 4 Model B computer and a sufficiently large SD card of at least 32 GB. We will be using Ubuntu Linux for this guide, although k0s should run quite fine on other 64-bit Linux distributions for the Raspberry Pi as well. Please file a Bug if you encounter any obstacles.

"},{"location":"raspberry-pi4/#set-up-the-system","title":"Set up the system","text":""},{"location":"raspberry-pi4/#prepare-sd-card-and-boot-up-the-raspberry-pi","title":"Prepare SD card and boot up the Raspberry Pi","text":"

Install Ubuntu Server 22.04.1 LTS 64-bit for Raspberry Pi. Ubuntu provides a step-by-step guide for the installation process. It uses Raspberry Pi Imager, a specialized imaging utility that you can use to write the Ubuntu image, amongst others, to your SD cards. Follow that guide to get a working installation. (You can skip part 5 of the guide, since we won't need a desktop environment to run k0s.)

Alternatively, you can also opt to download the Ubuntu server image for Raspberry Pi manually and write it to an SD card using a tool like dd:

wget https://cdimage.ubuntu.com/releases/22.04.1/release/ubuntu-22.04.1-preinstalled-server-arm64+raspi.img.xz\nunxz ubuntu-22.04.1-preinstalled-server-arm64+raspi.img.xz\ndd if=ubuntu-22.04.1-preinstalled-server-arm64+raspi.img of=/dev/mmcblk0 bs=4M status=progress\n

Note: The manual process is more prone to accidental data loss than the guided one via Raspberry Pi Imager. Be sure to choose the correct device names. The previous content of the SD card will be wiped. Moreover, the partition written to the SD card needs to be resized to make the full capacity of the card available to Ubuntu. This can be achieved, for example, in this way:

growpart /dev/mmcblk0 2\nresize2fs /dev/mmcblk0p2\n

Ubuntu uses cloud-init to allow for automated customizations of the system configuration. The cloud-init configuration files are located on the boot partition of the SD card. You can mount that partition and modify those, e.g. to provision network configuration, users, authorized SSH keys, additional packages and also an automatic installation of k0s.
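As a minimal sketch of such a customization (all names, keys and packages below are placeholders, not part of this guide), the user-data file on the boot partition could look roughly like this:

```yaml
#cloud-config
# Illustrative user-data snippet; adjust hostname, users, keys and commands to your environment.
hostname: k0s-worker-1
users:
  - name: ubuntu
    sudo: ALL=(ALL) NOPASSWD:ALL
    ssh_authorized_keys:
      - ssh-ed25519 AAAA... user@build-host
package_update: true
runcmd:
  # Download the latest stable k0s binary on first boot (same script as used later in this guide)
  - [sh, -c, "curl -sSLf https://get.k0s.sh | sh"]
```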

After you have prepared the SD card, plug it into the Raspberry Pi and boot it up. Once cloud-init has finished bootstrapping the system, the default login credentials are the user ubuntu with password ubuntu (which you will be prompted to change on first login).

"},{"location":"raspberry-pi4/#review-network-configurations","title":"Review network configurations","text":"

Note: For network configuration purposes, this documentation assumes that all of your computers are connected on the same subnet.

Review k0s's required ports and protocols to ensure that your network and firewall configurations allow necessary traffic for the cluster.

Review the Ubuntu Server Networking Configuration documentation to ensure that all systems have a static IP address on the network, or that the network provides a static DHCP lease for the nodes. If the network is to be managed via cloud-init, please refer to its documentation.
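As an illustration only (interface name, addresses and gateway are placeholders), a static address on Ubuntu Server can be configured with a netplan file such as /etc/netplan/99-static.yaml, applied with sudo netplan apply:

```yaml
# Illustrative netplan configuration for a static IPv4 address
network:
  version: 2
  ethernets:
    eth0:
      dhcp4: false
      addresses: [192.168.1.10/24]
      routes:
        - to: default
          via: 192.168.1.1
      nameservers:
        addresses: [192.168.1.1]
```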

"},{"location":"raspberry-pi4/#optional-provision-ssh-keys","title":"(Optional) Provision SSH keys","text":"

Ubuntu Server deploys and enables OpenSSH via cloud-init by default. Confirm, though, that the SSH key of whichever user you will deploy the cluster with on the build system is copied to each node's root user. Before you start, the configuration should be such that the current user can run:

ssh root@${HOST}\n

Here, ${HOST} is any node, and the login must succeed without further prompts.
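One way to achieve this, assuming an existing key pair on the build system (the key path is illustrative), is:

```shell
# Copy the build system's public key to the node's root user
ssh-copy-id -i ~/.ssh/id_ed25519.pub root@${HOST}
```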

"},{"location":"raspberry-pi4/#optional-create-a-swap-file","title":"(Optional) Create a swap file","text":"

While having a swap file is technically optional, it can help to ease memory pressure when running memory-intensive workloads or on Raspberry Pis with less than 8 GB of RAM.

  1. To create a swap file:

    fallocate -l 2G /swapfile && \\\nchmod 0600 /swapfile && \\\nmkswap /swapfile && \\\nswapon -a\n
  2. Ensure that swap usage is not too aggressive by setting vm.swappiness=10 via sudo sysctl (the default is generally higher) and configuring it to be persistent in /etc/sysctl.d/* (a sketch is shown after this list).

  3. Ensure that your swap is mounted after reboots by confirming that the following line exists in your /etc/fstab configuration:

    /swapfile         none           swap sw       0 0\n
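A minimal sketch of making the swappiness setting persistent, as mentioned in step 2 (the drop-in file name is illustrative):

```shell
# Apply the setting immediately and persist it across reboots
sudo sysctl vm.swappiness=10
echo 'vm.swappiness=10' | sudo tee /etc/sysctl.d/99-swappiness.conf
```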
"},{"location":"raspberry-pi4/#download-k0s","title":"Download k0s","text":"

Download a k0s release. For example:

wget -O /tmp/k0s https://github.com/k0sproject/k0s/releases/download/v1.27.5+k0s.0/k0s-v1.27.5+k0s.0-arm64 # replace version number!\nsudo install /tmp/k0s /usr/local/bin/k0s\n

\u2015 or \u2015

Use the k0s download script (as one command) to download the latest stable k0s and make it executable in /usr/bin/k0s.

curl -sSLf https://get.k0s.sh | sudo sh\n

At this point you can run k0s:

ubuntu@ubuntu:~$ k0s version\nv1.27.5+k0s.0\n

To check if k0s's system requirements and external runtime dependencies are fulfilled by your current setup, you can invoke k0s sysinfo:

ubuntu@ubuntu:~$ k0s sysinfo\nMachine ID: \"d84cde1f38844d1425dc04c454c5aa95e41fb11115bbb141c016f4cd3dea4f51\" (from machine) (pass)\nTotal memory: 3.7 GiB (pass)\nDisk space available for /var/lib/k0s: 24.3 GiB (pass)\nOperating system: Linux (pass)\n  Linux kernel release: 5.15.0-1013-raspi (pass)\n  Max. file descriptors per process: current: 1024 / max: 1048576 (warning: < 65536)\n  Executable in path: modprobe: /usr/sbin/modprobe (pass)\n  /proc file system: mounted (0x9fa0) (pass)\n  Control Groups: version 2 (pass)\n    cgroup controller \"cpu\": available (pass)\n    cgroup controller \"cpuacct\": available (via cpu in version 2) (pass)\n    cgroup controller \"cpuset\": available (pass)\n    cgroup controller \"memory\": available (pass)\n    cgroup controller \"devices\": available (assumed) (pass)\n    cgroup controller \"freezer\": available (assumed) (pass)\n    cgroup controller \"pids\": available (pass)\n    cgroup controller \"hugetlb\": available (pass)\n    cgroup controller \"blkio\": available (via io in version 2) (pass)\n  CONFIG_CGROUPS: Control Group support: built-in (pass)\n    CONFIG_CGROUP_FREEZER: Freezer cgroup subsystem: built-in (pass)\n    CONFIG_CGROUP_PIDS: PIDs cgroup subsystem: built-in (pass)\n    CONFIG_CGROUP_DEVICE: Device controller for cgroups: built-in (pass)\n    CONFIG_CPUSETS: Cpuset support: built-in (pass)\n    CONFIG_CGROUP_CPUACCT: Simple CPU accounting cgroup subsystem: built-in (pass)\n    CONFIG_MEMCG: Memory Resource Controller for Control Groups: built-in (pass)\n    CONFIG_CGROUP_HUGETLB: HugeTLB Resource Controller for Control Groups: built-in (pass)\n    CONFIG_CGROUP_SCHED: Group CPU scheduler: built-in (pass)\n      CONFIG_FAIR_GROUP_SCHED: Group scheduling for SCHED_OTHER: built-in (pass)\n        CONFIG_CFS_BANDWIDTH: CPU bandwidth provisioning for FAIR_GROUP_SCHED: built-in (pass)\n    CONFIG_BLK_CGROUP: Block IO controller: built-in (pass)\n  CONFIG_NAMESPACES: Namespaces support: built-in (pass)\n    CONFIG_UTS_NS: UTS namespace: built-in (pass)\n    CONFIG_IPC_NS: IPC namespace: built-in (pass)\n    CONFIG_PID_NS: PID namespace: built-in (pass)\n    CONFIG_NET_NS: Network namespace: built-in (pass)\n  CONFIG_NET: Networking support: built-in (pass)\n    CONFIG_INET: TCP/IP networking: built-in (pass)\n      CONFIG_IPV6: The IPv6 protocol: built-in (pass)\n    CONFIG_NETFILTER: Network packet filtering framework (Netfilter): built-in (pass)\n      CONFIG_NETFILTER_ADVANCED: Advanced netfilter configuration: built-in (pass)\n      CONFIG_NETFILTER_XTABLES: Netfilter Xtables support: module (pass)\n        CONFIG_NETFILTER_XT_TARGET_REDIRECT: REDIRECT target support: module (pass)\n        CONFIG_NETFILTER_XT_MATCH_COMMENT: \"comment\" match support: module (pass)\n        CONFIG_NETFILTER_XT_MARK: nfmark target and match support: module (pass)\n        CONFIG_NETFILTER_XT_SET: set target and match support: module (pass)\n        CONFIG_NETFILTER_XT_TARGET_MASQUERADE: MASQUERADE target support: module (pass)\n        CONFIG_NETFILTER_XT_NAT: \"SNAT and DNAT\" targets support: module (pass)\n        CONFIG_NETFILTER_XT_MATCH_ADDRTYPE: \"addrtype\" address type match support: module (pass)\n        CONFIG_NETFILTER_XT_MATCH_CONNTRACK: \"conntrack\" connection tracking match support: module (pass)\n        CONFIG_NETFILTER_XT_MATCH_MULTIPORT: \"multiport\" Multiple port match support: module (pass)\n        CONFIG_NETFILTER_XT_MATCH_RECENT: \"recent\" match support: module (pass)\n        CONFIG_NETFILTER_XT_MATCH_STATISTIC: 
\"statistic\" match support: module (pass)\n      CONFIG_NETFILTER_NETLINK: module (pass)\n      CONFIG_NF_CONNTRACK: Netfilter connection tracking support: module (pass)\n      CONFIG_NF_NAT: module (pass)\n      CONFIG_IP_SET: IP set support: module (pass)\n        CONFIG_IP_SET_HASH_IP: hash:ip set support: module (pass)\n        CONFIG_IP_SET_HASH_NET: hash:net set support: module (pass)\n      CONFIG_IP_VS: IP virtual server support: module (pass)\n        CONFIG_IP_VS_NFCT: Netfilter connection tracking: built-in (pass)\n      CONFIG_NF_CONNTRACK_IPV4: IPv4 connetion tracking support (required for NAT): unknown (warning)\n      CONFIG_NF_REJECT_IPV4: IPv4 packet rejection: module (pass)\n      CONFIG_NF_NAT_IPV4: IPv4 NAT: unknown (warning)\n      CONFIG_IP_NF_IPTABLES: IP tables support: module (pass)\n        CONFIG_IP_NF_FILTER: Packet filtering: module (pass)\n          CONFIG_IP_NF_TARGET_REJECT: REJECT target support: module (pass)\n        CONFIG_IP_NF_NAT: iptables NAT support: module (pass)\n        CONFIG_IP_NF_MANGLE: Packet mangling: module (pass)\n      CONFIG_NF_DEFRAG_IPV4: module (pass)\n      CONFIG_NF_CONNTRACK_IPV6: IPv6 connetion tracking support (required for NAT): unknown (warning)\n      CONFIG_NF_NAT_IPV6: IPv6 NAT: unknown (warning)\n      CONFIG_IP6_NF_IPTABLES: IP6 tables support: module (pass)\n        CONFIG_IP6_NF_FILTER: Packet filtering: module (pass)\n        CONFIG_IP6_NF_MANGLE: Packet mangling: module (pass)\n        CONFIG_IP6_NF_NAT: ip6tables NAT support: module (pass)\n      CONFIG_NF_DEFRAG_IPV6: module (pass)\n    CONFIG_BRIDGE: 802.1d Ethernet Bridging: module (pass)\n      CONFIG_LLC: module (pass)\n      CONFIG_STP: module (pass)\n  CONFIG_EXT4_FS: The Extended 4 (ext4) filesystem: built-in (pass)\n  CONFIG_PROC_FS: /proc file system support: built-in (pass)\n
"},{"location":"raspberry-pi4/#deploy-a-node","title":"Deploy a node","text":"

Each node can now serve as a control plane node or worker node or both.

"},{"location":"raspberry-pi4/#as-single-node","title":"As single node","text":"

This is a self-contained single-node setup which runs both control plane and worker components. If you don't plan to join any more nodes to the cluster, this is for you.

Install the k0scontroller service:

ubuntu@ubuntu:~$ sudo k0s install controller --single\nubuntu@ubuntu:~$ sudo systemctl status k0scontroller.service\n\u25cb k0scontroller.service - k0s - Zero Friction Kubernetes\n     Loaded: loaded (/etc/systemd/system/k0scontroller.service; enabled; vendor preset: enabled)\n     Active: inactive (dead)\n       Docs: https://docs.k0sproject.io\n

Start it:

ubuntu@ubuntu:~$ sudo systemctl start k0scontroller.service\nubuntu@ubuntu:~$ systemctl status k0scontroller.service\n\u25cf k0scontroller.service - k0s - Zero Friction Kubernetes\n     Loaded: loaded (/etc/systemd/system/k0scontroller.service; enabled; vendor preset: enabled)\n     Active: active (running) since Thu 2022-08-18 09:56:02 UTC; 2s ago\n       Docs: https://docs.k0sproject.io\n   Main PID: 2720 (k0s)\n      Tasks: 10\n     Memory: 24.7M\n        CPU: 4.654s\n     CGroup: /system.slice/k0scontroller.service\n             \u2514\u25002720 /usr/local/bin/k0s controller --single=true\n\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] received CSR\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] generating key: rsa-2048\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] received CSR\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] generating key: rsa-2048\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] received CSR\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] generating key: rsa-2048\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] encoded CSR\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] signed certificate with serial number 6275509116227039894094374442676315636193163621\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] encoded CSR\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] signed certificate with serial number 336800507542010809697469355930007636411790073226\n

When the cluster is up, try to have a look:

ubuntu@ubuntu:~$ sudo k0s kc get nodes -owide\nNAME     STATUS   ROLES           AGE     VERSION       INTERNAL-IP    EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME\nubuntu   Ready    control-plane   4m41s   v1.27.5+k0s   10.152.56.54   <none>        Ubuntu 22.04.1 LTS   5.15.0-1013-raspi   containerd://1.7.2\nubuntu@ubuntu:~$ sudo k0s kc get pod -owide -A\nNAMESPACE     NAME                              READY   STATUS    RESTARTS   AGE     IP             NODE     NOMINATED NODE   READINESS GATES\nkube-system   kube-proxy-kkv2l                  1/1     Running   0          4m44s   10.152.56.54   ubuntu   <none>           <none>\nkube-system   kube-router-vf2pv                 1/1     Running   0          4m44s   10.152.56.54   ubuntu   <none>           <none>\nkube-system   coredns-88b745646-wd4mp           1/1     Running   0          5m10s   10.244.0.2     ubuntu   <none>           <none>\nkube-system   metrics-server-7d7c4887f4-ssk49   1/1     Running   0          5m6s    10.244.0.3     ubuntu   <none>           <none>\n

Overall, the single k0s node uses less than 1 GiB of RAM:

ubuntu@ubuntu:~$ free -h\n               total        used        free      shared  buff/cache   available\nMem:           3.7Gi       715Mi       1.3Gi       3.0Mi       1.7Gi       2.8Gi\nSwap:             0B          0B          0B\n
"},{"location":"raspberry-pi4/#as-a-controller-node","title":"As a controller node","text":"

This will install k0s as a single non-HA controller. It won't be able to run any workloads, so you need to connect more workers to it.

Install the k0scontroller service. Note that we're not specifying any flags:

ubuntu@ubuntu:~$ sudo k0s install controller\nubuntu@ubuntu:~$ systemctl status k0scontroller.service\n\u25cb k0scontroller.service - k0s - Zero Friction Kubernetes\n     Loaded: loaded (/etc/systemd/system/k0scontroller.service; enabled; vendor preset: enabled)\n     Active: inactive (dead)\n       Docs: https://docs.k0sproject.io\n

Start it:

ubuntu@ubuntu:~$ sudo systemctl start k0scontroller.service\nubuntu@ubuntu:~$ systemctl status k0scontroller.service\n\u25cf k0scontroller.service - k0s - Zero Friction Kubernetes\n     Loaded: loaded (/etc/systemd/system/k0scontroller.service; enabled; vendor preset: enabled)\n     Active: active (running) since Thu 2022-08-18 10:31:07 UTC; 3s ago\n       Docs: https://docs.k0sproject.io\n   Main PID: 1176 (k0s)\n      Tasks: 10\n     Memory: 30.2M\n        CPU: 8.936s\n     CGroup: /system.slice/k0scontroller.service\n             \u2514\u25001176 /usr/local/bin/k0s controller\n\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] signed certificate with serial number 723202396395786987172578079268287418983457689579\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] encoded CSR\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] signed certificate with serial number 36297085497443583023060005045470362249819432477\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] encoded CSR\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] encoded CSR\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] signed certificate with serial number 728910847354665355109188021924183608444435075827\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] generate received request\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] received CSR\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] generating key: rsa-2048\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] signed certificate with serial number 718948898553094584370065610752227487244528071083\n

As soon as the controller is up, we can try to inspect the API as we did for the single node:

ubuntu@ubuntu:~$ sudo k0s kc get nodes -owide\nNo resources found\nubuntu@ubuntu:~$ sudo k0s kc get pod -owide -A\nNAMESPACE     NAME                              READY   STATUS    RESTARTS   AGE   IP       NODE     NOMINATED NODE   READINESS GATES\nkube-system   coredns-88b745646-6tpwm           0/1     Pending   0          29s   <none>   <none>   <none>           <none>\nkube-system   metrics-server-7d7c4887f4-9k5k5   0/1     Pending   0          24s   <none>   <none>   <none>           <none>\n

As you can see, there are no nodes and two pending pods: a control plane without workers. Memory consumption is below that of the single-node controller, but not by much:

ubuntu@ubuntu:~$ free -h\n               total        used        free      shared  buff/cache   available\nMem:           3.7Gi       678Mi       2.3Gi       3.0Mi       758Mi       2.9Gi\nSwap:             0B          0B          0B\n

This controller runs a full-fledged control plane, backed by etcd, as opposed to the lightweight kine-based one from the single-node example. For the latter, k0s doesn't support joining new nodes.

More nodes can be added by creating join tokens. To add a worker node, create a token for it:

ubuntu@ubuntu:~$ sudo k0s token create --role worker\nH4sIAAAAAAAC/2yV0Y6jPBKF7/MUeYGZ30DonUTai5+Ak5DgbhuXHXwHmAnBhtAJHdKs9t1XnZmRdqW9K1cdfceyrDqzvD+L6no7X7rV/O7MSvtxG6rrbTX7Nv9dr2bz+Xx+q6736rqa18PQ31Z//eWg747vfvdfvvuL1cti4T1VZXUdzj/PZT5U3/KPob5cz8PnN50P+Wp+SNFwSJ01Ax3zcxAyEUMKKqYIA3vO0LA2TpwCC1hEQipFrxD2UogDhawQobWJY297jxHBCdbS70hIvWKTOMWGBcwhgUaMSegPhdPH+VY13GDGYNxTiwONdMSEJtTiLeVYMMALDn6dOKqXtt5r0WfQPpqK43cpWKBAecnWktxEiAvWVZEDghPCorhmXTlWp/7PTPz3jEPcVZF6p0KsFfIlNZiIiB11iFUhlJ+1jkxwn/EjU4kRnnI1zsEJkkiH4OHt2pI4a0gEINZUYEEhQinEkUb4qU0Rvn+9CQD5UKJ0dKfG1NVZ2dWCcfCkHFDKycjbYZuGIsk5DngY7Svcn3N5mdIGm1yylkU+Srcxyiy7l50ZRUTvGqtcNuK9QAvEjcihu4yJh/sipC5xy4nBssut9UrcB6nENz72JnfxKLBmxAseZftgyhHvfLIjaeK+PNYX2tmwkKQrGjPlSFAI2VRKmyZmidjnsGCefRfe6Vp4p6veBk0FCtaN/uBu7JAp9kS6nFKDCQvxVUXYsGPiFji+VU05UtFvdLt8oVK8JRE+5m6fZfbvBcGa8QhH0pzG6vxjLEOSEJvtZdRvhNSywNmCejEihiRMYp/IH34utZc6GpdwWwgbc9Hhh5Q+4ushLeXJEZ6t85YBCLxTTfwmGhyWW+HC2B+AE1DnYdK4l9pYJ/P0jhn1mrsq1MbHKYqcRO6cyuAQQG/kRlsq2aOK/HVp2FZKDVRqQg0OmNuz3MTB2jgBiXSQCGHYVmN6XnoAItDIrmnbBxDFHbdqB8ZZU5ktGMRAgQUApzuH3chQ9BCSRcrBR2riVCHxBt5ln3kYlXKxKKI6JEizV4wn3tWyMMk1N/iVtvpayvqaQ+nrKfj6gxMzOOCIBF/+cBQv4JG4AnATe0GZjUNy6gcWkkG5CJGpntKGTnzb472XfeqtekuQzqsWua+bpaw2j9d0ih02YZauh5y4/v7gqZzY2lYmVuWkahFqzF0cri1jbPu3n4d6nVp10G4fVw3OZbp8VabfaQfvtWN9zYNOdfVYmIWjz4PMzOOFmv5Nb3u39CgqXdUCth4xyxrwaQ8Oc3On9xIet3mHmewCj7kJgmP/pr3os5i0oLx+1+4yyj1mcwuTmDIko50DpndhWwNxHwcQQSuEGFljI0Z7lYJ1EhgnguJ3PukPYXr3VbJYOCdE5ECSFpBqgrDEpzFzRSfFxSUgIrJhUQZxW5jazxpCk445CfK3RMbHdcOGtL2N0O7uAuyCId8A0izZ4B2EseQb55EgwVX7+CyjmB9c1eSTVQXeLWiDj4CjUW7ZXXl9nR7pqDYKUXnZqyZ4r46x98bR/vduxtzQE0UiFZHdpEACEcFzLx/o5Z+z+bzL22o1N+g2Ky/dUD2GXznxq/6VE39C46n6anzcnqePorLV8K24XIbbcM37/6V9XK9VN3z7Q3o2zbnTq/n60v08n2b9tfpZXauurG6r+b/+PfuiPs1/Q/4P/mn8vMJwMVW3mrvL84/lj+8N8ia/uZ/Lf2izWFb57D8BAAD//zANvmsEBwAA\n

Save the join token for subsequent steps.

"},{"location":"raspberry-pi4/#as-a-worker-node","title":"As a worker node","text":"

To join an existing k0s cluster, create the join token file for the worker (where $TOKEN_CONTENT is one of the join tokens created in the control plane setup):

sudo sh -c 'mkdir -p /var/lib/k0s/ && umask 077 && echo \"$TOKEN_CONTENT\" > /var/lib/k0s/join-token'\n

After that, install the k0sworker service:

ubuntu@ubuntu:~$ sudo k0s install worker --token-file /var/lib/k0s/join-token\nubuntu@ubuntu:~$ systemctl status k0sworker.service\n\u25cb k0sworker.service - k0s - Zero Friction Kubernetes\n     Loaded: loaded (/etc/systemd/system/k0sworker.service; enabled; vendor preset: enabled)\n     Active: inactive (dead)\n       Docs: https://docs.k0sproject.io\n

Start the service:

ubuntu@ubuntu:~$ sudo systemctl start k0sworker.service\nubuntu@ubuntu:~$ systemctl status k0sworker.service\n\u25cf k0sworker.service - k0s - Zero Friction Kubernetes\n     Loaded: loaded (/etc/systemd/system/k0sworker.service; enabled; vendor preset: enabled)\n     Active: active (running) since Thu 2022-08-18 13:48:58 UTC; 2s ago\n       Docs: https://docs.k0sproject.io\n   Main PID: 1631 (k0s)\n      Tasks: 22\n     Memory: 181.7M\n        CPU: 4.010s\n     CGroup: /system.slice/k0sworker.service\n             \u251c\u25001631 /usr/local/bin/k0s worker --token-file=/var/lib/k0s/join-token\n             \u2514\u25001643 /var/lib/k0s/bin/containerd --root=/var/lib/k0s/containerd --state=/run/k0s/containerd --address=/run/k0s/containerd.sock --log-level=info --config=/etc/k0s/containerd.toml\n\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"Starting to supervise\" component=containerd\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"Started successfully, go nuts pid 1643\" component=containerd\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"starting OCIBundleReconciler\"\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"starting Kubelet\"\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"Starting kubelet\"\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"detected 127.0.0.53 nameserver, assuming systemd-resolved, so using resolv.conf: /run/systemd/resolve/resolv.conf\"\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"Starting to supervise\" component=kubelet\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"Started successfully, go nuts pid 1648\" component=kubelet\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"starting Status\"\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"starting Autopilot\"\n

As this is a worker node, we cannot access the Kubernetes API via the built-in k0s kc subcommand, but we can check the k0s API instead:

ubuntu@ubuntu:~$ sudo k0s status\nVersion: v1.27.5+k0s.0\nProcess ID: 1631\nRole: worker\nWorkloads: true\nSingleNode: false\n

The memory requirements are also pretty low:

ubuntu@ubuntu:~$ free -h\n               total        used        free      shared  buff/cache   available\nMem:           3.7Gi       336Mi       2.1Gi       3.0Mi       1.2Gi       3.2Gi\nSwap:             0B          0B          0B\n
"},{"location":"raspberry-pi4/#connect-to-the-cluster","title":"Connect to the cluster","text":"

On a controller node, generate a new raspi-cluster-master user with admin rights and get a kubeconfig for it:

ubuntu@ubuntu:~$ sudo k0s kc create clusterrolebinding raspi-cluster-master-admin --clusterrole=admin --user=raspi-cluster-master\nclusterrolebinding.rbac.authorization.k8s.io/raspi-cluster-master-admin created\nubuntu@ubuntu:~$ sudo k0s kubeconfig create --groups system:masters raspi-cluster-master\n\napiVersion: v1\nclusters:\n- cluster:\n    server: https://10.152.56.54:6443\n    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURBRENDQWVpZ0F3SUJBZ0lVT2RSVzdWdm83UWR5dmdFZHRUK1V3WDN2YXdvd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0dERVdNQlFHQTFVRUF4TU5hM1ZpWlhKdVpYUmxjeTFqWVRBZUZ3MHlNakE0TVRneE5EQTFNREJhRncwegpNakE0TVRVeE5EQTFNREJhTUJneEZqQVVCZ05WQkFNVERXdDFZbVZ5Ym1WMFpYTXRZMkV3Z2dFaU1BMEdDU3FHClNJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUURsdy8wRFJtcG1xRjVnVElmN1o5bElRN0RFdUp6WDJLN1MKcWNvYk5oallFanBqbnBDaXFYOSt5T1R2cGgyUlRKN2tvaGkvUGxrYm5oM2pkeVQ3NWxSMGowSkV1elRMaUdJcApoR2pqc3htek5RRWVwb210R0JwZXNGeUE3NmxTNVp6WVJtT0lFQVgwb0liWjBZazhuU3pQaXBsWDMwcTFETEhGCkVIcSsyZG9vVXRIb09EaEdmWFRJTUJsclZCV3dCV3cxbmdnN0dKb01TN2tHblpYaUw2NFBiRDg5NmtjYXo0a28KTXhhZGc1ZmZQNStBV3JIVHhKV1d2YjNCMjEyOWx3R3FiOHhMTCt1cnVISHVjNEh4em9OVUt1WUlXc2lvQWp4YgphdDh6M1QwV2RnSit2VithWWlRNFlLeEVFdFB4cEMvUHk0czU0UHF4RzVZa0hiMDczMEUxQWdNQkFBR2pRakJBCk1BNEdBMVVkRHdFQi93UUVBd0lCQmpBUEJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJTd2p4STIKRUxVNCtNZUtwT0JNQUNnZDdKU1QxVEFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBQ3k3dHFFMk5WT3E0Z0I1Ngp2clVZMFU0SWp1c0dUN0UzQ2xqSUtQODk2Mm9xdlpvU0NWb2U5YS9UQTR6ZXYrSXJwaTZ1QXFxc3RmT3JFcDJ4CmVwMWdYZHQrbG5nV0xlbXdWdEVOZ0xvSnBTM09Vc3N1ai9XcmJwSVU4M04xWVJTRzdzU21KdXhpa3pnVUhiUk8KZ01SLzIxSDFESzJFdmdQY2pHWXlGbUQzSXQzSjVNcnNiUHZTRG4rUzdWWWF0eWhIMUo4dmwxVDFpbzRWWjRTNgpJRFlaV05JOU10TUpqcGxXL01pRnlwTUhFU1E3UEhHeHpGVExoWFplS0pKSlRPYXFha1AxM3J1WFByVHVDQkl4CkFCSWQraU9qdGhSU3ZxbTFocGtHcmY4Rm9PdG1PYXZmazdDdnNJTWdUV2pqd2JJZWZIRU8zUmVBMzZWZWV3bXoKOFJHVUtBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n  name: k0s\ncontexts:\n- context:\n    cluster: k0s\n    user: raspi-cluster-master\n  name: k0s\ncurrent-context: k0s\nkind: Config\npreferences: {}\nusers:\n- name: raspi-cluster-master\n  user:\n    client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURYVENDQWtXZ0F3SUJBZ0lVV0ZZNkZ4cCtUYnhxQUxTVjM0REVMb0dEc3Q0d0RRWUpLb1pJaHZjTkFRRUwKQlFBd0dERVdNQlFHQTFVRUF4TU5hM1ZpWlhKdVpYUmxjeTFqWVRBZUZ3MHlNakE0TVRneE5ERTRNREJhRncweQpNekE0TVRneE5ERTRNREJhTURneEZ6QVZCZ05WQkFvVERuTjVjM1JsYlRwdFlYTjBaWEp6TVIwd0d3WURWUVFECkV4UnlZWE53YVMxamJIVnpkR1Z5TFcxaGMzUmxjakNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0MKQVFvQ2dnRUJBTGJNalI5eHA1dDJzank1S0dEQnQ2dWl3QU4vaEhwZkFUNXJrZTFRblc2eFlZeDYzR2JBTXYrRQpjWmEyUEdPempQeVVTZThVdWp4ZnR0L1JWSTJRVkVIRGlJZ1ZDNk1tUUFmTm1WVlpKOHBFaTM2dGJZYUVxN3dxClhxYmJBQ0F0ZGtwNTJ0Y0RLVU9sRS9SV0tUSjN4bXUvRmh0OTIrRDdtM1RrZTE0TkJ5a1hvakk1a2xVWU9ySEMKVTN3V210eXlIUFpDMFBPdWpXSE5yeS9wOXFjZzRreWNDN0NzUVZqMWoxY2JwdXRpWllvRHNHV3piS0RTbExRZApyYnUwRnRVZVpUQzVPN2NuTk5tMU1EZldubXhlekw4L2N5dkJCYnRmMjhmcERFeEhMT2dTY2ZZUlZwUllPMzdvCk5yUjljMGNaZE9oZW5YVnlQcU1WVVlSNkQxMlRrY0VDQXdFQUFhTi9NSDB3RGdZRFZSMFBBUUgvQkFRREFnV2cKTUIwR0ExVWRKUVFXTUJRR0NDc0dBUVVGQndNQkJnZ3JCZ0VGQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUIwRwpBMVVkRGdRV0JCUitqQTlGNm1jc25ob2NtMnd0dFNYY2tCaUpoakFmQmdOVkhTTUVHREFXZ0JTd2p4STJFTFU0CitNZUtwT0JNQUNnZDdKU1QxVEFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBY2RRV3N4OUpHOUIxckxVc2Y1QzgKd1BzTkhkZURYeG1idm4zbXN3aFdVMEZHU1pjWjlkMTYzeXhEWnA4QlNzNWFjNnZqcU1lWlFyRThDUXdXYTlxVAowZVJXcTlFODYzcS9VcFVNN3lPM1BnMHd4RWtQSTVuSjRkM0o3MHA3Zk4zenpzMUJzU0h6Q2hzOWR4dE5XaVp5CnNINzdhbG9NanA0cXBEVWRyVWcyT0d4RWhRdzJIaXE3ZEprQm80a3hoWmhBc3lWTDdZRng0SDY3WkIzSjY4V3QKdTdiWnRmUVJZV3ZPUE9oS0pFdmlLVXptNDJBUlZXTDdhZHVESTBBNmpxbXhkTGNxKzlNWVlaNm1CT0NWakx1WgoybDlJSVI2NkdjOUdpdC9kSFdwbTVZbmozeW8xcUU0UVg4ZmVUQTczUlU5cmFIdkNpTGdVbFRaVUNGa3JNL0NtCndBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n    client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdHN5TkgzR25tM2F5UExrb1lNRzNxNkxBQTMrRWVsOEJQbXVSN1ZDZGJyRmhqSHJjClpzQXkvNFJ4bHJZOFk3T00vSlJKN3hTNlBGKzIzOUZValpCVVFjT0lpQlVMb3laQUI4MlpWVmtueWtTTGZxMXQKaG9TcnZDcGVwdHNBSUMxMlNubmExd01wUTZVVDlGWXBNbmZHYTc4V0czM2I0UHViZE9SN1hnMEhLUmVpTWptUwpWUmc2c2NKVGZCYWEzTEljOWtMUTg2Nk5ZYzJ2TCtuMnB5RGlUSndMc0t4QldQV1BWeHVtNjJKbGlnT3daYk5zCm9OS1V0QjJ0dTdRVzFSNWxNTGs3dHljMDJiVXdOOWFlYkY3TXZ6OXpLOEVGdTEvYngra01URWNzNkJKeDloRlcKbEZnN2Z1ZzJ0SDF6UnhsMDZGNmRkWEkrb3hWUmhIb1BYWk9Sd1FJREFRQUJBb0lCQUFpYytzbFFnYVZCb29SWgo5UjBhQTUyQ3ZhbHNpTUY3V0lPb2JlZlF0SnBTb1ZZTk0vVmplUU94S2VrQURUaGxiVzg1VFlLR1o0QVF3bjBwClQrS2J1bHllNmYvL2ZkemlJSUk5bmN2M3QzaEFZcEpGZWJPczdLcWhGSFNvUFFsSEd4dkhRaGgvZmFKQ1ZQNWUKVVBLZjBpbWhoMWtrUlFnRTB2NWZCYkVZekEyVGl4bThJSGtQUkdmZWN4WmF1VHpBS2VLR0hjTFpDem8xRHhlSgp3bHpEUW9YWDdHQnY5MGxqR1pndENXcFEyRUxaZ1NwdW0rZ0crekg1WFNXZXgwMzJ4d0NhbkdDdGcyRmxHd2V2Ck9PaG8zSjNrRWVJR1MzSzFJY24rcU9hMjRGZmgvcmRsWXFSdStWeEZ4ZkZqWGxaUjdjZkF4Mnc1Z3NmWm9CRXIKUE1oMTdVRUNnWUVBejZiTDc4RWsvZU1jczF6aWdaVVpZcE5qa2FuWHlsS3NUUWM1dU1pRmNORFdObFkxdlQzVQprOHE5cHVLbnBZRVlTTGVVTS9tSWk5TVp6bmZjSmJSL0hJSG9YVjFMQVJ2blQ0djN3T0JsaDc5ajdKUjBpOW1OClYrR0Q1SlNPUmZCVmYxVlJHRXN6d3ZhOVJsS2lMZ0JVM2tKeWN2Q09jYm5aeFltSXRrbDhDbXNDZ1lFQTRWeG4KZTY2QURIYmR3T0plbEFSKytkVHh5eVYyRjY1SEZDNldPQVh2RVRucGRudnRRUUprWWhNYzM1Y2gvMldmZDBWYQpZb3lGZE9kRThKZSsvcWxuS1pBc3BHRC9yZHp2VmFteHQ4WXdrQXU5Q1diZWw2VENPYkZOQ2hjK1NUbmRqN0duCmlSUHprM1JYMnBEVi9OaW5FVFA0TEJnTHJQYkxlSVAwSzZ4bjk0TUNnWUVBeXZGMmNVendUVjRRNTgrSTVDS0gKVzhzMnpkOFRzbjVZUFRRcG1zb0hlTG55RWNyeDNKRTRXSFVXSTZ0ek01TFczQUxuU21DL3JnQlVRWER0Yk1CYQpWczh6L1VPM2tVN25JOXhrK0ZHWGlUTnBnb2VZM0RGMExZYVBNL0JvbUR3S0kxZUwyVlZ1TWthWnQ4ZjlEejV0CnM0ZDNlWlJYY3hpem1KY1JVUzdDbHg4Q2dZQk45Vmc2K2RlRCtFNm4zZWNYenlKWnJHZGtmZllISlJ1amlLWWcKaFRUNVFZNVlsWEF5Yi9CbjJQTEJDaGdSc0lia2pKSkN5eGVUcERrOS9WQnQ2ZzRzMjVvRjF5UTdjZFU5VGZHVApnRFRtYjVrYU9vSy85SmZYdTFUS0s5
WTVJSkpibGZvOXVqQWxqemFnL2o5NE16NC8vamxZajR6aWJaRmZoRTRnCkdZanhud0tCZ0U1cFIwMlVCa1hYL3IvdjRqck52enNDSjR5V3U2aWtpem00UmJKUXJVdEVNd1Y3a2JjNEs0VFIKM2s1blo1M1J4OGhjYTlMbXREcDJIRWo2MlBpL2pMR0JTN0NhOCtQcStxNjZwWWFZTDAwWnc4UGI3OVMrUmpzQQpONkNuQWg1dDFYeDhVMTIvWm9JcjBpOWZDaERuNlBqVEM0MVh5M1EwWWd6TW5jYXMyNVBiCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==\n

Using the above kubeconfig, you can now access and use the cluster:

ubuntu@ubuntu:~$ KUBECONFIG=/path/to/kubeconfig kubectl get nodes,deployments,pods -owide -A\nNAME          STATUS   ROLES    AGE    VERSION       INTERNAL-IP    EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME\nnode/ubuntu   Ready    <none>   5m1s   v1.27.5+k0s   10.152.56.54   <none>        Ubuntu 22.04.1 LTS   5.15.0-1013-raspi   containerd://1.7.2\n\nNAMESPACE     NAME                             READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS       IMAGES                                                 SELECTOR\nkube-system   deployment.apps/coredns          1/1     1            1           33m   coredns          registry.k8s.io/coredns/coredns:v1.7.0                 k8s-app=kube-dns\nkube-system   deployment.apps/metrics-server   1/1     1            1           33m   metrics-server   registry.k8s.io/metrics-server/metrics-server:v0.6.4   k8s-app=metrics-server\n\nNAMESPACE     NAME                                  READY   STATUS    RESTARTS   AGE    IP             NODE     NOMINATED NODE   READINESS GATES\nkube-system   pod/coredns-88b745646-pkk5w           1/1     Running   0          33m    10.244.0.5     ubuntu   <none>           <none>\nkube-system   pod/konnectivity-agent-h4nfj          1/1     Running   0          5m1s   10.244.0.6     ubuntu   <none>           <none>\nkube-system   pod/kube-proxy-qcgzs                  1/1     Running   0          5m1s   10.152.56.54   ubuntu   <none>           <none>\nkube-system   pod/kube-router-6lrht                 1/1     Running   0          5m1s   10.152.56.54   ubuntu   <none>           <none>\nkube-system   pod/metrics-server-7d7c4887f4-wwbkk   1/1     Running   0          33m    10.244.0.4     ubuntu   <none>           <none>\n
"},{"location":"reinstall-k0sctl/","title":"Reinstall a node","text":"

k0sctl currently does not support changing all of containerd's configuration (state, root) on the fly.

For example, in order to move containerd's root directory to a new partition/drive, you have to provide --data-dir /new/drive in your k0sctl installFlags for each (worker) node. --data-dir is a k0s option that is then added to the service unit.

The following is an example of that:

# spec.hosts[*].installFlags\n- role: worker\ninstallFlags:\n- --profile flatcar\n- --enable-cloud-provider\n- --data-dir /new/drive\n- --kubelet-extra-args=\"--cloud-provider=external\"\n

However, the installFlags are only used when the node is installed.
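To double-check which flags an already installed node is actually running with, you can inspect the generated service unit, for example:

```shell
# Show the command line the k0s worker service was installed with
systemctl cat k0sworker.service | grep ExecStart
```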

"},{"location":"reinstall-k0sctl/#steps","title":"Steps","text":"

Drain the node:

kubectl drain node.hostname\n

Access your node (e.g. via ssh) to stop and reset k0s:

sudo k0s stop\nsudo k0s reset\n

Reboot the node (for good measure):

sudo systemctl reboot\n

Once the node is available again, run k0sctl apply to integrate it into your cluster and uncordon the node to allow pods to be scheduled:

k0sctl apply -c config.yaml\nkubectl uncordon node.hostname\n
"},{"location":"releases/","title":"Releases","text":"

This page describes how we release and support the k0s project. Mirantis Inc. can also provide commercial support for k0s.

"},{"location":"releases/#upstream-kubernetes-release-support-cycle","title":"Upstream Kubernetes release & support cycle","text":"

This release and support cycle is followed for ALL new minor releases (e.g. 1.25, 1.26, and so on). In practice this means that a new minor release is published every four months.

After a minor release is published, the upstream community maintains it for 14 months. Maintenance in this case means that upstream Kubernetes provides bug fixes, CVE mitigations and the like for 14 months per minor release.

"},{"location":"releases/#k0s-release-and-support-model","title":"k0s release and support model","text":"

Starting from k0s 1.21, k0s follows the Kubernetes project's release and support model.

The k0s project closely follows the upstream Kubernetes release cycle. The only difference from the upstream release/maintenance schedule is that our initial release date is always a few weeks behind the upstream Kubernetes release date, as we build our version of k0s from the officially released version of Kubernetes and need time to test the final version before shipping.

Since upstream Kubernetes provides support and patch releases for a minor version for roughly 14 months, k0s follows the same model: each minor release is maintained for roughly 14 months after its initial release.

The k0s project will typically ship fixes needed in k0s's own codebase together with the corresponding upstream Kubernetes patch release. For example, if a bug is identified in the 1.26 series, the k0s project will create and ship a fix for it with the next upstream Kubernetes 1.26.x release. In rare cases where a critical bug is identified, we may also ship \u201cout of band\u201d patches. Such an out-of-band release would be identified by the version string suffix. For example, a normal release following Kubernetes upstream would be 1.26.3+k0s.0, whereas a critical out-of-band patch would be identified as 1.26.3+k0s.1.

"},{"location":"releases/#new-features-and-enhancements","title":"New features and enhancements","text":"

The biggest new k0s features will typically only be delivered on top of the latest Kubernetes version, but smaller enhancements can be included in older release tracks as well.

"},{"location":"releases/#version-string","title":"Version string","text":"

The k0s version string consists of the Kubernetes version and the k0s version. For example:

  • v1.27.5+k0s.0

The Kubernetes version (1.27.5) is the first part, and the last part (k0s.0) reflects the k0s version, which is built on top of that particular Kubernetes version.

"},{"location":"remove_controller/","title":"Remove or replace a controller","text":"

You can manually remove or replace a controller from a multi-node k0s cluster (>=3 controllers) without downtime. However, you have to maintain the etcd quorum while doing so.

"},{"location":"remove_controller/#remove-a-controller","title":"Remove a controller","text":"

If your controller is also a worker (k0s controller --enable-worker), you first have to delete the controller from Kubernetes itself. To do so, run the following commands from the controller:

# Remove the containers from the node and cordon it\nk0s kubectl drain --ignore-daemonsets --delete-emptydir-data <controller>\n# Delete the node from the cluster\nk0s kubectl delete node <controller>\n

Then you need to remove it from the etcd cluster. For example, if you want to remove controller01 from a cluster with 3 controllers:

# First, list the Etcd members\nk0s etcd member-list\n{\"members\":{\"controller01\":\"<PEER_ADDRESS1>\", \"controller02\": \"<PEER_ADDRESS2>\", \"controller03\": \"<PEER_ADDRESS3>\"}}\n# Then, remove the controller01 using its peer address\nk0s etcd leave --peer-address \"<PEER_ADDRESS1>\"\n

The controller is now removed from the cluster. To reset k0s on the machine, run the following commands:

k0s stop\nk0s reset\nreboot\n
"},{"location":"remove_controller/#replace-a-controller","title":"Replace a controller","text":"

To replace a controller, you first remove the old controller (as described above) and then follow the manual installation procedure to add the new one.

"},{"location":"reset/","title":"Uninstall/Reset","text":"

k0s can be uninstalled locally with the k0s reset command and remotely with the k0sctl reset command. Both remove all k0s-related files from the host.

reset operates under the assumption that k0s is installed as a service on the host.

"},{"location":"reset/#uninstall-a-k0s-node-locally","title":"Uninstall a k0s node locally","text":"

To prevent accidental triggering, k0s reset will not run if the k0s service is running, so you must first stop the service:

  1. Stop the service:

    sudo k0s stop\n
  2. Invoke the reset command:

    $ sudo k0s reset\nINFO[2021-06-29 13:08:39] * containers steps\nINFO[2021-06-29 13:08:44] successfully removed k0s containers!\nINFO[2021-06-29 13:08:44] no config file given, using defaults\nINFO[2021-06-29 13:08:44] * remove k0s users step:\nINFO[2021-06-29 13:08:44] no config file given, using defaults\nINFO[2021-06-29 13:08:44] * uninstall service step\nINFO[2021-06-29 13:08:44] Uninstalling the k0s service\nINFO[2021-06-29 13:08:45] * remove directories step\nINFO[2021-06-29 13:08:45] * CNI leftovers cleanup step\nINFO k0s cleanup operations done. To ensure a full reset, a node reboot is recommended.\n
"},{"location":"reset/#uninstall-a-k0s-cluster-using-k0sctl","title":"Uninstall a k0s cluster using k0sctl","text":"

k0sctl can be used to connect to each node and remove all k0s-related files and processes from the hosts.

  1. Invoke k0sctl reset command:
    $ k0sctl reset --config k0sctl.yaml\nk0sctl v0.9.0 Copyright 2021, k0sctl authors.\n\n? Going to reset all of the hosts, which will destroy all configuration and data, Are you sure? Yes\nINFO ==> Running phase: Connect to hosts \nINFO [ssh] 13.53.43.63:22: connected              \nINFO [ssh] 13.53.218.149:22: connected            INFO ==> Running phase: Detect host operating systems \nINFO [ssh] 13.53.43.63:22: is running Ubuntu 20.04.2 LTS \nINFO [ssh] 13.53.218.149:22: is running Ubuntu 20.04.2 LTS INFO ==> Running phase: Prepare hosts    INFO ==> Running phase: Gather k0s facts \nINFO [ssh] 13.53.43.63:22: found existing configuration \nINFO [ssh] 13.53.43.63:22: is running k0s controller version 1.27.5+k0s.0\nINFO [ssh] 13.53.218.149:22: is running k0s worker version 1.27.5+k0s.0\nINFO [ssh] 13.53.43.63:22: checking if worker  has joined INFO ==> Running phase: Reset hosts      \nINFO [ssh] 13.53.43.63:22: stopping k0s           \nINFO [ssh] 13.53.218.149:22: stopping k0s         \nINFO [ssh] 13.53.218.149:22: running k0s reset    \nINFO [ssh] 13.53.43.63:22: running k0s reset      INFO ==> Running phase: Disconnect from hosts INFO ==> Finished in 8s                  
"},{"location":"runtime/","title":"Runtime","text":"

k0s uses containerd as the default Container Runtime Interface (CRI) and runc as the default low-level runtime. In most cases they don't require any configuration changes. However, if custom configuration is needed, this page provides some examples.

"},{"location":"runtime/#containerd-configuration","title":"containerd configuration","text":"

By default, k0s manages the full containerd configuration. Users have the option of fully overriding, and thus also managing, the configuration themselves.

"},{"location":"runtime/#user-managed-containerd-configuration","title":"User managed containerd configuration","text":"

In the default k0s-generated configuration there's a \"magic\" comment telling k0s that the file is k0s-managed:

# k0s_managed=true\n

If you wish to take over the configuration management, remove this line.

To make changes to the containerd configuration, you must first generate a default containerd configuration, with default values, into /etc/k0s/containerd.toml:

containerd config default > /etc/k0s/containerd.toml\n

k0s runs containerd with the following default values:

/var/lib/k0s/bin/containerd \\\n--root=/var/lib/k0s/containerd \\\n--state=/run/k0s/containerd \\\n--address=/run/k0s/containerd.sock \\\n--config=/etc/k0s/containerd.toml\n

Next, add the following default values to the configuration file:

version = 2\nroot = \"/var/lib/k0s/containerd\"\nstate = \"/run/k0s/containerd\"\n...\n\n[grpc]\naddress = \"/run/k0s/containerd.sock\"\n
"},{"location":"runtime/#k0s-managed-dynamic-runtime-configuration","title":"k0s managed dynamic runtime configuration","text":"

From 1.27.1 onwards, k0s enables dynamic configuration of containerd CRI runtimes. This works by k0s creating a special directory, /etc/k0s/containerd.d/, into which users can drop partial containerd configuration snippets.

k0s automatically picks up these files and adds them to the containerd configuration's imports list. If k0s detects that the configuration drop-ins are CRI-related, it collects all of them into a single file and adds that as a single import. This is to work around a hard limitation in containerd 1.x versions. Read more at containerd#8056

"},{"location":"runtime/#examples","title":"Examples","text":"

The following chapters provide some examples of how to configure different runtimes for containerd using k0s-managed drop-in configurations.

"},{"location":"runtime/#using-gvisor","title":"Using gVisor","text":"

gVisor is an application kernel, written in Go, that implements a substantial portion of the Linux system call interface. It provides an additional layer of isolation between running applications and the host operating system.

  1. Install the needed gVisor binaries into the host.

    (\nset -e\n  ARCH=$(uname -m)\nURL=https://storage.googleapis.com/gvisor/releases/release/latest/${ARCH}\nwget ${URL}/runsc ${URL}/runsc.sha512 \\\n${URL}/containerd-shim-runsc-v1 ${URL}/containerd-shim-runsc-v1.sha512\n  sha512sum -c runsc.sha512 \\\n-c containerd-shim-runsc-v1.sha512\n  rm -f *.sha512\n  chmod a+rx runsc containerd-shim-runsc-v1\n  sudo mv runsc containerd-shim-runsc-v1 /usr/local/bin\n)\n

    Refer to the gVisor install docs for more information.

  2. Prepare the config for k0s-managed containerd to utilize gVisor as an additional runtime:

    cat <<EOF | sudo tee /etc/k0s/containerd.d/gvisor.toml\nversion = 2\n\n[plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.runsc]\n  runtime_type = \"io.containerd.runsc.v1\"\nEOF\n
  3. Start and join the worker into the cluster, as normal:

    k0s worker $token\n
  4. Register the runtime on the Kubernetes side to make the gVisor runtime usable for workloads (by default, containerd uses plain runc as the runtime):

    cat <<EOF | kubectl apply -f -\napiVersion: node.k8s.io/v1\nkind: RuntimeClass\nmetadata:\n  name: gvisor\nhandler: runsc\nEOF\n

    At this point, you can use gVisor runtime for your workloads:

    apiVersion: v1\nkind: Pod\nmetadata:\nname: nginx-gvisor\nspec:\nruntimeClassName: gvisor\ncontainers:\n- name: nginx\nimage: nginx\n
  5. (Optional) Verify that the created nginx pod is running under gVisor runtime:

    # kubectl exec nginx-gvisor -- dmesg | grep -i gvisor\n[    0.000000] Starting gVisor...\n
"},{"location":"runtime/#using-nvidia-container-runtime","title":"Using nvidia-container-runtime","text":"

First, install the NVIDIA runtime components:

distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \\\n&& curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - \\\n&& curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list\nsudo apt-get update && sudo apt-get install -y nvidia-container-runtime\n

Next, drop the containerd runtime configuration snippet into /etc/k0s/containerd.d/nvidia.toml:

[plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.nvidia]\nprivileged_without_host_devices = false\nruntime_engine = \"\"\nruntime_root = \"\"\nruntime_type = \"io.containerd.runc.v1\"\n[plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.nvidia.options]\nBinaryName = \"/usr/bin/nvidia-container-runtime\"\n

Create the needed RuntimeClass:

cat <<EOF | kubectl apply -f -\napiVersion: node.k8s.io/v1\nkind: RuntimeClass\nmetadata:\n  name: nvidia\nhandler: nvidia\nEOF\n

Note: Detailed instructions on how to run nvidia-container-runtime on your node are available here.
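As an illustrative sketch (the image tag is a placeholder and the nvidia.com/gpu resource requires the NVIDIA device plugin to be installed), a workload can then select the runtime class like this:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: cuda-test            # illustrative name
spec:
  runtimeClassName: nvidia   # matches the RuntimeClass created above
  containers:
    - name: cuda
      image: nvidia/cuda:12.2.0-base-ubuntu22.04   # illustrative image tag
      command: ["nvidia-smi"]
      resources:
        limits:
          nvidia.com/gpu: 1  # requires the NVIDIA device plugin
```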

"},{"location":"runtime/#using-custom-cri-runtime","title":"Using custom CRI runtime","text":"

Warning: You can use your own CRI runtime with k0s (for example, docker). However, k0s will not start or manage the runtime, and configuration is solely your responsibility.

Use the --cri-socket option to run a k0s worker with a custom CRI runtime. The option takes input in the form <type>:<socket_path> (for type, use docker for a pure Docker setup and remote for anything else).

"},{"location":"runtime/#using-dockershim","title":"Using dockershim","text":"

To run k0s with a pre-existing Dockershim setup, run the worker with k0s worker --cri-socket docker:unix:///var/run/cri-dockerd.sock <token>. A detailed explanation of dockershim and a guide for installing cri-dockerd can be found in our k0s dockershim guide.

"},{"location":"selinux/","title":"SELinux Overview","text":"

SELinux enforces mandatory access control policies that confine user programs and system services, as well as access to files and network resources. Limiting privilege to the minimum required to work reduces or eliminates the ability of these programs and daemons to cause harm if faulty or compromised.

Enabling SELinux in container runtime provides an additional security control to help further enforce isolation among deployed containers and the host.

This guide describes how to enable SELinux in the Kubernetes environment provided by k0s on CentOS and Red Hat Enterprise Linux (RHEL).

"},{"location":"selinux/#requirements","title":"Requirements","text":"
  • SELinux is enabled on the host OS of the worker nodes.
  • SELinux has the container-selinux policy installed.
  • SELinux labels are correctly set for the k0s installation files on the worker nodes.
  • SELinux is enabled in the container runtime (such as containerd) on the worker nodes.
"},{"location":"selinux/#check-whether-selinux-is-enabled-on-host-os","title":"Check whether SELinux is enabled on host OS","text":"

SELinux is enabled on CentOS and RHEL by default. The command output below indicates that SELinux is enabled:

$ getenforce\nEnforcing\n
"},{"location":"selinux/#install-container-selinux","title":"Install container-selinux","text":"

It is required to have container-selinux installed. In most Fedora-based distributions, including Fedora 37, Red Hat Enterprise Linux 7, 8 and 9, CentOS 7 and 8, and Rocky Linux 9, this can be achieved by installing the container-selinux package.

In RHEL 7 and CentOS 7 this is achieved by running:

yum install -y container-selinux\n

In the rest of the mentioned distributions, run:

dnf install -y container-selinux\n
"},{"location":"selinux/#set-selinux-labels-for-k0s-installation-files","title":"Set SELinux labels for k0s installation files","text":"

Run the commands below on the host OS of the worker nodes:

DATA_DIR=\"/var/lib/k0s\"\nsudo semanage fcontext -a -t container_runtime_exec_t \"${DATA_DIR}/bin/containerd.*\"\nsudo semanage fcontext -a -t container_runtime_exec_t \"${DATA_DIR}/bin/runc\"\nsudo restorecon -R -v ${DATA_DIR}/bin\nsudo semanage fcontext -a -t container_var_lib_t \"${DATA_DIR}/containerd(/.*)?\"\nsudo semanage fcontext -a -t container_ro_file_t \"${DATA_DIR}/containerd/io.containerd.snapshotter.*/snapshots(/.*)?\"\nsudo restorecon -R -v ${DATA_DIR}/containerd\n
"},{"location":"selinux/#enable-selinux-in-containerd-of-k0s","title":"Enable SELinux in containerd of k0s","text":"

Add the lines below to /etc/k0s/containerd.toml on the worker nodes. You need to restart the k0s service on the node for the change to take effect.

[plugins.\"io.containerd.grpc.v1.cri\"]\nenable_selinux = true\n
"},{"location":"selinux/#verify-selinux-works-in-kubernetes-environment","title":"Verify SELinux works in Kubernetes environment","text":"

Following the example Assign SELinux labels to a Container, deploy a test pod using the YAML file below:

apiVersion: v1\nkind: Pod\nmetadata:\nname: test-selinux\nspec:\ncontainers:\n- image: busybox\nname: test-selinux\ncommand: [\"sleep\", \"infinity\"]\nsecurityContext:\nseLinuxOptions:\nlevel: \"s0:c123,c456\"\n

After the pod starts, SSH to the worker node on which the pod is running and check the pod process. It should display the label s0:c123,c456 that you specified in the YAML file:

$ ps -efZ | grep -F 'sleep infinity'\nsystem_u:system_r:container_t:s0:c123,c456 root 3346 3288  0 16:39 ?       00:00:00 sleep infinity\n
"},{"location":"shell-completion/","title":"Enabling Shell Completion","text":"

Generate the k0s completion script using the k0s completion <shell_name> command, for Bash, Zsh, fish, or PowerShell.

Sourcing the completion script in your shell enables k0s autocompletion.

"},{"location":"shell-completion/#bash","title":"Bash","text":"
echo 'source <(k0s completion bash)' >>~/.bashrc\n

To load completions for each session, execute once:

k0s completion bash > /etc/bash_completion.d/k0s\n
"},{"location":"shell-completion/#zsh","title":"Zsh","text":"

If shell completion is not already enabled in your Zsh environment, you will need to enable it:

echo \"autoload -U compinit; compinit\" >> ~/.zshrc\n

To load completions for each session, execute once:

k0s completion zsh > \"${fpath[1]}/_k0s\"\n

Note: You must start a new shell for the setup to take effect.

"},{"location":"shell-completion/#fish","title":"Fish","text":"
k0s completion fish | source\n

To load completions for each session, execute once:

k0s completion fish > ~/.config/fish/completions/k0s.fish\n
"},{"location":"storage/","title":"Storage","text":""},{"location":"storage/#bundled-openebs-storage","title":"Bundled OpenEBS storage","text":"

k0s comes with a bundled OpenEBS installation, which can be enabled via the configuration file.

Use the following configuration as an example:

spec:\nextensions:\nstorage:\ntype: openebs_local_storage\n
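In a complete configuration file this sits under spec.extensions; a minimal sketch of a full ClusterConfig could look like this:

```yaml
apiVersion: k0s.k0sproject.io/v1beta1
kind: ClusterConfig
metadata:
  name: k0s
spec:
  extensions:
    storage:
      type: openebs_local_storage
```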

The cluster will have two storage classes available for you to use:

k0s kubectl get storageclass\n
NAME               PROVISIONER        RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE\nopenebs-device     openebs.io/local   Delete          WaitForFirstConsumer   false                  24s\nopenebs-hostpath   openebs.io/local   Delete          WaitForFirstConsumer   false                  24s\n

The openebs-hostpath storage class maps to /var/openebs/local.

The openebs-device storage class is not configured by default; it can be configured via the manifest deployer according to the OpenEBS documentation.

"},{"location":"storage/#example-usage","title":"Example usage","text":"

Use the following manifests as an example of a pod with a mounted volume:

apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\nname: nginx-pvc\nnamespace: default\nspec:\naccessModes:\n- ReadWriteOnce\nstorageClassName: openebs-hostpath\nresources:\nrequests:\nstorage: 5Gi\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: nginx\nnamespace: default\nlabels:\napp: nginx\nspec:\nselector:\nmatchLabels:\napp: nginx\nstrategy:\ntype: Recreate\ntemplate:\nmetadata:\nlabels:\napp: nginx\nspec:\ncontainers:\n- image: nginx name: nginx\nvolumeMounts:\n- name: persistent-storage\nmountPath: /var/lib/nginx\nvolumes:\n- name: persistent-storage\npersistentVolumeClaim:\nclaimName: nginx-pvc\n
k0s kubectl apply -f nginx.yaml\n
persistentvolumeclaim/nginx-pvc created\ndeployment.apps/nginx created\nbash-5.1# k0s kc get pods\nNAME                    READY   STATUS    RESTARTS   AGE\nnginx-d95bcb7db-gzsdt   1/1     Running   0          30s\n
k0s kubectl get pv\n
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM               STORAGECLASS       REASON   AGE\npvc-9a7fae2d-eb03-42c3-aaa9-1a807d5df12f   5Gi        RWO            Delete           Bound    default/nginx-pvc   openebs-hostpath            30s\n
"},{"location":"storage/#csi","title":"CSI","text":"

k0s supports a wide range of storage options by utilizing the Container Storage Interface (CSI). All Kubernetes storage solutions are supported, and users can easily select the storage that best fits their needs.

When the storage solution implements Container Storage Interface (CSI), containers can communicate with the storage for creation and configuration of persistent volumes. This makes it easy to dynamically provision the requested volumes. It also expands the supported storage solutions from the previous generation, in-tree volume plugins. More information about the CSI concept is described on the Kubernetes Blog.

"},{"location":"storage/#installing-3rd-party-storage-solutions","title":"Installing 3rd party storage solutions","text":"

Follow your storage driver's installation instructions. Note that the kubelet installed by k0s uses a slightly different path for its working directory (/var/lib/k0s/kubelet instead of /var/lib/kubelet). Consult the CSI driver's configuration documentation on how to customize this path.
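As a generic illustration (not taken from any particular driver), CSI node plugins typically mount the kubelet directory via a hostPath volume, and that path needs to point at k0s's kubelet directory:

```yaml
# Illustrative excerpt from a CSI node DaemonSet spec
volumes:
  - name: kubelet-dir
    hostPath:
      path: /var/lib/k0s/kubelet   # instead of the default /var/lib/kubelet
      type: Directory
```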

"},{"location":"storage/#example-storage-solutions","title":"Example storage solutions","text":"

Different Kubernetes storage solutions are explained in the official Kubernetes storage documentation. All of them can be used with k0s. Here are some popular ones:

  • Rook-Ceph (Open Source)
  • MinIO (Open Source)
  • Gluster (Open Source)
  • Longhorn (Open Source)
  • Amazon EBS
  • Google Persistent Disk
  • Azure Disk
  • Portworx

If you are looking for fault-tolerant storage with data replication, you can find a k0s tutorial for configuring Ceph storage with Rook here.

"},{"location":"system-monitoring/","title":"System components monitoring","text":"

Controller nodes are isolated by default, which means that a cluster user cannot schedule workloads onto them.

k0s provides a mechanism to expose system components for monitoring. System component metrics can give better insight into what is happening inside them. Metrics are particularly useful for building dashboards and alerts. You can read more about metrics for Kubernetes system components here.

Note: This mechanism is an opt-in feature; you can enable it at installation time:

```shell\nsudo k0s install controller --enable-metrics-scraper\n```\n
"},{"location":"system-monitoring/#jobs","title":"Jobs","text":"

The list of components scraped by k0s:

  • kube-scheduler
  • kube-controller-manager

Note: kube-apiserver metrics are not scraped, since they are accessible via the kubernetes endpoint within the cluster.
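If you need them, you can for example query them directly through the API server:

```shell
# Fetch kube-apiserver metrics through the API itself
k0s kubectl get --raw /metrics | head
```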

"},{"location":"system-monitoring/#architecture","title":"Architecture","text":"

k0s uses a pushgateway with a TTL to make it possible to detect issues with metrics delivery. The default TTL is 2 minutes.

"},{"location":"system-requirements/","title":"System requirements","text":"

This page describes the system requirements for k0s.

"},{"location":"system-requirements/#minimum-memory-and-cpu-requirements","title":"Minimum memory and CPU requirements","text":"

The minimum requirements for k0s detailed below are approximations, and thus your results may vary.

| Role | Memory (RAM) | Virtual CPU (vCPU) |
| ------------------- | ------ | ------ |
| Controller node | 1 GB | 1 vCPU |
| Worker node | 0.5 GB | 1 vCPU |
| Controller + worker | 1 GB | 1 vCPU |
"},{"location":"system-requirements/#controller-node-recommendations","title":"Controller node recommendations","text":"
| # of Worker nodes | # of Pods | Recommended RAM | Recommended vCPU |
| ----------------- | ------------ | -------- | ---------- |
| up to 10 | up to 1000 | 1-2 GB | 1-2 vCPU |
| up to 50 | up to 5000 | 2-4 GB | 2-4 vCPU |
| up to 100 | up to 10000 | 4-8 GB | 2-4 vCPU |
| up to 500 | up to 50000 | 8-16 GB | 4-8 vCPU |
| up to 1000 | up to 100000 | 16-32 GB | 8-16 vCPU |
| up to 5000 | up to 150000 | 32-64 GB | 16-32 vCPU |

k0s has the standard Kubernetes limits for the maximum number of nodes, pods, etc. For more details, see the Kubernetes considerations for large clusters.

Measured memory consumption for a k0s controller node can be found below on this page.

"},{"location":"system-requirements/#storage","title":"Storage","text":"

It's recommended to use an SSD for optimal storage performance (cluster latency and throughput are sensitive to storage).

The specific storage consumption for k0s is as follows:

| Role | Storage (k0s part) |
| ------------------- | ------- |
| Controller node | ~0.5 GB |
| Worker node | ~1.3 GB |
| Controller + worker | ~1.7 GB |

Note: The operating system and application requirements must be considered in addition to the k0s part.

"},{"location":"system-requirements/#host-operating-system","title":"Host operating system","text":"
  • Linux (see Linux specific requirements for details)
  • Windows Server 2019
"},{"location":"system-requirements/#architecture","title":"Architecture","text":"
  • x86-64
  • ARM64
  • ARMv7
"},{"location":"system-requirements/#networking","title":"Networking","text":"

For information on the required ports and protocols, refer to networking.

"},{"location":"system-requirements/#external-runtime-dependencies","title":"External runtime dependencies","text":"

k0s strives to be as independent from the OS as possible. The current and past external runtime dependencies are documented here.

To run some automated compatibility checks on your system, use k0s sysinfo.
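
For example, run it on the host where you plan to install k0s:

k0s sysinfo\n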

"},{"location":"system-requirements/#controller-node-measured-memory-consumption","title":"Controller node measured memory consumption","text":"

The following table shows the measured memory consumption in the cluster of one controller node.

# of Worker nodes | # of Pods (besides default) | Memory consumption
--- | --- | ---
1 | 0 | 510 MB
1 | 100 | 600 MB
20 | 0 | 660 MB
20 | 2000 | 1000 MB
50 | 0 | 790 MB
50 | 5000 | 1400 MB
100 | 0 | 1000 MB
100 | 10000 | 2300 MB
200 | 0 | 1500 MB
200 | 20000 | 3300 MB

Measurement details:

  • k0s v1.22.4+k0s.2 (default configuration with etcd)
  • Ubuntu Server 20.04.3 LTS; the OS part of the used memory was around 180 MB
  • Hardware: AWS t3.xlarge (4 vCPUs, 16 GB RAM)
  • Pod image: nginx:1.21.4
"},{"location":"troubleshooting/","title":"Common Pitfalls","text":"

There are a few common cases we've seen where k0s fails to run properly.

"},{"location":"troubleshooting/#coredns-in-crashloop","title":"CoreDNS in crashloop","text":"

The most common case we've encountered so far has been CoreDNS getting into a crash loop on the node(s).

With kubectl you see something like this:

$ kubectl get pod --all-namespaces\nNAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE\nkube-system   calico-kube-controllers-5f6546844f-25px6   1/1     Running   0          167m\nkube-system   calico-node-fwjx5                          1/1     Running   0          164m\nkube-system   calico-node-t4tx5                          1/1     Running   0          164m\nkube-system   calico-node-whwsg                          1/1     Running   0          164m\nkube-system   coredns-5c98d7d4d8-tfs4q                   1/1     Error     17         167m\nkube-system   konnectivity-agent-9jkfd                   1/1     Running   0          164m\nkube-system   konnectivity-agent-bvhdb                   1/1     Running   0          164m\nkube-system   konnectivity-agent-r6mzj                   1/1     Running   0          164m\nkube-system   kube-proxy-kr2r9                           1/1     Running   0          164m\nkube-system   kube-proxy-tbljr                           1/1     Running   0          164m\nkube-system   kube-proxy-xbw7p                           1/1     Running   0          164m\nkube-system   metrics-server-7d4bcb75dd-pqkrs            1/1     Running   0          167m\n

When you check the logs, it'll show something like this:

kubectl -n kube-system logs coredns-5c98d7d4d8-tfs4q\n
plugin/loop: Loop (127.0.0.1:55953 -> :1053) detected for zone \".\", see https://coredns.io/plugins/loop#troubleshooting. Query: \"HINFO 4547991504243258144.3688648895315093531.\"\n

This is most often caused by the systemd-resolved stub resolver (or something similar) running locally, which makes CoreDNS detect a possible loop in DNS queries.

The easiest, though crude, way to work around this is to disable the systemd-resolved stub resolver and revert the host's /etc/resolv.conf to its original contents.
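
On systemd-based distributions, one common way to do this (distribution-specific, so treat it as a sketch) is to set DNSStubListener=no in /etc/systemd/resolved.conf, then point /etc/resolv.conf at the real resolver configuration and restart the service:

sudo ln -sf /run/systemd/resolve/resolv.conf /etc/resolv.conf\nsudo systemctl restart systemd-resolved\n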

Read more in the CoreDNS troubleshooting docs.

"},{"location":"troubleshooting/#k0s-controller-fails-on-arm-boxes","title":"k0s controller fails on ARM boxes","text":"

In the logs you will probably see etcd failing to start up properly.

etcd is not fully supported on the ARM architecture, so you need to run the k0s controller, and thereby the etcd process, with the environment variable ETCD_UNSUPPORTED_ARCH=arm.

Because etcd is not fully supported on ARM, the k0s control plane running with etcd is not fully supported on ARM either.
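
If you install k0s as a service, one way to set this variable is the --env flag of k0s install (a sketch; adapt to how you run the controller):

sudo k0s install controller -e ETCD_UNSUPPORTED_ARCH=arm\n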

"},{"location":"troubleshooting/#k0s-will-not-start-on-zfs-based-systems","title":"k0s will not start on ZFS-based systems","text":"

On ZFS-based systems k0s will fail to start because containerd uses the overlayfs snapshotter by default to manage image layers. This is not compatible with ZFS and requires a custom containerd configuration. The following steps should get k0s working on ZFS-based systems:

  • check with $ ctr -a /run/k0s/containerd.sock plugins ls that the containerd ZFS snapshotter plugin is in ok state (should be the case if ZFS kernel modules and ZFS userspace utils are correctly configured):
TYPE                            ID                       PLATFORMS      STATUS    \n...\nio.containerd.snapshotter.v1    zfs                      linux/amd64    ok\n...\n
  • create a containerd config according to the documentation: $ containerd config default > /etc/k0s/containerd.toml
  • modify the line in /etc/k0s/containerd.toml:
...\n[plugins.\"io.containerd.grpc.v1.cri\".containerd]\nsnapshotter = \"overlayfs\"\n...\n

to

...\n[plugins.\"io.containerd.grpc.v1.cri\".containerd]\nsnapshotter = \"zfs\"\n...\n
  • create a ZFS dataset to be used as snapshot storage at your desired location, e.g. $ zfs create -o mountpoint=/var/lib/k0s/containerd/io.containerd.snapshotter.v1.zfs rpool/containerd
  • install k0s as usual, e.g. $ k0s install controller --single -c /etc/k0s/k0s.yaml
  • containerd should be launched with ZFS support and k0s should initialize the cluster correctly
"},{"location":"troubleshooting/#pods-pending-when-using-cloud-providers","title":"Pods pending when using cloud providers","text":"

Once cloud provider support is enabled on the kubelet on worker nodes, the kubelet automatically adds the taint node.cloudprovider.kubernetes.io/uninitialized to the node. This taint prevents normal workloads from being scheduled on the node until the cloud provider controller runs a second initialization on the node and removes the taint. This means that these nodes are not available for scheduling until the cloud provider controller is successfully running on the cluster.
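
To check whether a node still carries the uninitialized taint, you can list node taints, for example:

kubectl get nodes -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints\n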

For troubleshooting your specific cloud provider see its documentation.

"},{"location":"troubleshooting/#k0s-not-working-with-read-only-usr","title":"k0s not working with read only /usr","text":"

By default k0s does not run on nodes where /usr is read only.

This can be fixed by changing the default path for volumePluginDir in your k0s config. You will need to change two values: one for the kubelet itself, and one for Calico.

Here is a snippet of an example config with the default values changed:

spec:\n  controllerManager:\n    extraArgs:\n      flex-volume-plugin-dir: \"/etc/kubernetes/kubelet-plugins/volume/exec\"\n  network:\n    calico:\n      flexVolumeDriverPath: /etc/k0s/kubelet-plugins/volume/exec/nodeagent~uds\n  workerProfiles:\n    - name: coreos\n      values:\n        volumePluginDir: /etc/k0s/kubelet-plugins/volume/exec/\n

With this config you can start your controller as usual. Any workers will need to be started with

k0s worker --profile coreos [TOKEN]\n
"},{"location":"troubleshooting/#profiling","title":"Profiling","text":"

We drop any debug-related information and symbols from the compiled binary by utilizing the -w and -s linker flags.

To keep those symbols, use the DEBUG environment variable:

DEBUG=true make k0s\n

Any value other than \"false\" will work.

To add custom linker flags, use the LD_FLAGS variable.

LD_FLAGS=\"--custom-flag=value\" make k0s\n
"},{"location":"troubleshooting/#im-using-custom-cri-and-missing-some-labels-in-prometheus-metrics","title":"I'm using custom CRI and missing some labels in Prometheus metrics","text":"

Due to the removal of the embedded dockershim from the Kubelet, the Kubelet's embedded cAdvisor metrics got slightly broken. If your container runtime is a custom containerd, you can add --kubelet-extra-args=\"--containerd=<path/to/containerd.sock>\" to the k0s worker startup. That configures the Kubelet's embedded cAdvisor to talk directly with containerd to gather the metrics and thus gets the expected labels in place.
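
For example (a sketch assuming the containerd socket lives at /run/containerd/containerd.sock; adjust the path to your runtime):

k0s worker --token-file k0s.token --kubelet-extra-args=\"--containerd=/run/containerd/containerd.sock\"\n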

Unfortunately this does not work when using Docker via the cri-dockerd shim. Currently, there is no easy solution to this problem.

In the future the Kubelet will be refactored to get the container metrics from the CRI interface rather than from the runtime directly. This work is specified and followed up in KEP-2371, but until that work completes the only option is to run a standalone cAdvisor. The known issues section of the official Kubernetes documentation about migrating away from dockershim explains the current shortcomings and shows how to run cAdvisor as a standalone DaemonSet.

"},{"location":"troubleshooting/#customized-configurations","title":"Customized configurations","text":"
  • All data directories reside under /var/lib/k0s, for example:
    • /var/lib/k0s/kubelet
    • /var/lib/k0s/etcd
"},{"location":"upgrade/","title":"Upgrade","text":"

The k0s upgrade is a simple process due to its single binary distribution. The k0s single binary file includes all the necessary parts for the upgrade, and essentially the upgrade process consists of replacing that file and restarting the service.

This tutorial explains two different approaches for k0s upgrade:

  • Upgrade a k0s node locally
  • Upgrade a k0s cluster using k0sctl
"},{"location":"upgrade/#upgrade-a-k0s-node-locally","title":"Upgrade a k0s node locally","text":"

If your k0s cluster has been deployed with k0sctl, then k0sctl provides the easiest upgrade method. In that case jump to the next chapter. However, if you have deployed k0s without k0sctl, then follow the upgrade method explained in this chapter.

Before starting the upgrade, consider moving your applications to another node if you want to avoid downtime. This can be done by draining a worker node. Remember to uncordon the worker node afterwards to tell Kubernetes that it can resume scheduling new pods onto the node.
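
For example (the node name worker0 is only illustrative):

kubectl drain worker0 --ignore-daemonsets --delete-emptydir-data\n# ... upgrade and restart k0s on the node ...\nkubectl uncordon worker0\n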

The upgrade process is started by stopping the currently running k0s service.

sudo k0s stop\n

Now you can replace the old k0s binary file. The easiest way is to use the download script. It will download the latest k0s binary and replace the old binary with it. You can also do this manually without the download script.

curl -sSLf https://get.k0s.sh | sudo sh\n
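
If you prefer to replace the binary manually instead, download the release binary for your architecture from the k0s GitHub releases and copy it over the old one. A sketch, assuming an amd64 host and that k0s is installed at /usr/local/bin/k0s:

curl -sSLf -o /tmp/k0s https://github.com/k0sproject/k0s/releases/download/v1.27.5+k0s.0/k0s-v1.27.5+k0s.0-amd64\nsudo install -m 755 /tmp/k0s /usr/local/bin/k0s\n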

Then you can start the service (with the upgraded k0s) and your upgrade is done.

sudo k0s start\n
"},{"location":"upgrade/#upgrade-a-k0s-cluster-using-k0sctl","title":"Upgrade a k0s cluster using k0sctl","text":"

Upgrading a k0s cluster with k0sctl is not done through a dedicated command (there is no upgrade sub-command in k0sctl) but through the configuration file. The configuration file describes the desired state of the cluster, and when you pass it to the k0sctl apply command, k0sctl discovers the current state and does whatever is necessary to bring the cluster to the desired state (for example, perform an upgrade).

"},{"location":"upgrade/#k0sctl-cluster-upgrade-process","title":"k0sctl cluster upgrade process","text":"

The following operations occur during a k0sctl upgrade:

  1. Upgrade of each controller, one at a time. There is no downtime if multiple controllers are configured.

  2. Upgrade of workers, in batches of 10%.

  3. Draining of workers, which allows the workload to move to other nodes prior to the actual upgrade of the worker node components. (To skip the drain process, use the --no-drain option.)

  4. The upgrade process continues once the upgraded nodes return to Ready state.

You can configure the desired cluster version in the k0sctl configuration by setting the value of spec.k0s.version:

spec:\n  k0s:\n    version: 1.27.5+k0s.0\n

If you do not specify a version, k0sctl checks online for the latest version and defaults to it.

k0sctl apply\n
...\n...\nINFO[0001] ==> Running phase: Upgrade controllers\nINFO[0001] [ssh] 10.0.0.23:22: starting upgrade\nINFO[0001] [ssh] 10.0.0.23:22: Running with legacy service name, migrating...\nINFO[0011] [ssh] 10.0.0.23:22: waiting for the k0s service to start\nINFO[0016] ==> Running phase: Upgrade workers\nINFO[0016] Upgrading 1 workers in parallel\nINFO[0016] [ssh] 10.0.0.17:22: upgrade starting\nINFO[0027] [ssh] 10.0.0.17:22: waiting for node to become ready again\nINFO[0027] [ssh] 10.0.0.17:22: upgrade successful\nINFO[0027] ==> Running phase: Disconnect from hosts\nINFO[0027] ==> Finished in 27s\nINFO[0027] k0s cluster version 1.27.5+k0s.0 is now installed\nINFO[0027] Tip: To access the cluster you can now fetch the admin kubeconfig using:\nINFO[0027]      k0sctl kubeconfig\n
"},{"location":"user-management/","title":"User Management","text":""},{"location":"user-management/#adding-a-cluster-user","title":"Adding a Cluster User","text":"

Run the kubeconfig create command on the controller to add a user to the cluster. The command outputs a kubeconfig for the user to use for authentication.

k0s kubeconfig create [username]\n
"},{"location":"user-management/#enabling-access-to-cluster-resources","title":"Enabling Access to Cluster Resources","text":"

Create the user with the system:masters group to grant the user access to the cluster:

k0s kubeconfig create --groups \"system:masters\" testUser > k0s.config\n

Create a cluster role binding to grant the user access to the resources:

k0s kubectl create clusterrolebinding --kubeconfig k0s.config testUser-admin-binding --clusterrole=admin --user=testUser\n
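
You can then verify access with the new kubeconfig, for example:

kubectl --kubeconfig k0s.config get nodes\n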
"},{"location":"worker-node-config/","title":"Configuration options for worker nodes","text":"

Although the k0s worker command does not take in any special yaml configuration, there are still methods for configuring the workers to run various components.

"},{"location":"worker-node-config/#node-labels","title":"Node labels","text":"

The k0s worker command accepts the --labels flag, with which you can make the newly joined worker node register itself in the Kubernetes API with the given set of labels.

For example, running the worker with k0s worker --token-file k0s.token --labels=\"k0sproject.io/foo=bar,k0sproject.io/other=xyz\" results in:

kubectl get node --show-labels\n
NAME      STATUS     ROLES    AGE   VERSION        LABELS\nworker0   NotReady   <none>   10s   v1.27.5+k0s  beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,k0sproject.io/foo=bar,k0sproject.io/other=xyz,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker0,kubernetes.io/os=linux\n

Controller worker nodes are assigned node.k0sproject.io/role=control-plane and node-role.kubernetes.io/control-plane=true labels:

kubectl get node --show-labels\n
NAME          STATUS     ROLES           AGE   VERSION        LABELS\ncontroller0   NotReady   control-plane   10s   v1.27.5+k0s  beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=worker0,kubernetes.io/os=linux,node.k0sproject.io/role=control-plane,node-role.kubernetes.io/control-plane=true\n

Note: Setting the labels is only effective on the first registration of the node. Changing the labels thereafter has no effect.
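
The --labels flag only affects the initial registration; after that, node labels can still be managed through the Kubernetes API as usual, for example:

kubectl label node worker0 k0sproject.io/foo=baz --overwrite\n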

"},{"location":"worker-node-config/#taints","title":"Taints","text":"

The k0s worker command accepts the --taints flag, with which you can make the newly joined worker node register itself with the given set of taints.

Note: Controller nodes running with --enable-worker are automatically assigned the node-role.kubernetes.io/master:NoExecute taint. You can disable the default taints using the --no-taints parameter.

kubectl get nodes -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints\n
NAME          TAINTS\ncontroller0   [map[effect:NoSchedule key:node-role.kubernetes.io/master]]\nworker0       <none>\n
"},{"location":"worker-node-config/#kubelet-configuration","title":"Kubelet configuration","text":"

The k0s worker command accepts a generic flag to pass in any set of arguments for the kubelet process.

For example, running k0s worker --token-file=k0s.token --kubelet-extra-args=\"--node-ip=1.2.3.4 --address=0.0.0.0\" passes in the given flags to Kubelet as-is. As such, you must confirm that any flags you are passing in are properly formatted and valued as k0s will not validate those flags.

"},{"location":"worker-node-config/#worker-profiles","title":"Worker Profiles","text":"

Kubelet configuration fields can also be set via worker profiles. Worker profiles are defined in the main k0s.yaml and are used to generate ConfigMaps containing a custom kubelet.config.k8s.io/v1beta1 KubeletConfiguration object, as sketched below. For examples of k0s.yaml files containing worker profiles, go here. For a list of possible Kubelet configuration fields, go here.
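
A minimal sketch of a worker profile in k0s.yaml (the profile name and the maxPods field are only illustrative; any KubeletConfiguration field can be set under values):

spec:\n  workerProfiles:\n    - name: custom-profile\n      values:\n        maxPods: 200\n

Workers using this profile would then be started with k0s worker --profile custom-profile [TOKEN].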

"},{"location":"worker-node-config/#iptables-mode","title":"IPTables Mode","text":"

k0s detects the iptables backend automatically based on the existing records. On a brand-new setup, iptables-nft will be used. There is an --iptables-mode flag to specify the mode explicitly. Valid values are nft, legacy, and auto (the default).

k0s worker --iptables-mode=nft\n
"},{"location":"cli/","title":"Index","text":""},{"location":"cli/#k0s","title":"k0s","text":"

k0s - Zero Friction Kubernetes

"},{"location":"cli/#synopsis","title":"Synopsis","text":"

k0s - The zero friction Kubernetes - https://k0sproject.io This software is built and distributed by Mirantis, Inc., and is subject to EULA https://k0sproject.io/licenses/eula

"},{"location":"cli/#options","title":"Options","text":"
  -h, --help   help for k0s\n
"},{"location":"cli/#see-also","title":"SEE ALSO","text":"
  • k0s airgap - Manage airgap setup
  • k0s api - Run the controller API
  • k0s backup - Back-Up k0s configuration. Must be run as root (or with sudo)
  • k0s completion - Generate completion script
  • k0s config - Configuration related sub-commands
  • k0s controller - Run controller
  • k0s ctr - containerd CLI
  • k0s docs - Generate k0s command documentation
  • k0s etcd - Manage etcd cluster
  • k0s install - Install k0s on a brand-new system. Must be run as root (or with sudo)
  • k0s kubeconfig - Create a kubeconfig file for a specified user
  • k0s kubectl - kubectl controls the Kubernetes cluster manager
  • k0s reset - Uninstall k0s. Must be run as root (or with sudo)
  • k0s restore - restore k0s state from given backup archive. Use '-' as filename to read from stdin. Must be run as root (or with sudo)
  • k0s start - Start the k0s service configured on this host. Must be run as root (or with sudo)
  • k0s status - Get k0s instance status information
  • k0s stop - Stop the k0s service configured on this host. Must be run as root (or with sudo)
  • k0s sysinfo - Display system information
  • k0s token - Manage join tokens
  • k0s version - Print the k0s version
  • k0s worker - Run worker
"},{"location":"cli/k0s/","title":"K0s","text":""},{"location":"cli/k0s/#k0s","title":"k0s","text":"

k0s - Zero Friction Kubernetes

"},{"location":"cli/k0s/#synopsis","title":"Synopsis","text":"

k0s - The zero friction Kubernetes - https://k0sproject.io This software is built and distributed by Mirantis, Inc., and is subject to EULA https://k0sproject.io/licenses/eula

"},{"location":"cli/k0s/#options","title":"Options","text":"
  -h, --help   help for k0s\n
"},{"location":"cli/k0s/#see-also","title":"SEE ALSO","text":"
  • k0s airgap - Manage airgap setup
  • k0s api - Run the controller API
  • k0s backup - Back-Up k0s configuration. Must be run as root (or with sudo)
  • k0s completion - Generate completion script
  • k0s config - Configuration related sub-commands
  • k0s controller - Run controller
  • k0s ctr - containerd CLI
  • k0s docs - Generate k0s command documentation
  • k0s etcd - Manage etcd cluster
  • k0s install - Install k0s on a brand-new system. Must be run as root (or with sudo)
  • k0s kubeconfig - Create a kubeconfig file for a specified user
  • k0s kubectl - kubectl controls the Kubernetes cluster manager
  • k0s reset - Uninstall k0s. Must be run as root (or with sudo)
  • k0s restore - restore k0s state from given backup archive. Use '-' as filename to read from stdin. Must be run as root (or with sudo)
  • k0s start - Start the k0s service configured on this host. Must be run as root (or with sudo)
  • k0s status - Get k0s instance status information
  • k0s stop - Stop the k0s service configured on this host. Must be run as root (or with sudo)
  • k0s sysinfo - Display system information
  • k0s token - Manage join tokens
  • k0s version - Print the k0s version
  • k0s worker - Run worker
"},{"location":"cli/k0s_airgap/","title":"K0s airgap","text":""},{"location":"cli/k0s_airgap/#k0s-airgap","title":"k0s airgap","text":"

Manage airgap setup

"},{"location":"cli/k0s_airgap/#options","title":"Options","text":"
  -c, --config string          config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for airgap\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_airgap/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
  • k0s airgap list-images - List image names and version needed for air-gap install
"},{"location":"cli/k0s_airgap_list-images/","title":"K0s airgap list images","text":""},{"location":"cli/k0s_airgap_list-images/#k0s-airgap-list-images","title":"k0s airgap list-images","text":"

List image names and version needed for air-gap install

k0s airgap list-images [flags]\n
"},{"location":"cli/k0s_airgap_list-images/#examples","title":"Examples","text":"
k0s airgap list-images\n
"},{"location":"cli/k0s_airgap_list-images/#options","title":"Options","text":"
      --all                    include all images, even if they are not used in the current configuration\n  -c, --config string          config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for list-images\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_airgap_list-images/#see-also","title":"SEE ALSO","text":"
  • k0s airgap - Manage airgap setup
"},{"location":"cli/k0s_api/","title":"K0s api","text":""},{"location":"cli/k0s_api/#k0s-api","title":"k0s api","text":"

Run the controller API

k0s api [flags]\n
"},{"location":"cli/k0s_api/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for api\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_api/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_backup/","title":"K0s backup","text":""},{"location":"cli/k0s_backup/#k0s-backup","title":"k0s backup","text":"

Back-Up k0s configuration. Must be run as root (or with sudo)

k0s backup [flags]\n
"},{"location":"cli/k0s_backup/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for backup\n      --save-path string       destination directory path for backup assets, use '-' for stdout\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_backup/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_completion/","title":"K0s completion","text":""},{"location":"cli/k0s_completion/#k0s-completion","title":"k0s completion","text":"

Generate completion script

"},{"location":"cli/k0s_completion/#synopsis","title":"Synopsis","text":"

To load completions:

Bash:

$ source <(k0s completion bash)

"},{"location":"cli/k0s_completion/#to-load-completions-for-each-session-execute-once","title":"To load completions for each session, execute once:","text":"

$ k0s completion bash > /etc/bash_completion.d/k0s

Zsh:

"},{"location":"cli/k0s_completion/#if-shell-completion-is-not-already-enabled-in-your-environment-you-will-need","title":"If shell completion is not already enabled in your environment you will need","text":""},{"location":"cli/k0s_completion/#to-enable-it-you-can-execute-the-following-once","title":"to enable it. You can execute the following once:","text":"

$ echo \"autoload -U compinit; compinit\" >> ~/.zshrc

"},{"location":"cli/k0s_completion/#to-load-completions-for-each-session-execute-once_1","title":"To load completions for each session, execute once:","text":"

$ k0s completion zsh > \"${fpath[1]}/_k0s\"

"},{"location":"cli/k0s_completion/#you-will-need-to-start-a-new-shell-for-this-setup-to-take-effect","title":"You will need to start a new shell for this setup to take effect.","text":"

Fish:

$ k0s completion fish | source

"},{"location":"cli/k0s_completion/#to-load-completions-for-each-session-execute-once_2","title":"To load completions for each session, execute once:","text":"

$ k0s completion fish > ~/.config/fish/completions/k0s.fish

k0s completion <bash|zsh|fish|powershell>\n
"},{"location":"cli/k0s_completion/#options","title":"Options","text":"
  -h, --help   help for completion\n
"},{"location":"cli/k0s_completion/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_config/","title":"K0s config","text":""},{"location":"cli/k0s_config/#k0s-config","title":"k0s config","text":"

Configuration related sub-commands

"},{"location":"cli/k0s_config/#options","title":"Options","text":"
  -h, --help   help for config\n
"},{"location":"cli/k0s_config/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
  • k0s config create - Output the default k0s configuration yaml to stdout
  • k0s config edit - Launch the editor configured in your shell to edit k0s configuration
  • k0s config status - Display dynamic configuration reconciliation status
  • k0s config validate - Validate k0s configuration
"},{"location":"cli/k0s_config_create/","title":"K0s config create","text":""},{"location":"cli/k0s_config_create/#k0s-config-create","title":"k0s config create","text":"

Output the default k0s configuration yaml to stdout

k0s config create [flags]\n
"},{"location":"cli/k0s_config_create/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for create\n      --include-images         include the default images in the output\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_config_create/#see-also","title":"SEE ALSO","text":"
  • k0s config - Configuration related sub-commands
"},{"location":"cli/k0s_config_edit/","title":"K0s config edit","text":""},{"location":"cli/k0s_config_edit/#k0s-config-edit","title":"k0s config edit","text":"

Launch the editor configured in your shell to edit k0s configuration

k0s config edit [flags]\n
"},{"location":"cli/k0s_config_edit/#options","title":"Options","text":"
      --data-dir string   Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n      --debug             Debug logging [$DEBUG]\n  -h, --help              help for edit\n
"},{"location":"cli/k0s_config_edit/#see-also","title":"SEE ALSO","text":"
  • k0s config - Configuration related sub-commands
"},{"location":"cli/k0s_config_status/","title":"K0s config status","text":""},{"location":"cli/k0s_config_status/#k0s-config-status","title":"k0s config status","text":"

Display dynamic configuration reconciliation status

k0s config status [flags]\n
"},{"location":"cli/k0s_config_status/#options","title":"Options","text":"
      --data-dir string   Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n      --debug             Debug logging [$DEBUG]\n  -h, --help              help for status\n  -o, --output string     Output format. Must be one of yaml|json\n
"},{"location":"cli/k0s_config_status/#see-also","title":"SEE ALSO","text":"
  • k0s config - Configuration related sub-commands
"},{"location":"cli/k0s_config_validate/","title":"K0s config validate","text":""},{"location":"cli/k0s_config_validate/#k0s-config-validate","title":"k0s config validate","text":"

Validate k0s configuration

"},{"location":"cli/k0s_config_validate/#synopsis","title":"Synopsis","text":"

Example: k0s config validate --config path_to_config.yaml

k0s config validate [flags]\n
"},{"location":"cli/k0s_config_validate/#options","title":"Options","text":"
  -c, --config string          config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for validate\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_config_validate/#see-also","title":"SEE ALSO","text":"
  • k0s config - Configuration related sub-commands
"},{"location":"cli/k0s_controller/","title":"K0s controller","text":""},{"location":"cli/k0s_controller/#k0s-controller","title":"k0s controller","text":"

Run controller

k0s controller [join-token] [flags]\n
"},{"location":"cli/k0s_controller/#examples","title":"Examples","text":"
    Command to associate master nodes:\n    CLI argument:\n    $ k0s controller [join-token]\n\n    or CLI flag:\n    $ k0s controller --token-file [path_to_file]\n    Note: Token can be passed either as a CLI argument or as a flag\n
"},{"location":"cli/k0s_controller/#options","title":"Options","text":"
      --api-server string                              HACK: api-server for the windows worker node\n      --cidr-range string                              HACK: cidr range for the windows worker node (default \"10.96.0.0/12\")\n      --cluster-dns string                             HACK: cluster dns for the windows worker node (default \"10.96.0.10\")\n  -c, --config string                                  config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n      --cri-socket string                              container runtime socket to use, default to internal containerd. Format: [remote|docker]:[path-to-socket]\n      --data-dir string                                Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                                          Debug logging (default: false)\n      --debugListenOn string                           Http listenOn for Debug pprof handler (default \":6060\")\n      --disable-components strings                     disable components (valid items: autopilot,control-api,coredns,csr-approver,endpoint-reconciler,helm,konnectivity-server,kube-controller-manager,kube-proxy,kube-scheduler,metrics-server,network-provider,node-role,system-rbac,worker-config)\n      --enable-cloud-provider                          Whether or not to enable cloud provider support in kubelet\n      --enable-dynamic-config                          enable cluster-wide dynamic config based on custom resource\n      --enable-k0s-cloud-provider                      enables the k0s-cloud-provider (default false)\n      --enable-metrics-scraper                         enable scraping metrics from the controller components (kube-scheduler, kube-controller-manager)\n      --enable-worker                                  enable worker (default false)\n  -h, --help                                           help for controller\n      --ignore-pre-flight-checks                       continue even if pre-flight checks fail\n      --iptables-mode string                           iptables mode (valid values: nft, legacy, auto). default: auto\n      --k0s-cloud-provider-port int                    the port that k0s-cloud-provider binds on (default 10258)\n      --k0s-cloud-provider-update-frequency duration   the frequency of k0s-cloud-provider node updates (default 2m0s)\n      --kube-controller-manager-extra-args string      extra args for kube-controller-manager\n      --kubelet-extra-args string                      extra args for kubelet\n      --labels strings                                 Node labels, list of key=value pairs\n  -l, --logging stringToString                         Logging Levels for the different components (default [kube-proxy=1,etcd=info,containerd=info,konnectivity-server=1,kube-apiserver=1,kube-controller-manager=1,kube-scheduler=1,kubelet=1])\n      --no-taints                                      disable default taints for controller node\n      --profile string                                 worker profile to use on the node (default \"default\")\n      --single                                         enable single node (implies --enable-worker, default false)\n      --status-socket string                           Full file path to the socket file. 
(default \"/var/lib/k0s/run/status.sock\")\n      --taints strings                                 Node taints, list of key=value:effect strings\n      --token-file string                              Path to the file containing join-token.\n  -v, --verbose                                        Verbose logging (default: false)\n
"},{"location":"cli/k0s_controller/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_ctr/","title":"K0s ctr","text":""},{"location":"cli/k0s_ctr/#k0s-ctr","title":"k0s ctr","text":"

containerd CLI

"},{"location":"cli/k0s_ctr/#synopsis","title":"Synopsis","text":"

ctr is an unsupported debug and administrative client for interacting with the containerd daemon. Because it is unsupported, the commands, options, and operations are not guaranteed to be backward compatible or stable from release to release of the containerd project.

k0s ctr [flags]\n
"},{"location":"cli/k0s_ctr/#options","title":"Options","text":"
  -h, --help   help for ctr\n
"},{"location":"cli/k0s_ctr/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_docs/","title":"K0s docs","text":""},{"location":"cli/k0s_docs/#k0s-docs","title":"k0s docs","text":"

Generate k0s command documentation

k0s docs <markdown|man> [flags]\n
"},{"location":"cli/k0s_docs/#options","title":"Options","text":"
  -h, --help   help for docs\n
"},{"location":"cli/k0s_docs/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_etcd/","title":"K0s etcd","text":""},{"location":"cli/k0s_etcd/#k0s-etcd","title":"k0s etcd","text":"

Manage etcd cluster

"},{"location":"cli/k0s_etcd/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for etcd\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_etcd/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
  • k0s etcd leave - Sign off a given etc node from etcd cluster
  • k0s etcd member-list - Returns etcd cluster members list
"},{"location":"cli/k0s_etcd_leave/","title":"K0s etcd leave","text":""},{"location":"cli/k0s_etcd_leave/#k0s-etcd-leave","title":"k0s etcd leave","text":"

Sign off a given etc node from etcd cluster

k0s etcd leave [flags]\n
"},{"location":"cli/k0s_etcd_leave/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for leave\n      --peer-address string    etcd peer address\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_etcd_leave/#see-also","title":"SEE ALSO","text":"
  • k0s etcd - Manage etcd cluster
"},{"location":"cli/k0s_etcd_member-list/","title":"K0s etcd member list","text":""},{"location":"cli/k0s_etcd_member-list/#k0s-etcd-member-list","title":"k0s etcd member-list","text":"

Returns etcd cluster members list

k0s etcd member-list [flags]\n
"},{"location":"cli/k0s_etcd_member-list/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for member-list\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_etcd_member-list/#see-also","title":"SEE ALSO","text":"
  • k0s etcd - Manage etcd cluster
"},{"location":"cli/k0s_install/","title":"K0s install","text":""},{"location":"cli/k0s_install/#k0s-install","title":"k0s install","text":"

Install k0s on a brand-new system. Must be run as root (or with sudo)

"},{"location":"cli/k0s_install/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -e, --env stringArray        set environment variable\n      --force                  force init script creation\n  -h, --help                   help for install\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_install/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
  • k0s install controller - Install k0s controller on a brand-new system. Must be run as root (or with sudo)
  • k0s install worker - Install k0s worker on a brand-new system. Must be run as root (or with sudo)
"},{"location":"cli/k0s_install_controller/","title":"K0s install controller","text":""},{"location":"cli/k0s_install_controller/#k0s-install-controller","title":"k0s install controller","text":"

Install k0s controller on a brand-new system. Must be run as root (or with sudo)

k0s install controller [flags]\n
"},{"location":"cli/k0s_install_controller/#examples","title":"Examples","text":"
All default values of controller command will be passed to the service stub unless overridden.\n\nWith the controller subcommand you can setup a single node cluster by running:\n\n    k0s install controller --single\n
"},{"location":"cli/k0s_install_controller/#options","title":"Options","text":"
      --api-server string                              HACK: api-server for the windows worker node\n      --cidr-range string                              HACK: cidr range for the windows worker node (default \"10.96.0.0/12\")\n      --cluster-dns string                             HACK: cluster dns for the windows worker node (default \"10.96.0.10\")\n  -c, --config string                                  config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n      --cri-socket string                              container runtime socket to use, default to internal containerd. Format: [remote|docker]:[path-to-socket]\n      --data-dir string                                Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                                          Debug logging (default: false)\n      --debugListenOn string                           Http listenOn for Debug pprof handler (default \":6060\")\n      --disable-components strings                     disable components (valid items: autopilot,control-api,coredns,csr-approver,endpoint-reconciler,helm,konnectivity-server,kube-controller-manager,kube-proxy,kube-scheduler,metrics-server,network-provider,node-role,system-rbac,worker-config)\n      --enable-cloud-provider                          Whether or not to enable cloud provider support in kubelet\n      --enable-dynamic-config                          enable cluster-wide dynamic config based on custom resource\n      --enable-k0s-cloud-provider                      enables the k0s-cloud-provider (default false)\n      --enable-metrics-scraper                         enable scraping metrics from the controller components (kube-scheduler, kube-controller-manager)\n      --enable-worker                                  enable worker (default false)\n  -h, --help                                           help for controller\n      --iptables-mode string                           iptables mode (valid values: nft, legacy, auto). default: auto\n      --k0s-cloud-provider-port int                    the port that k0s-cloud-provider binds on (default 10258)\n      --k0s-cloud-provider-update-frequency duration   the frequency of k0s-cloud-provider node updates (default 2m0s)\n      --kube-controller-manager-extra-args string      extra args for kube-controller-manager\n      --kubelet-extra-args string                      extra args for kubelet\n      --labels strings                                 Node labels, list of key=value pairs\n  -l, --logging stringToString                         Logging Levels for the different components (default [kube-apiserver=1,kube-controller-manager=1,kube-scheduler=1,kubelet=1,kube-proxy=1,etcd=info,containerd=info,konnectivity-server=1])\n      --no-taints                                      disable default taints for controller node\n      --profile string                                 worker profile to use on the node (default \"default\")\n      --single                                         enable single node (implies --enable-worker, default false)\n      --status-socket string                           Full file path to the socket file. 
(default \"/var/lib/k0s/run/status.sock\")\n      --taints strings                                 Node taints, list of key=value:effect strings\n      --token-file string                              Path to the file containing join-token.\n  -v, --verbose                                        Verbose logging (default: false)\n
"},{"location":"cli/k0s_install_controller/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":"
  -e, --env stringArray   set environment variable\n      --force             force init script creation\n
"},{"location":"cli/k0s_install_controller/#see-also","title":"SEE ALSO","text":"
  • k0s install - Install k0s on a brand-new system. Must be run as root (or with sudo)
"},{"location":"cli/k0s_install_worker/","title":"K0s install worker","text":""},{"location":"cli/k0s_install_worker/#k0s-install-worker","title":"k0s install worker","text":"

Install k0s worker on a brand-new system. Must be run as root (or with sudo)

k0s install worker [flags]\n
"},{"location":"cli/k0s_install_worker/#examples","title":"Examples","text":"
Worker subcommand allows you to pass in all available worker parameters.\nAll default values of worker command will be passed to the service stub unless overridden.\n\nWindows flags like \"--api-server\", \"--cidr-range\" and \"--cluster-dns\" will be ignored since install command doesn't yet support Windows services\n
"},{"location":"cli/k0s_install_worker/#options","title":"Options","text":"
      --api-server string           HACK: api-server for the windows worker node\n      --cidr-range string           HACK: cidr range for the windows worker node (default \"10.96.0.0/12\")\n      --cluster-dns string          HACK: cluster dns for the windows worker node (default \"10.96.0.10\")\n      --cri-socket string           container runtime socket to use, default to internal containerd. Format: [remote|docker]:[path-to-socket]\n      --data-dir string             Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                       Debug logging (default: false)\n      --debugListenOn string        Http listenOn for Debug pprof handler (default \":6060\")\n      --enable-cloud-provider       Whether or not to enable cloud provider support in kubelet\n  -h, --help                        help for worker\n      --iptables-mode string        iptables mode (valid values: nft, legacy, auto). default: auto\n      --kubelet-extra-args string   extra args for kubelet\n      --labels strings              Node labels, list of key=value pairs\n  -l, --logging stringToString      Logging Levels for the different components (default [containerd=info,konnectivity-server=1,kube-apiserver=1,kube-controller-manager=1,kube-scheduler=1,kubelet=1,kube-proxy=1,etcd=info])\n      --profile string              worker profile to use on the node (default \"default\")\n      --status-socket string        Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n      --taints strings              Node taints, list of key=value:effect strings\n      --token-file string           Path to the file containing token.\n  -v, --verbose                     Verbose logging (default: false)\n
"},{"location":"cli/k0s_install_worker/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":"
  -e, --env stringArray   set environment variable\n      --force             force init script creation\n
"},{"location":"cli/k0s_install_worker/#see-also","title":"SEE ALSO","text":"
  • k0s install - Install k0s on a brand-new system. Must be run as root (or with sudo)
"},{"location":"cli/k0s_kubeconfig/","title":"K0s kubeconfig","text":""},{"location":"cli/k0s_kubeconfig/#k0s-kubeconfig","title":"k0s kubeconfig","text":"

Create a kubeconfig file for a specified user

k0s kubeconfig [command] [flags]\n
"},{"location":"cli/k0s_kubeconfig/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for kubeconfig\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_kubeconfig/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
  • k0s kubeconfig admin - Display Admin's Kubeconfig file
  • k0s kubeconfig create - Create a kubeconfig for a user
"},{"location":"cli/k0s_kubeconfig_admin/","title":"K0s kubeconfig admin","text":""},{"location":"cli/k0s_kubeconfig_admin/#k0s-kubeconfig-admin","title":"k0s kubeconfig admin","text":"

Display Admin's Kubeconfig file

"},{"location":"cli/k0s_kubeconfig_admin/#synopsis","title":"Synopsis","text":"

Print kubeconfig for the Admin user to stdout

k0s kubeconfig admin [flags]\n
"},{"location":"cli/k0s_kubeconfig_admin/#examples","title":"Examples","text":"
    $ k0s kubeconfig admin > ~/.kube/config\n    $ export KUBECONFIG=~/.kube/config\n    $ kubectl get nodes\n
"},{"location":"cli/k0s_kubeconfig_admin/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for admin\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_kubeconfig_admin/#see-also","title":"SEE ALSO","text":"
  • k0s kubeconfig - Create a kubeconfig file for a specified user
"},{"location":"cli/k0s_kubeconfig_create/","title":"K0s kubeconfig create","text":""},{"location":"cli/k0s_kubeconfig_create/#k0s-kubeconfig-create","title":"k0s kubeconfig create","text":"

Create a kubeconfig for a user

"},{"location":"cli/k0s_kubeconfig_create/#synopsis","title":"Synopsis","text":"

Create a kubeconfig with a signed certificate and public key for a given user (and optionally user groups) Note: A certificate once signed cannot be revoked for a particular user

k0s kubeconfig create username [flags]\n
"},{"location":"cli/k0s_kubeconfig_create/#examples","title":"Examples","text":"
    Command to create a kubeconfig for a user:\n    CLI argument:\n    $ k0s kubeconfig create username\n\n    optionally add groups:\n    $ k0s kubeconfig create username --groups [groups]\n
"},{"location":"cli/k0s_kubeconfig_create/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n      --groups string          Specify groups\n  -h, --help                   help for create\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_kubeconfig_create/#see-also","title":"SEE ALSO","text":"
  • k0s kubeconfig - Create a kubeconfig file for a specified user
"},{"location":"cli/k0s_kubectl/","title":"K0s kubectl","text":""},{"location":"cli/k0s_kubectl/#k0s-kubectl","title":"k0s kubectl","text":"

kubectl controls the Kubernetes cluster manager

"},{"location":"cli/k0s_kubectl/#synopsis","title":"Synopsis","text":"

kubectl controls the Kubernetes cluster manager.

Find more information at: https://kubernetes.io/docs/reference/kubectl/

k0s kubectl [flags]\n
"},{"location":"cli/k0s_kubectl/#options","title":"Options","text":"
      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.\n      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.\n      --as-uid string                  UID to impersonate for the operation.\n      --cache-dir string               Default cache directory (default \"/home/runner/.kube/cache\")\n      --certificate-authority string   Path to a cert file for the certificate authority\n      --client-certificate string      Path to a client certificate file for TLS\n      --client-key string              Path to a client key file for TLS\n      --cluster string                 The name of the kubeconfig cluster to use\n      --context string                 The name of the kubeconfig context to use\n      --data-dir string                Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n      --debug                          Debug logging [$DEBUG]\n      --disable-compression            If true, opt-out of response compression for all requests to the server\n  -h, --help                           help for kubectl\n      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.\n      --log-flush-frequency duration   Maximum number of seconds between log flushes (default 5s)\n      --match-server-version           Require server version to match client version\n  -n, --namespace string               If present, the namespace scope for this CLI request\n      --password string                Password for basic authentication to the API server\n      --profile string                 Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default \"none\")\n      --profile-output string          Name of the file to write the profile to (default \"profile.pprof\")\n      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default \"0\")\n  -s, --server string                  The address and port of the Kubernetes API server\n      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used\n      --token string                   Bearer token for authentication to the API server\n      --user string                    The name of the kubeconfig user to use\n      --username string                Username for basic authentication to the API server\n  -v, --v Level                        number for the log level verbosity\n      --vmodule moduleSpec             comma-separated list of pattern=N settings for file-filtered logging (only works for the default text log format)\n      --warnings-as-errors             Treat warnings received from the server as errors and exit with a non-zero exit code\n
"},{"location":"cli/k0s_kubectl/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_reset/","title":"K0s reset","text":""},{"location":"cli/k0s_reset/#k0s-reset","title":"k0s reset","text":"

Uninstall k0s. Must be run as root (or with sudo)

k0s reset [flags]\n
"},{"location":"cli/k0s_reset/#options","title":"Options","text":"
  -c, --config string          config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n      --cri-socket string      container runtime socket to use, default to internal containerd. Format: [remote|docker]:[path-to-socket]\n      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for reset\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_reset/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_restore/","title":"K0s restore","text":""},{"location":"cli/k0s_restore/#k0s-restore","title":"k0s restore","text":"

restore k0s state from given backup archive. Use '-' as filename to read from stdin. Must be run as root (or with sudo)

k0s restore filename [flags]\n
"},{"location":"cli/k0s_restore/#options","title":"Options","text":"
      --config-out string      Specify desired name and full path for the restored k0s.yaml file (default: /home/runner/work/k0s/k0s/k0s_<archive timestamp>.yaml\n      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for restore\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_restore/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_start/","title":"K0s start","text":""},{"location":"cli/k0s_start/#k0s-start","title":"k0s start","text":"

Start the k0s service configured on this host. Must be run as root (or with sudo)

k0s start [flags]\n
"},{"location":"cli/k0s_start/#options","title":"Options","text":"
  -h, --help   help for start\n
"},{"location":"cli/k0s_start/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_status/","title":"K0s status","text":""},{"location":"cli/k0s_status/#k0s-status","title":"k0s status","text":"

Get k0s instance status information

k0s status [flags]\n
"},{"location":"cli/k0s_status/#examples","title":"Examples","text":"
The command will return information about system init, PID, k0s role, kubeconfig and similar.\n
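For instance, a hedged invocation on a host where k0s is installed and running as a service (the -o flag selects json or yaml output):
sudo k0s status\nsudo k0s status -o json\n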
"},{"location":"cli/k0s_status/#options","title":"Options","text":"
  -h, --help                   help for status\n  -o, --out string             sets type of output to json or yaml\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n
"},{"location":"cli/k0s_status/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
  • k0s status components - Get k0s instance component status information
"},{"location":"cli/k0s_status_components/","title":"K0s status components","text":""},{"location":"cli/k0s_status_components/#k0s-status-components","title":"k0s status components","text":"

Get k0s instance component status information

k0s status components [flags]\n
"},{"location":"cli/k0s_status_components/#examples","title":"Examples","text":"
The command will return information about k0s components.\n
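A hedged example, assuming k0s is running on the host; --max-count controls how many of the latest probes are shown:
sudo k0s status components --max-count 3\n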
"},{"location":"cli/k0s_status_components/#options","title":"Options","text":"
  -h, --help            help for components\n      --max-count int   how many latest probes to show (default 1)\n
"},{"location":"cli/k0s_status_components/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":"
  -o, --out string             sets type of output to json or yaml\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n
"},{"location":"cli/k0s_status_components/#see-also","title":"SEE ALSO","text":"
  • k0s status - Get k0s instance status information
"},{"location":"cli/k0s_stop/","title":"K0s stop","text":""},{"location":"cli/k0s_stop/#k0s-stop","title":"k0s stop","text":"

Stop the k0s service configured on this host. Must be run as root (or with sudo)

k0s stop [flags]\n
"},{"location":"cli/k0s_stop/#options","title":"Options","text":"
  -h, --help   help for stop\n
"},{"location":"cli/k0s_stop/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_sysinfo/","title":"K0s sysinfo","text":""},{"location":"cli/k0s_sysinfo/#k0s-sysinfo","title":"k0s sysinfo","text":"

Display system information

"},{"location":"cli/k0s_sysinfo/#synopsis","title":"Synopsis","text":"

Runs k0s's pre-flight checks and issues the results to stdout.

k0s sysinfo [flags]\n
"},{"location":"cli/k0s_sysinfo/#options","title":"Options","text":"
      --controller        Include controller-specific sysinfo (default true)\n      --data-dir string   Data Directory for k0s (default \"/var/lib/k0s\")\n  -h, --help              help for sysinfo\n      --worker            Include worker-specific sysinfo (default true)\n
"},{"location":"cli/k0s_sysinfo/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_token/","title":"K0s token","text":""},{"location":"cli/k0s_token/#k0s-token","title":"k0s token","text":"

Manage join tokens

"},{"location":"cli/k0s_token/#options","title":"Options","text":"
  -h, --help   help for token\n
"},{"location":"cli/k0s_token/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
  • k0s token create - Create join token
  • k0s token invalidate - Invalidates existing join token
  • k0s token list - List join tokens
  • k0s token pre-shared - Generates a token and secret and stores them as files
"},{"location":"cli/k0s_token_create/","title":"K0s token create","text":""},{"location":"cli/k0s_token_create/#k0s-token-create","title":"k0s token create","text":"

Create join token

k0s token create [flags]\n
"},{"location":"cli/k0s_token_create/#examples","title":"Examples","text":"
k0s token create --role worker --expiry 100h //sets expiration time to 100 hours\nk0s token create --role worker --expiry 10m  //sets expiration time to 10 minutes\n
"},{"location":"cli/k0s_token_create/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n      --expiry string          Expiration time of the token. Format 1.5h, 2h45m or 300ms. (default \"0s\")\n  -h, --help                   help for create\n      --role string            Either worker or controller (default \"worker\")\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n      --wait                   wait forever (default false)\n
"},{"location":"cli/k0s_token_create/#see-also","title":"SEE ALSO","text":"
  • k0s token - Manage join tokens
"},{"location":"cli/k0s_token_invalidate/","title":"K0s token invalidate","text":""},{"location":"cli/k0s_token_invalidate/#k0s-token-invalidate","title":"k0s token invalidate","text":"

Invalidates existing join token

k0s token invalidate [flags]\n
"},{"location":"cli/k0s_token_invalidate/#examples","title":"Examples","text":"
k0s token invalidate xyz123\n
"},{"location":"cli/k0s_token_invalidate/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for invalidate\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_token_invalidate/#see-also","title":"SEE ALSO","text":"
  • k0s token - Manage join tokens
"},{"location":"cli/k0s_token_list/","title":"K0s token list","text":""},{"location":"cli/k0s_token_list/#k0s-token-list","title":"k0s token list","text":"

List join tokens

k0s token list [flags]\n
"},{"location":"cli/k0s_token_list/#examples","title":"Examples","text":"
k0s token list --role worker // list worker tokens\n
"},{"location":"cli/k0s_token_list/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for list\n      --role string            Either worker, controller or empty for all roles\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_token_list/#see-also","title":"SEE ALSO","text":"
  • k0s token - Manage join tokens
"},{"location":"cli/k0s_token_pre-shared/","title":"K0s token pre shared","text":""},{"location":"cli/k0s_token_pre-shared/#k0s-token-pre-shared","title":"k0s token pre-shared","text":"

Generates a token and secret and stores them as files

k0s token pre-shared [flags]\n
"},{"location":"cli/k0s_token_pre-shared/#examples","title":"Examples","text":"
k0s token pre-shared --role worker --cert <path>/<to>/ca.crt --url https://<controller-ip>:<port>/\n
"},{"location":"cli/k0s_token_pre-shared/#options","title":"Options","text":"
      --cert string            path to the CA certificate file\n      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for pre-shared\n      --out string             path to the output directory. Default: current dir (default \".\")\n      --role string            token role. valid values: worker, controller. Default: worker (default \"worker\")\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n      --url string             url of the api server to join\n      --valid duration         how long token is valid, in Go duration format\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_token_pre-shared/#see-also","title":"SEE ALSO","text":"
  • k0s token - Manage join tokens
"},{"location":"cli/k0s_version/","title":"K0s version","text":""},{"location":"cli/k0s_version/#k0s-version","title":"k0s version","text":"

Print the k0s version

k0s version [flags]\n
"},{"location":"cli/k0s_version/#options","title":"Options","text":"
  -a, --all    use to print all k0s version info\n  -h, --help   help for version\n  -j, --json   use to print all k0s version info in json\n
"},{"location":"cli/k0s_version/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_worker/","title":"K0s worker","text":""},{"location":"cli/k0s_worker/#k0s-worker","title":"k0s worker","text":"

Run worker

k0s worker [join-token] [flags]\n
"},{"location":"cli/k0s_worker/#examples","title":"Examples","text":"
    Command to add worker node to the master node:\n    CLI argument:\n    $ k0s worker [token]\n\n    or CLI flag:\n    $ k0s worker --token-file [path_to_file]\n    Note: Token can be passed either as a CLI argument or as a flag\n
"},{"location":"cli/k0s_worker/#options","title":"Options","text":"
      --api-server string           HACK: api-server for the windows worker node\n      --cidr-range string           HACK: cidr range for the windows worker node (default \"10.96.0.0/12\")\n      --cluster-dns string          HACK: cluster dns for the windows worker node (default \"10.96.0.10\")\n      --cri-socket string           container runtime socket to use, default to internal containerd. Format: [remote|docker]:[path-to-socket]\n      --data-dir string             Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                       Debug logging (default: false)\n      --debugListenOn string        Http listenOn for Debug pprof handler (default \":6060\")\n      --enable-cloud-provider       Whether or not to enable cloud provider support in kubelet\n  -h, --help                        help for worker\n      --ignore-pre-flight-checks    continue even if pre-flight checks fail\n      --iptables-mode string        iptables mode (valid values: nft, legacy, auto). default: auto\n      --kubelet-extra-args string   extra args for kubelet\n      --labels strings              Node labels, list of key=value pairs\n  -l, --logging stringToString      Logging Levels for the different components (default [kube-controller-manager=1,kube-scheduler=1,kubelet=1,kube-proxy=1,etcd=info,containerd=info,konnectivity-server=1,kube-apiserver=1])\n      --profile string              worker profile to use on the node (default \"default\")\n      --status-socket string        Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n      --taints strings              Node taints, list of key=value:effect strings\n      --token-file string           Path to the file containing token.\n  -v, --verbose                     Verbose logging (default: false)\n
"},{"location":"cli/k0s_worker/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"contributors/CODE_OF_CONDUCT/","title":"k0s Community Code of Conduct","text":"

k0s follows the CNCF Code of Conduct.

"},{"location":"contributors/github_workflow/","title":"GitHub Workflow","text":"

This guide assumes you have already cloned the upstream repo to your system via git clone, or via go get github.com/k0sproject/k0s.
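
If you have not done so yet, a minimal sketch of the clone step (the $WORKDIR path is illustrative):

mkdir -p $WORKDIR && cd $WORKDIR\ngit clone https://github.com/k0sproject/k0s.git\ncd k0s\n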

"},{"location":"contributors/github_workflow/#fork-the-project","title":"Fork The Project","text":"
  1. Go to http://github.com/k0sproject/k0s
  2. On the top, right-hand side, click on \"fork\" and select your username for the fork destination.
"},{"location":"contributors/github_workflow/#adding-the-forked-remote","title":"Adding the Forked Remote","text":"
export GITHUB_USER={ your github username }\n
cd $WORKDIR/k0s\ngit remote add $GITHUB_USER git@github.com:${GITHUB_USER}/k0s.git\n\n# Prevent push to Upstream\ngit remote set-url --push origin no_push\n\n# Set your fork remote as a default push target\ngit push --set-upstream $GITHUB_USER main\n

Your remotes should look something like this:

git remote -v\n
origin  https://github.com/k0sproject/k0s (fetch)\norigin  no_push (push)\nmy_fork git@github.com:{ github_username }/k0s.git (fetch)\nmy_fork git@github.com:{ github_username }/k0s.git (push)\n
"},{"location":"contributors/github_workflow/#create-rebase-your-feature-branch","title":"Create & Rebase Your Feature Branch","text":"

Create a feature branch and switch to it:

git checkout -b my_feature_branch\n

Rebase your branch:

git fetch origin && \\\ngit rebase origin/main\n
Current branch my_feature_branch is up to date.\n

Please don't use git pull instead of the above fetch / rebase. git pull does a merge, which leaves merge commits. These make the commit history messy and violate the principle that commits ought to be individually understandable and useful.

"},{"location":"contributors/github_workflow/#commit-push","title":"Commit & Push","text":"

Commit and sign your changes:

git commit --signoff\n

The commit message should have a short, capitalized title without a trailing period as its first line. After the title, add a blank line and then a longer description that explains why the change was made, unless it is obvious.

Use imperative mood in the commit message.

For example:

Summarize changes in around 50 characters or less\n\nMore detailed explanatory text, if necessary. Wrap it to about 72\ncharacters or so. In some contexts, the first line is treated as the\nsubject of the commit and the rest of the text as the body. The\nblank line separating the summary from the body is critical (unless\nyou omit the body entirely); various tools like `log`, `shortlog`\nand `rebase` can get confused if you run the two together.\n\nExplain the problem that this commit is solving. Focus on why you\nare making this change as opposed to how (the code explains that).\nAre there side effects or other unintuitive consequences of this\nchange? Here's the place to explain them.\n\nFurther paragraphs come after blank lines.\n\n - Bullet points are okay, too\n\n - Typically a hyphen or asterisk is used for the bullet, preceded\n   by a single space, with blank lines in between.\n\nIf you use an issue tracker, put references to them at the bottom,\nlike this:\n\nFixes: https://github.com/k0sproject/k0s/issues/373\nSee also: #456, #789\n\nSigned-off-by: Name Lastname <user@example.com>\n

You can go back and edit/build/test some more, then commit --amend in a few cycles.
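
For example, a sketch of one amend cycle (assuming the additional changes are staged with git add):

# stage the follow-up edits\ngit add .\n\n# fold them into the previous commit, keeping the sign-off\ngit commit --amend --signoff\n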

When ready, push your changes to your fork's repository:

git push --set-upstream my_fork my_feature_branch\n
"},{"location":"contributors/github_workflow/#open-a-pull-request","title":"Open a Pull Request","text":"

See GitHub's docs on how to create a pull request from a fork.

"},{"location":"contributors/github_workflow/#get-a-code-review","title":"Get a code review","text":"

Once your pull request has been opened it will be assigned to one or more reviewers, and will go through a series of smoke tests.

Commits made in response to review comments should be added to the same branch on your fork.

Very small PRs are easy to review. Very large PRs are very difficult to review.

"},{"location":"contributors/github_workflow/#squashing-commits","title":"Squashing Commits","text":"

Commits on your branch should represent meaningful milestones or units of work. Small commits that contain typo fixes, rebases, review feedback, etc. should be squashed.

To do that, it's best to perform an interactive rebase:

"},{"location":"contributors/github_workflow/#example","title":"Example","text":"

Rebase your feature branch against upstream main branch:

git rebase -i origin/main\n

If your PR has 3 commits, the output will be similar to this:

pick f7f3f6d Changed some code\npick 310154e fixed some typos\npick a5f4a0d made some review changes\n\n# Rebase 710f0f8..a5f4a0d onto 710f0f8\n#\n# Commands:\n# p, pick <commit> = use commit\n# r, reword <commit> = use commit, but edit the commit message\n# e, edit <commit> = use commit, but stop for amending\n# s, squash <commit> = use commit, but meld into previous commit\n# f, fixup <commit> = like \"squash\", but discard this commit's log message\n# x, exec <command> = run command (the rest of the line) using shell\n# b, break = stop here (continue rebase later with 'git rebase --continue')\n# d, drop <commit> = remove commit\n# l, label <label> = label current HEAD with a name\n# t, reset <label> = reset HEAD to a label\n# m, merge [-C <commit> | -c <commit>] <label> [# <oneline>]\n# .       create a merge commit using the original merge commit's\n# .       message (or the oneline, if no original merge commit was\n# .       specified). Use -c <commit> to reword the commit message.\n#\n# These lines can be re-ordered; they are executed from top to bottom.\n#\n# However, if you remove everything, the rebase will be aborted.\n#\n# Note that empty commits are commented out\n

Use a command line text editor to change the word pick to f or fixup for the commits you want to squash, then save your changes and continue the rebase:

Per the output above, you can see that:

fixup <commit> = like \"squash\", but discard this commit's log message\n

This means that when rebased, the commit message \"fixed some typos\" will be discarded and the commit will be squashed into its parent.
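
For instance, the edited todo list for the three-commit example above might look like this, squashing the last two commits into the first:

pick f7f3f6d Changed some code\nfixup 310154e fixed some typos\nfixup a5f4a0d made some review changes\n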

"},{"location":"contributors/github_workflow/#push-your-final-changes","title":"Push Your Final Changes","text":"

Once done, you can push the final commits to your branch:

git push --force\n

You can run multiple iterations of rebase/push -f, if needed.

"},{"location":"contributors/overview/","title":"Contributing to k0s","text":"

Thank you for taking the time to make a contribution to k0s. The following document is a set of guidelines and instructions for contributing to k0s.

When contributing to this repository, please consider first discussing the change you wish to make by opening an issue.

"},{"location":"contributors/overview/#code-of-conduct","title":"Code of Conduct","text":"

Our code of conduct can be found in the link below. Please follow it in all your interactions with the project.

  • Code Of Conduct
"},{"location":"contributors/overview/#github-workflow","title":"GitHub Workflow","text":"

We use GitHub flow, so all code changes are tracked via Pull Requests. A detailed guide on the recommended workflow can be found below:

  • GitHub Workflow
"},{"location":"contributors/overview/#code-testing","title":"Code Testing","text":"

All submitted PRs go through a set of tests and reviews. You can run most of these tests before a PR is submitted. In fact, we recommend it, because it can save many review iterations and automated test runs. The testing guidelines can be found here:

  • Contributor's Guide to Testing
"},{"location":"contributors/overview/#license","title":"License","text":"

By contributing, you agree that your contributions will be licensed as follows:

  • All content residing under the \"docs/\" directory of this repository is licensed under \"Creative Commons Attribution Share Alike 4.0 International\" (CC-BY-SA-4.0). See docs/LICENCE for details.
  • Content outside of the above-mentioned directories and restrictions is available under the \"Apache License 2.0\".
"},{"location":"contributors/overview/#community","title":"Community","text":"

Some of you might have noticed we have an official community blog hosted on Medium. If you are not yet following us, we'd like to invite you to do so now! Make sure to follow us on Twitter as well \ud83d\ude0a

We have also decided to participate in the Lens Forums. As part of our ongoing collaboration with the Lens IDE team, who are not only close friends of the k0s crew but also widely embraced by the Kubernetes user community, it was only natural for us to join forces on their platform. By becoming a part of the Lens Forums, you can easily connect with us through the dedicated k0s categories. Stay in the loop with the latest news, engage in technical discussions, and contribute your expertise and feedback!

"},{"location":"contributors/testing/","title":"Testing Your Code","text":"

k0s uses GitHub Actions to run automated tests on any PR before merging. However, a PR will not be reviewed until all tests are green, so to save time and prevent your PR from going stale, it is best to test it before submitting the PR.

"},{"location":"contributors/testing/#run-local-verifications","title":"Run Local Verifications","text":"

Please run the following style and formatting commands and fix/check-in any changes:

  1. Linting

    We use golangci-lint for style verification. In the repository's root directory, simply run:

    make lint\n

    There's no need to install golangci-lint manually. The build system will take care of that.

  2. Go fmt

    go fmt ./...\n
  3. Checking the documentation

    Verify any changes to the documentation by following the instructions here.

  4. Pre-submit Flight Checks

    In the repository root directory, make sure that:

    • make build && git diff --exit-code runs successfully. Verifies that the build is working and that the generated source code matches the one that's checked into source control.
    • make check-unit runs successfully. Verifies that all the unit tests pass.
    • make check-basic runs successfully. Verifies basic cluster functionality using one controller and two workers.
    • make check-hacontrolplane runs successfully. Verifies that joining of controllers works.

    Please note that this last test is prone to \"flakiness\", so it might fail on occasion. If it fails constantly, take a deeper look at your code to find the source of the problem.

    If you find that all tests passed, you may open a pull request upstream.
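
Putting these local verifications together, a sketch of the full pre-submit run from the repository root:

make lint\ngo fmt ./...\nmake build && git diff --exit-code\nmake check-unit\nmake check-basic\nmake check-hacontrolplane\n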

"},{"location":"contributors/testing/#opening-a-pull-request","title":"Opening A Pull Request","text":""},{"location":"contributors/testing/#draft-mode","title":"Draft Mode","text":"

You may open a pull request in draft mode. All automated tests will still run against the PR, but the PR will not be assigned for review. Once a PR is ready for review, transition it from Draft mode, and code owners will be notified.

"},{"location":"contributors/testing/#conformance-testing","title":"Conformance Testing","text":"

Once a PR has been reviewed and all other tests have passed, a code owner will run a full end-to-end conformance test against the PR. This is usually the last step before merging.

"},{"location":"contributors/testing/#pre-requisites-for-pr-merge","title":"Pre-Requisites for PR Merge","text":"

In order for a PR to be merged, the following conditions should exist:

  1. The PR has passed all the automated tests (style, build & conformance tests).
  2. PR commits have been signed with the --signoff option.
  3. PR was reviewed and approved by a code owner.
  4. PR is rebased against upstream's main branch.
"},{"location":"contributors/testing/#cleanup-the-local-workspace","title":"Cleanup the local workspace","text":"

In order to clean up the local workspace, run make clean. It will clean up all of the intermediate files and directories created during the k0s build. Note that you can't just use git clean -X or even rm -rf, since the Go modules cache sets all of its subdirectories to read-only. If you get in trouble while trying to delete your local workspace, try chmod -R u+w /path/to/workspace && rm -rf /path/to/workspace.
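
As a sketch, the cleanup with the read-only fallback could look like this (the workspace path is illustrative):

make clean\n\n# if the read-only Go modules cache still blocks deletion:\nchmod -R u+w /path/to/workspace && rm -rf /path/to/workspace\n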

"},{"location":"examples/ambassador-ingress/","title":"Installing Ambassador API Gateway","text":"

You can configure k0s with the Ambassador API Gateway and a MetalLB service load balancer. To do this, you leverage Helm's extensible bootstrapping functionality to add the correct extensions to the k0s.yaml file during cluster configuration.

Note: Currently Ambassador API Gateway does not support Kubernetes v1.22 or above. See here for details.

"},{"location":"examples/ambassador-ingress/#use-docker-for-non-native-k0s-platforms","title":"Use Docker for non-native k0s platforms","text":"

With Docker you can run k0s on platforms that the distribution does not natively support (refer to Run k0s in Docker). Skip this section if you are on a platform that k0s natively supports.

As you need to create a custom configuration file to install Ambassador Gateway, you will first need to map that file into the k0s container and expose the ports Ambassador needs for outside access.

  1. Run k0s under Docker:

    docker run -d --name k0s --hostname k0s --privileged -v /var/lib/k0s -p 6443:6443 docker.io/k0sproject/k0s:latest\n
  2. Export the default k0s configuration file:

    docker exec k0s k0s config create > k0s.yaml\n
  3. Export the cluster config, so you can access it using kubectl:

    docker exec k0s cat /var/lib/k0s/pki/admin.conf > k0s-cluster.conf\nexport KUBECONFIG=\"$KUBECONFIG:$PWD/k0s-cluster.conf\"\n
"},{"location":"examples/ambassador-ingress/#configure-k0syaml-for-ambassador-gateway","title":"Configure k0s.yaml for Ambassador Gateway","text":"
  1. Open the k0s.yaml file and append the following extensions at the end:

    extensions:\nhelm:\nrepositories:\n- name: datawire\nurl: https://www.getambassador.io\n- name: bitnami\nurl: https://charts.bitnami.com/bitnami\ncharts:\n- name: ambassador\nchartname: datawire/ambassador\nversion: \"6.5.13\"\nnamespace: ambassador\nvalues: |2\nservice:\nexternalIPs:\n- 172.17.0.2\n- name: metallb\nchartname: bitnami/metallb\nversion: \"1.0.1\"\nnamespace: default\nvalues: |2\nconfigInline:\naddress-pools:\n- name: generic-cluster-pool\nprotocol: layer2\naddresses:\n- 172.17.0.2\n

    Note: It may be necessary to replace the 172.17.0.2 IP with your local IP address.

    This action adds both Ambassador and MetalLB (required for LoadBalancers) with the corresponding repositories and (minimal) configurations. Be aware that the provided example illustrates the use of your local network and that you will want to provide a range of IPs for MetalLB that are addressable on your LAN to access these services from anywhere on your network.

  2. Stop/remove your k0s container:

    docker stop k0s\ndocker rm k0s\n
  3. Restart your k0s container, this time with additional ports and the above config file mapped into it:

    docker run --name k0s --hostname k0s --privileged -v /var/lib/k0s -v \"$PWD\"/k0s.yaml:/k0s.yaml -p 6443:6443 -p 80:80 -p 443:443 -p 8080:8080 docker.io/k0sproject/k0s:latest\n

    After some time, you will be able to list the Ambassador Services:

    kubectl get services -n ambassador\n

    Output:

    NAME                          TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE\nambassador-1611224811         LoadBalancer   10.99.84.151    172.17.0.2    80:30327/TCP,443:30355/TCP   2m11s\nambassador-1611224811-admin   ClusterIP      10.96.79.130    <none>        8877/TCP                     2m11s\nambassador-1611224811-redis   ClusterIP      10.110.33.229   <none>        6379/TCP                     2m11s\n
  4. Install the Ambassador edgectl tool and run the login command:

    edgectl login --namespace=ambassador localhost\n

    Your browser will open and deliver you to the Ambassador Console.

"},{"location":"examples/ambassador-ingress/#deploy-map-a-service","title":"Deploy / Map a Service","text":"
  1. Create a YAML file for the service (for example purposes, create a Swagger Petstore service using a petstore.yaml file):

    ---\napiVersion: v1\nkind: Service\nmetadata:\nname: petstore\nnamespace: ambassador\nspec:\nports:\n- name: http\nport: 80\ntargetPort: 8080\nselector:\napp: petstore\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: petstore\nnamespace: ambassador\nspec:\nreplicas: 1\nselector:\nmatchLabels:\napp: petstore\nstrategy:\ntype: RollingUpdate\ntemplate:\nmetadata:\nlabels:\napp: petstore\nspec:\ncontainers:\n- name: petstore-backend\nimage: docker.io/swaggerapi/petstore3:unstable\nports:\n- name: http\ncontainerPort: 8080\n---\napiVersion: getambassador.io/v2\nkind:  Mapping\nmetadata:\nname: petstore\nnamespace: ambassador\nspec:\nprefix: /petstore/\nservice: petstore\n
  2. Apply the YAML file:

    kubectl apply -f petstore.yaml\n

    Output:

    service/petstore created\ndeployment.apps/petstore created\nmapping.getambassador.io/petstore created\n
  3. Validate that the service is running.

    In the terminal using curl:

    curl -k 'https://localhost/petstore/api/v3/pet/findByStatus?status=available'\n

    Output:

    [{\"id\":1,\"category\":{\"id\":2,\"name\":\"Cats\"},\"name\":\"Cat 1\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag1\"},{\"id\":2,\"name\":\"tag2\"}],\"status\":\"available\"},{\"id\":2,\"category\":{\"id\":2,\"name\":\"Cats\"},\"name\":\"Cat 2\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag2\"},{\"id\":2,\"name\":\"tag3\"}],\"status\":\"available\"},{\"id\":4,\"category\":{\"id\":1,\"name\":\"Dogs\"},\"name\":\"Dog 1\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag1\"},{\"id\":2,\"name\":\"tag2\"}],\"status\":\"available\"},{\"id\":7,\"category\":{\"id\":4,\"name\":\"Lions\"},\"name\":\"Lion 1\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag1\"},{\"id\":2,\"name\":\"tag2\"}],\"status\":\"available\"},{\"id\":8,\"category\":{\"id\":4,\"name\":\"Lions\"},\"name\":\"Lion 2\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag2\"},{\"id\":2,\"name\":\"tag3\"}],\"status\":\"available\"},{\"id\":9,\"category\":{\"id\":4,\"name\":\"Lions\"},\"name\":\"Lion 3\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag3\"},{\"id\":2,\"name\":\"tag4\"}],\"status\":\"available\"},{\"id\":10,\"category\":{\"id\":3,\"name\":\"Rabbits\"},\"name\":\"Rabbit 1\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag3\"},{\"id\":2,\"name\":\"tag4\"}],\"status\":\"available\"}]\n

    Or by way of your browser:

    Open https://localhost/petstore/ in your browser and change the URL in the field at the top of the page to https://localhost/petstore/api/v3/openapi.json (as it is mapped to the /petstore prefix) and click Explore.

  4. Navigate to the Mappings area in the Ambassador Console to view the corresponding PetStore mapping as configured.

"},{"location":"examples/ansible-playbook/","title":"Creating a cluster with an Ansible Playbook","text":"

Ansible is a popular infrastructure-as-code tool that can be used to automate tasks in order to achieve the desired state of a system. With Ansible (and the k0s-Ansible playbook) you can quickly install a multi-node Kubernetes cluster.

Note: Before using Ansible to create a cluster, you should have a general understanding of Ansible (refer to the official Ansible User Guide).

"},{"location":"examples/ansible-playbook/#prerequisites","title":"Prerequisites","text":"

You will require the following tools to install k0s on local virtual machines:

  • multipass: A lightweight VM manager that uses KVM on Linux, Hyper-V on Windows, and hypervisor.framework on macOS. Installation information
  • ansible: An infrastructure-as-code tool. Installation Guide
  • kubectl: Command line tool for running commands against Kubernetes clusters. Kubernetes Install Tools
"},{"location":"examples/ansible-playbook/#create-the-cluster","title":"Create the cluster","text":"
  1. Download k0s-ansible

    Clone the k0s-ansible repository on your local machine:

    git clone https://github.com/movd/k0s-ansible.git\ncd k0s-ansible\n
  2. Create virtual machines

    Note: Though multipass is the VM manager in use here, there is no interdependence.

    Create a number of virtual machines. For the automation to work, each instance must have passwordless SSH access. To achieve this, provision each instance with a cloud-init manifest that imports your current user's public SSH key into a user k0s (refer to the bash script below).

    This creates 7 virtual machines:

    ./tools/multipass_create_instances.sh 7\n
    Create cloud-init to import ssh key...\n[1/7] Creating instance k0s-1 with multipass...\nLaunched: k0s-1\n[2/7] Creating instance k0s-2 with multipass...\nLaunched: k0s-2\n[3/7] Creating instance k0s-3 with multipass...\nLaunched: k0s-3\n[4/7] Creating instance k0s-4 with multipass...\nLaunched: k0s-4\n[5/7] Creating instance k0s-5 with multipass...\nLaunched: k0s-5\n[6/7] Creating instance k0s-6 with multipass...\nLaunched: k0s-6\n[7/7] Creating instance k0s-7 with multipass...\nLaunched: k0s-7\nName State IPv4 Image\nk0s-1 Running 192.168.64.32 Ubuntu 20.04 LTS\nk0s-2 Running 192.168.64.33 Ubuntu 20.04 LTS\nk0s-3 Running 192.168.64.56 Ubuntu 20.04 LTS\nk0s-4 Running 192.168.64.57 Ubuntu 20.04 LTS\nk0s-5 Running 192.168.64.58 Ubuntu 20.04 LTS\nk0s-6 Running 192.168.64.60 Ubuntu 20.04 LTS\nk0s-7 Running 192.168.64.61 Ubuntu 20.04 LTS\n
  3. Create Ansible inventory

    1. Copy the sample to create the inventory directory:

      ```shell\n  cp -rfp inventory/sample inventory/multipass\n  ```\n

    2. Create the inventory.

      Assign the virtual machines to the different host groups, as required by the playbook logic.\n\n  | Host group            | Detail                                    |\n  |:----------------------|:------------------------------------------|\n  | `initial_controller`  | Must contain a single node that creates the worker and controller tokens needed by the other nodes|\n  | `controller`          | Can contain nodes that, together with the host from `initial_controller`, form a highly available isolated control plane |\n  | `worker`              | Must contain at least one node, to allow for the deployment of Kubernetes objects |\n

    3. Fill in inventory/multipass/inventory.yml. This can be done by direct entry using the metadata provided by multipass list, or you can use the following Python script multipass_generate_inventory.py:

      ```shell\n  ./tools/multipass_generate_inventory.py\n  ```\n\n  ```shell\n  Designate first three instances as control plane\n  Created Ansible Inventory at: /Users/dev/k0s-ansible/tools/inventory.yml\n  $ cp tools/inventory.yml inventory/multipass/inventory.yml\n  ```\n\n  Your `inventory/multipass/inventory.yml` should resemble the example below:\n\n  ```yaml\n  ---\n  all:\n    children:\n      initial_controller:\n        hosts:\n          k0s-1:\n      controller:\n        hosts:\n          k0s-2:\n          k0s-3:\n      worker:\n        hosts:\n          k0s-4:\n          k0s-5:\n          k0s-6:\n          k0s-7:\n    hosts:\n      k0s-1:\n        ansible_host: 192.168.64.32\n      k0s-2:\n        ansible_host: 192.168.64.33\n      k0s-3:\n        ansible_host: 192.168.64.56\n      k0s-4:\n        ansible_host: 192.168.64.57\n      k0s-5:\n        ansible_host: 192.168.64.58\n      k0s-6:\n        ansible_host: 192.168.64.60\n      k0s-7:\n        ansible_host: 192.168.64.61\n    vars:\n      ansible_user: k0s\n  ```\n
  4. Test the virtual machine connections

    Run the following command to test the connection to your hosts:

    ansible all -i inventory/multipass/inventory.yml -m ping\n
    k0s-4 | SUCCESS => {\n\"ansible_facts\": {\n\"discovered_interpreter_python\": \"/usr/bin/python3\"\n},\n    \"changed\": false,\n    \"ping\": \"pong\"\n}\n...\n

    If the test result indicates success, you can proceed.

  5. Provision the cluster with Ansible

    When you apply the playbook, k0s will be downloaded and set up on all nodes, tokens will be exchanged, and a kubeconfig will be dumped to your local deployment environment.

    ansible-playbook site.yml -i inventory/multipass/inventory.yml\n
    TASK [k0s/initial_controller : print kubeconfig command] *******************************************************\nTuesday 22 December 2020  17:43:20 +0100 (0:00:00.257)       0:00:41.287 ******\nok: [k0s-1] => {\n\"msg\": \"To use Cluster: export KUBECONFIG=/Users/dev/k0s-ansible/inventory/multipass/artifacts/k0s-kubeconfig.yml\"\n}\n...\nPLAY RECAP *****************************************************************************************************\nk0s-1                      : ok=21   changed=11   unreachable=0    failed=0    skipped=1    rescued=0    ignored=0\nk0s-2                      : ok=10   changed=5    unreachable=0    failed=0    skipped=1    rescued=0    ignored=0\nk0s-3                      : ok=10   changed=5    unreachable=0    failed=0    skipped=1    rescued=0    ignored=0\nk0s-4                      : ok=9    changed=5    unreachable=0    failed=0    skipped=1    rescued=0    ignored=0\nk0s-5                      : ok=9    changed=5    unreachable=0    failed=0    skipped=1    rescued=0    ignored=0\nk0s-6                      : ok=9    changed=5    unreachable=0    failed=0    skipped=1    rescued=0    ignored=0\nk0s-7                      : ok=9    changed=5    unreachable=0    failed=0    skipped=1    rescued=0    ignored=0\n\nTuesday 22 December 2020  17:43:36 +0100 (0:00:01.204)       0:00:57.478 ******\n===============================================================================\nprereq : Install apt packages -------------------------------------------------------------------------- 22.70s\nk0s/controller : Wait for k8s apiserver ----------------------------------------------------------------- 4.30s\nk0s/initial_controller : Create worker join token ------------------------------------------------------- 3.38s\nk0s/initial_controller : Wait for k8s apiserver --------------------------------------------------------- 3.36s\ndownload : Download k0s binary k0s-v0.9.0-rc1-amd64 ----------------------------------------------------- 3.11s\nGathering Facts ----------------------------------------------------------------------------------------- 2.85s\nGathering Facts ----------------------------------------------------------------------------------------- 1.95s\nprereq : Create k0s Directories ------------------------------------------------------------------------- 1.53s\nk0s/worker : Enable and check k0s service --------------------------------------------------------------- 1.20s\nprereq : Write the k0s config file ---------------------------------------------------------------------- 1.09s\nk0s/initial_controller : Enable and check k0s service --------------------------------------------------- 0.94s\nk0s/controller : Enable and check k0s service ----------------------------------------------------------- 0.73s\nGathering Facts ----------------------------------------------------------------------------------------- 0.71s\nGathering Facts ----------------------------------------------------------------------------------------- 0.66s\nGathering Facts ----------------------------------------------------------------------------------------- 0.64s\nk0s/worker : Write the k0s token file on worker --------------------------------------------------------- 0.64s\nk0s/worker : Copy k0s service file ---------------------------------------------------------------------- 0.53s\nk0s/controller : Write the k0s token file on controller ------------------------------------------------- 0.41s\nk0s/controller : Copy k0s service file 
------------------------------------------------------------------ 0.40s\nk0s/initial_controller : Copy k0s service file ---------------------------------------------------------- 0.36s\n
"},{"location":"examples/ansible-playbook/#use-the-cluster-with-kubectl","title":"Use the cluster with kubectl","text":"

A kubeconfig was copied to your local machine while the playbook was running, which you can use to gain access to your new Kubernetes cluster:

export KUBECONFIG=/Users/dev/k0s-ansible/inventory/multipass/artifacts/k0s-kubeconfig.yml\nkubectl cluster-info\n
Kubernetes control plane is running at https://192.168.64.32:6443\nCoreDNS is running at https://192.168.64.32:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy\nMetrics-server is running at https://192.168.64.32:6443/api/v1/namespaces/kube-system/services/https:metrics-server:/proxy\n\n$ kubectl get nodes -o wide\nNAME    STATUS     ROLES    AGE   VERSION        INTERNAL-IP     EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION     CONTAINER-RUNTIME\nk0s-4   Ready      <none>   21s   v1.20.1-k0s1   192.168.64.57   <none>        Ubuntu 20.04.1 LTS   5.4.0-54-generic   containerd://1.4.3\nk0s-5   Ready      <none>   21s   v1.20.1-k0s1   192.168.64.58   <none>        Ubuntu 20.04.1 LTS   5.4.0-54-generic   containerd://1.4.3\nk0s-6   NotReady   <none>   21s   v1.20.1-k0s1   192.168.64.60   <none>        Ubuntu 20.04.1 LTS   5.4.0-54-generic   containerd://1.4.3\nk0s-7   NotReady   <none>   21s   v1.20.1-k0s1   192.168.64.61   <none>        Ubuntu 20.04.1 LTS   5.4.0-54-generic   containerd://1.4.3\n

Note: The first three control plane nodes will not display, as the control plane is fully isolated. To check on the distributed etcd cluster, you can use ssh to securely log in to a controller node, or you can run the following ad-hoc command:

ansible k0s-1 -a \"k0s etcd member-list -c /etc/k0s/k0s.yaml\" -i inventory/multipass/inventory.yml | tail -1 | jq\n
{\n\"level\": \"info\",\n\"members\": {\n\"k0s-1\": \"https://192.168.64.32:2380\",\n\"k0s-2\": \"https://192.168.64.33:2380\",\n\"k0s-3\": \"https://192.168.64.56:2380\"\n},\n\"msg\": \"done\",\n\"time\": \"2020-12-23T00:21:22+01:00\"\n}\n

Once all worker nodes are in the Ready state, you can use the cluster. You can test the cluster state by creating a simple nginx deployment.

kubectl create deployment nginx --image=gcr.io/google-containers/nginx --replicas=5\n
deployment.apps/nginx created\n
kubectl expose deployment nginx --target-port=80 --port=8100\n
service/nginx exposed\n
kubectl run hello-k0s --image=quay.io/prometheus/busybox --rm -it --restart=Never --command -- wget -qO- nginx:8100\n
<!DOCTYPE html>\n<html>\n<head>\n<title>Welcome to nginx on Debian!</title>\n...\npod \"hello-k0s\" deleted\n

Note: k0s-ansible is developed by k0s users. Please send your feedback, bug reports, and pull requests to github.com/movd/k0s-ansible.

"},{"location":"examples/gitops-flux/","title":"Using GitOps with Flux","text":"

This tutorial describes the benefits of using GitOps with k0s and provides an example of deploying an application with Flux v2.

GitOps is a practice where you leverage Git as the single source of truth. It offers a declarative way to do Kubernetes cluster management and application delivery. The desired states, using Kubernetes manifests and helm packages, are pulled from a git repository and automatically deployed to the cluster. This also makes it quick to re-deploy and recover applications whenever needed.

"},{"location":"examples/gitops-flux/#why-gitops-with-k0s","title":"Why GitOps with k0s","text":"

k0s doesn't come with a lot of different extensions and add-ons that some users might find useful (and some not). Instead, k0s comes with 100% upstream Kubernetes and is compatible with all Kubernetes extensions. This makes it easy for k0s users to freely select the extensions that their applications and infrastructure need, without conflicting with any predefined options. GitOps is a perfect practice for deploying these extensions automatically with applications by defining and configuring them directly in Git. This also helps with cluster security, as the cluster doesn't need to be accessed directly when application changes are needed. However, it puts more stress on the Git access control, because changes in Git are propagated automatically to the cluster.

"},{"location":"examples/gitops-flux/#install-k0s","title":"Install k0s","text":"

Let's start by installing k0s. Any k0s deployment option will do, but to keep things simple, this Quick Start Guide gets you started with a single node k0s cluster.

Run these three commands to download k0s, install and start it:

curl -sSLf https://get.k0s.sh | sudo sh\nsudo k0s install controller --single\nsudo k0s start\n
"},{"location":"examples/gitops-flux/#set-kubeconfig","title":"Set kubeconfig","text":"

Next, you need to set the KUBECONFIG variable, which is needed by Flux CLI later on.

sudo k0s kubeconfig admin > kubeconfig\nexport KUBECONFIG=$PWD/kubeconfig\n
"},{"location":"examples/gitops-flux/#install-flux","title":"Install Flux","text":"

To proceed with Flux, install the Flux CLI, which is used for configuring Flux in your Kubernetes cluster. For macOS and Linux, this can be done either with brew or with a bash script. Use one of them:

brew install fluxcd/tap/flux\n

or

curl -s https://fluxcd.io/install.sh | sudo bash\n

For more details of the Flux installation, check the Flux documentation.
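
Before bootstrapping, you can optionally check that the prerequisites are in place; a hedged example using the Flux CLI:

flux check --pre\n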

"},{"location":"examples/gitops-flux/#configure-flux-for-a-github-repository","title":"Configure Flux for a GitHub repository","text":"

Export your GitHub personal access token (instructions how to get it) and username:

export GITHUB_TOKEN=<your-token>\nexport GITHUB_USER=<your-username>\n

Come up with a GitHub repo name (e.g. flux-demo), which will be used by Flux to store (and sync) the config files.

export GITHUB_REPO_NAME=<select-repo-name-to-be-created>\n

Bootstrap flux to your cluster. The GitHub repo will be created automatically by Flux:

flux bootstrap github \\\n--owner=$GITHUB_USER \\\n--repository=$GITHUB_REPO_NAME \\\n--branch=main \\\n--path=./clusters/my-cluster \\\n--personal\n

Now you are all set with Flux and can proceed to deploy your first application.

"},{"location":"examples/gitops-flux/#deploy-example-application","title":"Deploy example application","text":"

Next, we'll deploy a simple web application and expose it using a NodePort service. In the previous step, we configured Flux to track the path /clusters/my-cluster/ in your repository. Now clone the repo to your local machine:

git clone git@github.com:$GITHUB_USER/$GITHUB_REPO_NAME.git\ncd $GITHUB_REPO_NAME/clusters/my-cluster/\n

Create the following YAML file (simple-web-server-with-nodeport.yaml) in the same directory:

apiVersion: v1\nkind: Namespace\nmetadata:\nname: web\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: web-server\nnamespace: web\nspec:\nselector:\nmatchLabels:\napp: web\ntemplate:\nmetadata:\nlabels:\napp: web\nspec:\ncontainers:\n- name: httpd\nimage: httpd:2.4.53-alpine\nports:\n- containerPort: 80\n---\napiVersion: v1\nkind: Service\nmetadata:\nname: web-server-service\nnamespace: web\nspec:\ntype: NodePort\nselector:\napp: web\nports:\n- port: 80\ntargetPort: 80\nnodePort: 30003\n

Then push the new file to the repository:

git add .\ngit commit -m \"Add web server manifest\"\ngit push\n

Check that Flux detects your changes and the web server gets applied (by default this should happen within 1 min):

flux get kustomizations\n

If the deployment went successfully, you should see the newly added objects:

sudo k0s kubectl get all -n web\n

You can try to access the web application using

curl localhost:30003\n

or by opening http://localhost:30003 in a web browser.

Voil\u00e0! You have now installed the example application using the GitOps method with Flux. As a next step you can try to modify the web app YAML file or add another application directly into the Git repo and see how Flux will automatically pick up the changes without accessing the cluster with kubectl.
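
A minimal sketch of that change-and-sync loop, using the file and commands introduced above:

# edit simple-web-server-with-nodeport.yaml, then:\ngit add simple-web-server-with-nodeport.yaml\ngit commit -m \"Update web server manifest\"\ngit push\n\n# check that Flux picks up the change\nflux get kustomizations\n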

"},{"location":"examples/gitops-flux/#uninstall-flux","title":"Uninstall Flux","text":"

If you want to uninstall Flux from the cluster, run:

flux uninstall --namespace=flux-system\n

Your applications, which were installed by Flux, will remain in the cluster, but you will no longer have the Flux processes syncing the desired state from Git.

"},{"location":"examples/metallb-loadbalancer/","title":"Installing MetalLB Load Balancer","text":"

This tutorial covers the installation of MetalLB load balancer on k0s. k0s doesn't come with an in-built load balancer, but it's easy to deploy MetalLB as shown in this document.

"},{"location":"examples/metallb-loadbalancer/#about-load-balancers","title":"About Load Balancers","text":"

Load balancers can be used for exposing applications to the external network. A load balancer provides a single IP address to route incoming requests to your app. In order to successfully create Kubernetes services of type LoadBalancer, you need to have a load balancer implementation available for Kubernetes.

A load balancer can be implemented by a cloud provider as an external service (with additional cost). It can also be implemented internally in the Kubernetes cluster as a pure software solution with MetalLB.

"},{"location":"examples/metallb-loadbalancer/#metallb","title":"MetalLB","text":"

MetalLB implements the Kubernetes service of type LoadBalancer. When a LoadBalancer service is requested, MetalLB allocates an IP address from the configured range and makes the network aware that the IP \u201clives\u201d in the cluster.

One of the benefits of MetalLB is that you avoid all cloud provider dependencies. That's why MetalLB is typically used for bare-metal deployments.

See the MetalLB requirements in MetalLB's official documentation. By default, k0s runs with Kube-Router CNI, which is compatible with MetalLB as long as you don't use MetalLB\u2019s BGP mode. If you are not using Kube-Router and you are using kube-proxy in IPVS mode, you need to enable strict ARP mode in kube-proxy (see MetalLB preparations):

apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s\nspec:\nnetwork:\nkubeProxy:\nmode: ipvs\nipvs:\nstrictARP: true\n

Port 7946 (TCP & UDP) must be allowed between the nodes. In addition, before installing MetalLB, make sure there is no other software running on port 7946 on the nodes, such as the Docker daemon.
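
A hedged way to verify on each node that nothing is already listening on port 7946 (assuming the ss utility is available):

# no output means nothing is listening on 7946\nsudo ss -tulpn | grep 7946\n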

"},{"location":"examples/metallb-loadbalancer/#install-metallb","title":"Install MetalLB","text":"
  1. Install MetalLB using the official Helm chart and k0s Helm extension manager:

    apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\n  name: k0s\nspec:\n  extensions:\n    helm:\n      repositories:\n      - name: metallb\n        url: https://metallb.github.io/metallb\n      charts:\n      - name: metallb\n        chartname: metallb/metallb\n        namespace: metallb\n

    Other installation methods are available in the MetalLB's official documentation.

  2. Create ConfigMap for MetalLB

    Next, you need to create a ConfigMap that includes an IP address range for the load balancer. The pool of IPs must be dedicated to MetalLB's use. You cannot reuse, for example, the Kubernetes node IPs or IPs controlled by other services. You can, however, use private IP addresses, for example 192.168.1.180-192.168.1.199, but then you need to take care of the routing from the external network if you need external access. In this example, we don't need it.

    Create a YAML file accordingly, and deploy it: kubectl apply -f metallb-l2-pool.yaml

    ---\napiVersion: metallb.io/v1beta1\nkind: IPAddressPool\nmetadata:\nname: first-pool\nnamespace: metallb-system\nspec:\naddresses:\n- <ip-address-range-start>-<ip-address-range-stop>\n---\napiVersion: metallb.io/v1beta1\nkind: L2Advertisement\nmetadata:\nname: example\nnamespace: metallb-system\n
  3. Deploy an example application (web server) with a load balancer

    apiVersion: v1\nkind: Namespace\nmetadata:\nname: web\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: web-server\nnamespace: web\nspec:\nselector:\nmatchLabels:\napp: web\ntemplate:\nmetadata:\nlabels:\napp: web\nspec:\ncontainers:\n- name: httpd\nimage: httpd:2.4.53-alpine\nports:\n- containerPort: 80\n---\napiVersion: v1\nkind: Service\nmetadata:\nname: web-server-service\nnamespace: web\nspec:\nselector:\napp: web\nports:\n- protocol: TCP\nport: 80\ntargetPort: 80\ntype: LoadBalancer\n
  4. Check your LoadBalancer

    Run the following command to see your LoadBalancer with the external-ip and port.

    kubectl get service -n web\n
  5. Access your example application

    If you used private IP addresses for MetalLB in the ConfigMap (in step 2), you should run the following command from the local network. Use the IP address from the previous step.

    curl <EXTERNAL-IP>\n

    If you are successful, you should see <html><body><h1>It works!</h1></body></html>.

"},{"location":"examples/metallb-loadbalancer/#additional-information","title":"Additional information","text":"

For more information about MetalLB installation, take a look at the official MetalLB documentation.

"},{"location":"examples/metallb-loadbalancer/#alternative-examples","title":"Alternative examples","text":"

Get load balancer using cloud provider.

"},{"location":"examples/nginx-ingress/","title":"Installing NGINX Ingress Controller","text":"

This tutorial covers the installation of NGINX Ingress controller, which is an open source project made by the Kubernetes community. k0s doesn't come with an in-built Ingress controller, but it's easy to deploy NGINX Ingress as shown in this document. Other Ingress solutions can be used as well (see the links at the end of the page).

"},{"location":"examples/nginx-ingress/#nodeport-vs-loadbalancer-vs-ingress-controller","title":"NodePort vs LoadBalancer vs Ingress controller","text":"

Kubernetes offers multiple options for exposing services to external networks. The main options are NodePort, LoadBalancer and Ingress controller.

NodePort, as the name says, means that a port on a node is configured to route incoming requests to a certain service. The port range is limited to 30000-32767, so you cannot expose commonly used ports like 80 or 443 with NodePort.

LoadBalancer is a service, which is typically implemented by the cloud provider as an external service (with additional cost). Load balancers can also be installed internally in the Kubernetes cluster with MetalLB, which is typically used for bare-metal deployments. Load balancer provides a single IP address to access your services, which can run on multiple nodes.

Ingress controller helps to consolidate routing rules of multiple applications into one entity. Ingress controller is exposed to an external network with the help of NodePort, LoadBalancer or host network. You can also use Ingress controller to terminate TLS for your domain in one place, instead of terminating TLS for each application separately.

"},{"location":"examples/nginx-ingress/#nginx-ingress-controller","title":"NGINX Ingress Controller","text":"

NGINX Ingress Controller is a very popular Ingress controller for Kubernetes. In many cloud environments, it can be exposed to an external network by using the load balancer offered by the cloud provider. However, cloud load balancers are not necessary. A load balancer can also be implemented with MetalLB, which can be deployed in the same Kubernetes cluster. Another option to expose the Ingress controller to an external network is to use NodePort. The third option is to use the host network. All of these alternatives are described in more detail below, with separate examples.

"},{"location":"examples/nginx-ingress/#install-nginx-using-nodeport","title":"Install NGINX using NodePort","text":"

Installing NGINX using NodePort is the simplest example for an Ingress controller, as it avoids the load balancer dependency. NodePort is used to expose the NGINX Ingress to the external network.

  1. Install NGINX Ingress Controller (using the official manifests by the ingress-nginx project)

    kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.3/deploy/static/provider/baremetal/deploy.yaml\n
  2. Check that the Ingress controller pods have started

    kubectl get pods -n ingress-nginx\n
  3. Check that you can see the NodePort service

    kubectl get services -n ingress-nginx\n
  4. From version v1.0.0 of the Ingress-NGINX Controller, an ingressclass object is required.

    In the default installation, an ingressclass object named nginx has already been created.

    $ kubectl -n ingress-nginx get ingressclasses\nNAME    CONTROLLER             PARAMETERS   AGE\nnginx   k8s.io/ingress-nginx   <none>       162m\n

    If this is the only instance of the Ingress-NGINX controller, you should add the annotation ingressclass.kubernetes.io/is-default-class to your ingress class:

    kubectl -n ingress-nginx annotate ingressclasses nginx ingressclass.kubernetes.io/is-default-class=\"true\"\n
  5. Try connecting to the Ingress controller using the NodePort from the previous step (in the range of 30000-32767)

    curl <worker-external-ip>:<node-port>\n

    If you don't yet have any backend service configured, you should see \"404 Not Found\" from nginx. This is ok for now. If you see a response from nginx, the Ingress Controller is running and you can reach it.

  6. Deploy a small test application (httpd web server) to verify your Ingress controller.

    Create the following YAML file and name it \"simple-web-server-with-ingress.yaml\":

    apiVersion: v1\nkind: Namespace\nmetadata:\nname: web\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: web-server\nnamespace: web\nspec:\nselector:\nmatchLabels:\napp: web\ntemplate:\nmetadata:\nlabels:\napp: web\nspec:\ncontainers:\n- name: httpd\nimage: httpd:2.4.53-alpine\nports:\n- containerPort: 80\n---\napiVersion: v1\nkind: Service\nmetadata:\nname: web-server-service\nnamespace: web\nspec:\nselector:\napp: web\nports:\n- protocol: TCP\nport: 5000\ntargetPort: 80\n---\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\nname: web-server-ingress\nnamespace: web\nspec:\ningressClassName: nginx\nrules:\n- host: web.example.com\nhttp:\npaths:\n- path: /\npathType: Prefix\nbackend:\nservice:\nname: web-server-service\nport:\nnumber: 5000\n

    Deploy the app:

    kubectl apply -f simple-web-server-with-ingress.yaml\n
  7. Verify that you can access your application using the NodePort from step 3.

    curl <worker-external-ip>:<node-port> -H 'Host: web.example.com'\n

    If you are successful, you should see <html><body><h1>It works!</h1></body></html>.

"},{"location":"examples/nginx-ingress/#install-nginx-using-loadbalancer","title":"Install NGINX using LoadBalancer","text":"

In this example you'll install NGINX Ingress controller using LoadBalancer on k0s.

  1. Install LoadBalancer

    There are two alternatives for installing a load balancer on k0s. Follow the links below to install one.

    - MetalLB as a pure SW solution running internally in the k0s cluster
    - Cloud provider's load balancer running outside of the k0s cluster

  2. Verify LoadBalancer

    In order to proceed you need to have a load balancer available for the Kubernetes cluster. To verify that it's available, deploy a simple load balancer service.

    apiVersion: v1\nkind: Service\nmetadata:\nname: example-load-balancer\nspec:\nselector:\napp: web\nports:\n- protocol: TCP\nport: 80\ntargetPort: 80\ntype: LoadBalancer\n
    kubectl apply -f example-load-balancer.yaml\n

    Then run the following command to see your LoadBalancer with an external IP address.

    kubectl get service example-load-balancer\n

    If the LoadBalancer is not available, you won't get an IP address for EXTERNAL-IP. Instead, it's <pending>. In this case you should go back to the previous step and check your load balancer availability.

    If you are successful, you'll see a real IP address and you can proceed further.

    You can delete the example-load-balancer:

    kubectl delete -f example-load-balancer.yaml\n
  3. Install NGINX Ingress Controller by following the steps in the previous chapter (step 1 to step 4).

  4. Edit the NGINX Ingress Controller to use LoadBalancer instead of NodePort

    kubectl edit service ingress-nginx-controller -n ingress-nginx\n

    Find the spec.type field and change it from \"NodePort\" to \"LoadBalancer\".
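
    Alternatively, you can patch the Service type in one non-interactive command; a sketch of the same change:

    kubectl -n ingress-nginx patch service ingress-nginx-controller -p '{\"spec\":{\"type\":\"LoadBalancer\"}}'\n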

  5. Check that you can see the ingress-nginx-controller with type LoadBalancer.

    kubectl get services -n ingress-nginx\n
  6. Try connecting to the Ingress controller

    If you used private IP addresses for MetalLB in step 2, you should run the following command from the local network. Use the IP address from the previous step, column EXTERNAL-IP.

    curl <EXTERNAL-IP>\n

    If you don't yet have any backend service configured, you should see \"404 Not Found\" from nginx. This is ok for now. If you see a response from nginx, the Ingress Controller is running and you can reach it using LoadBalancer.

  7. Deploy a small test application (httpd web server) to verify your Ingress.

    Create the YAML file \"simple-web-server-with-ingress.yaml\" as described in the previous chapter (step 6) and deploy it.

    kubectl apply -f simple-web-server-with-ingress.yaml\n
  8. Verify that you can access your application through the LoadBalancer and Ingress controller.

    curl <worker-external-ip> -H 'Host: web.example.com'\n

    If you are successful, you should see <html><body><h1>It works!</h1></body></html>.

"},{"location":"examples/nginx-ingress/#install-nginx-using-host-network","title":"Install NGINX using host network","text":"

The host network option exposes Ingress directly using the worker nodes' IP addresses. It also allows you to use ports 80 and 443. This option doesn't use any Service objects (ClusterIP, NodePort, LoadBalancer) and it has the limitation that only one Ingress controller Pod may be scheduled on each cluster node.

  1. Download the official NGINX Ingress Controller manifests:

    wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.3/deploy/static/provider/baremetal/deploy.yaml\n
  2. Edit deploy.yaml. Find the Deployment ingress-nginx-controller and enable the host network option by adding the hostNetwork line:

    spec:\n  template:\n    spec:\n      hostNetwork: true\n

    You can also remove the Service ingress-nginx-controller completely, because it won't be needed.

  3. Install Ingress

    kubectl apply -f deploy.yaml\n
  4. Try to connect to the Ingress controller, deploy a test application and verify the access. These steps are similar to the previous install methods.

"},{"location":"examples/nginx-ingress/#additional-information","title":"Additional information","text":"

For more information about NGINX Ingress Controller installation, take a look at the official ingress-nginx installation guide and bare-metal considerations.

"},{"location":"examples/nginx-ingress/#alternative-examples-for-ingress-controllers-on-k0s","title":"Alternative examples for Ingress Controllers on k0s","text":"

Traefik Ingress

"},{"location":"examples/rook-ceph/","title":"Installing Ceph Storage with Rook","text":"

In this tutorial you'll create Ceph storage for k0s. Ceph is a highly scalable, distributed storage solution. It offers object, block, and file storage, and it's designed to run on commodity hardware. Ceph replicates data across multiple volumes, which makes it fault-tolerant. Another clear advantage of Ceph in Kubernetes is dynamic provisioning: applications only need to request storage (with a persistent volume claim) and Ceph automatically provisions the requested storage, without manual creation of a persistent volume each time.

A plain Ceph deployment is fairly complex. To make the deployment easier, we'll use the Rook operator. Rook is a CNCF project dedicated to storage orchestration. It supports several storage solutions, but in this tutorial we will use it to manage Ceph.

This tutorial uses three worker nodes and one controller. It's possible to use fewer nodes, but three worker nodes make a good example for deploying a highly available storage cluster. We use external storage partitions, which are attached to the worker nodes for Ceph to use.

After the Ceph deployment we'll deploy a sample application (MongoDB) to use the storage in practice.

"},{"location":"examples/rook-ceph/#prerequisites","title":"Prerequisites","text":"
  • Linux OS
  • GitHub access
  • AWS account
  • Terraform
"},{"location":"examples/rook-ceph/#deployment-steps","title":"Deployment steps","text":""},{"location":"examples/rook-ceph/#1-preparations","title":"1. Preparations","text":"

In this example we'll use Terraform to create four Ubuntu VMs on AWS. Using Terraform makes the VM deployment fast and repeatable. You can avoid manually setting up everything in the AWS GUI. Moreover, when you have finished with the tutorial, it's very easy to tear down the VMs with Terraform (with one command). However, you can set up the nodes in many different ways and it doesn't make a difference in the following steps.

We will use k0sctl to create the k0s cluster. The k0sctl repo also includes a ready-made Terraform configuration to create the VMs on AWS, which we'll use. Let's start by cloning the k0sctl repo.

git clone git@github.com:k0sproject/k0sctl.git\n

Take a look at the Terraform files

cd k0sctl/examples/aws-tf\nls -l\n

Open variables.tf and set the number of controller and worker nodes like this:

variable \"cluster_name\" {\ntype    = string\ndefault = \"k0sctl\"\n}\n\nvariable \"controller_count\" {\ntype    = number\ndefault = 1\n}\n\nvariable \"worker_count\" {\ntype    = number\ndefault = 3\n}\n\nvariable \"cluster_flavor\" {\ntype    = string\ndefault = \"t3.small\"\n}\n

Open main.tf to check or modify k0s version near the end of the file.

You can also configure a different name to your cluster and change the default VM type. t3.small (2 vCPUs, 2 GB RAM) runs just fine for this tutorial.

"},{"location":"examples/rook-ceph/#2-create-the-vms","title":"2. Create the VMs","text":"

For AWS, you need an account. Terraform will use the following environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN. You can easily copy-paste them from the AWS portal. For more information, see the AWS documentation.
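
For example, you can export them in your shell before running Terraform (the values below are placeholders):

export AWS_ACCESS_KEY_ID=<your-access-key-id>\nexport AWS_SECRET_ACCESS_KEY=<your-secret-access-key>\nexport AWS_SESSION_TOKEN=<your-session-token>\n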

When the environment variables are set, you can proceed with Terraform and deploy the VMs.

terraform init\nterraform apply\n

If you decide to create the VMs manually using the AWS GUI, you need to disable source/destination checking. This must always be disabled for multi-node Kubernetes clusters to get node-to-node communication working through Network Address Translation. For Terraform, this is already taken care of in the default configuration.

"},{"location":"examples/rook-ceph/#3-create-and-attach-the-volumes","title":"3. Create and attach the volumes","text":"

Ceph requires one of the following storage options for storing the data:

  • Raw devices (no partitions or formatted filesystems)
  • Raw partitions (no formatted filesystem)
  • PVs available from a storage class in block mode

We will be using raw partitions (AWS EBS volumes), which can be easily attached to the worker node VMs. They are automatically detected by Ceph with its default configuration.

Deploy AWS EBS volumes, one for each worker node. You can manually create three EBS volumes (for example, 10 GB each) using the AWS GUI and attach them to your worker nodes. Don't format them; Ceph handles that automatically.
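
If you prefer the command line, the same can be done with the AWS CLI; a sketch with placeholder IDs and availability zone (repeat for each worker node):

aws ec2 create-volume --availability-zone <az-of-the-worker> --size 10 --volume-type gp2\naws ec2 attach-volume --volume-id <volume-id> --instance-id <worker-instance-id> --device /dev/sdf\n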

After you have attached the EBS volumes to the worker nodes, log in to one of the workers and check the available block devices:

lsblk -f\n
NAME        FSTYPE   LABEL           UUID                                 FSAVAIL FSUSE% MOUNTPOINT\nloop0       squashfs                                                            0   100% /snap/amazon-ssm-agent/3552\nloop1       squashfs                                                            0   100% /snap/core18/1997\nloop2       squashfs                                                            0   100% /snap/snapd/11588\nloop3       squashfs                                                            0   100% /snap/lxd/19647\nnvme0n1\n\u2514\u2500nvme0n1p1 ext4     cloudimg-rootfs e8070c31-bfee-4314-a151-d1332dc23486    5.1G    33% /\nnvme1n1\n

The last line (nvme1n1) in this example printout corresponds to the attached EBS volume. Note that it doesn't have any filesystem (FSTYPE is empty). This meets the Ceph storage requirements and you are good to proceed.

"},{"location":"examples/rook-ceph/#4-install-k0s-using-k0sctl","title":"4. Install k0s using k0sctl","text":"

You can use Terraform to automatically output a config file for k0sctl with the IP addresses and access details.

terraform output -raw k0s_cluster > k0sctl.yaml\n

After that, deploying k0s is very easy with the ready-made configuration.

k0sctl apply --config k0sctl.yaml\n

It might take around 2-3 minutes for k0sctl to connect to each node, install k0s and join the nodes together into a cluster.

"},{"location":"examples/rook-ceph/#5-access-k0s-cluster","title":"5. Access k0s cluster","text":"

To access your new cluster remotely, you can use k0sctl to fetch kubeconfig and use that with kubectl or Lens.

k0sctl kubeconfig --config k0sctl.yaml > kubeconfig\nexport KUBECONFIG=$PWD/kubeconfig\nkubectl get nodes\n

The other option is to log in to your controller node and use the built-in k0s kubectl to access the cluster. Then you don't need to worry about kubeconfig (k0s takes care of that automatically).

ssh -i aws.pem <username>@<ip-address>\nsudo k0s kubectl get nodes\n
"},{"location":"examples/rook-ceph/#6-deploy-rook","title":"6. Deploy Rook","text":"

To get started with Rook, let's first clone the Rook GitHub repo:

git clone --single-branch --branch release-1.7 https://github.com/rook/rook.git\ncd rook/cluster/examples/kubernetes/ceph\n

We will mostly use the default Rook configuration. However, the k0s kubelet directory must be configured in operator.yaml like this:

ROOK_CSI_KUBELET_DIR_PATH: \"/var/lib/k0s/kubelet\"\n

To create the resources needed by Rook\u2019s Ceph operator, run:

kubectl apply -f crds.yaml -f common.yaml -f operator.yaml\n

Now you should see the operator running. Check it with:

kubectl get pods -n rook-ceph\n
"},{"location":"examples/rook-ceph/#7-deploy-ceph-cluster","title":"7. Deploy Ceph Cluster","text":"

Then you can proceed to create a Ceph cluster. Ceph will use the three EBS volumes attached to the worker nodes:

kubectl apply -f cluster.yaml\n

It takes a few minutes to prepare the volumes and create the cluster. Once this is completed, you should see output like the following:

kubectl get pods -n rook-ceph\n
NAME                                                         READY   STATUS      RESTARTS   AGE\ncsi-cephfsplugin-nhxc8                                       3/3     Running     0          2m48s\ncsi-cephfsplugin-provisioner-db45f85f5-ldhjp                 6/6     Running     0          2m48s\ncsi-cephfsplugin-provisioner-db45f85f5-sxfm8                 6/6     Running     0          2m48s\ncsi-cephfsplugin-tj2bh                                       3/3     Running     0          2m48s\ncsi-cephfsplugin-z2rrl                                       3/3     Running     0          2m48s\ncsi-rbdplugin-5q7gq                                          3/3     Running     0          2m49s\ncsi-rbdplugin-8sfpd                                          3/3     Running     0          2m49s\ncsi-rbdplugin-f2xdz                                          3/3     Running     0          2m49s\ncsi-rbdplugin-provisioner-d85cbdb48-g6vck                    6/6     Running     0          2m49s\ncsi-rbdplugin-provisioner-d85cbdb48-zpmvr                    6/6     Running     0          2m49s\nrook-ceph-crashcollector-ip-172-31-0-76-64cb4c7775-m55x2     1/1     Running     0          45s\nrook-ceph-crashcollector-ip-172-31-13-183-654b46588d-djqsd   1/1     Running     0          2m57s\nrook-ceph-crashcollector-ip-172-31-15-5-67b68698f-gcjb7      1/1     Running     0          2m46s\nrook-ceph-mgr-a-5ffc65c874-8pxgv                             1/1     Running     0          58s\nrook-ceph-mon-a-ffcd85c5f-z89tb                              1/1     Running     0          2m59s\nrook-ceph-mon-b-fc8f59464-lgczk                              1/1     Running     0          2m46s\nrook-ceph-mon-c-69bd87b558-kl4nl                             1/1     Running     0          91s\nrook-ceph-operator-54cf7487d4-pl66p                          1/1     Running     0          4m57s\nrook-ceph-osd-0-dd4fd8f6-g6s9m                               1/1     Running     0          48s\nrook-ceph-osd-1-7c478c49c4-gkqml                             1/1     Running     0          47s\nrook-ceph-osd-2-5b887995fd-26492                             1/1     Running     0          46s\nrook-ceph-osd-prepare-ip-172-31-0-76-6b5fw                   0/1     Completed   0          28s\nrook-ceph-osd-prepare-ip-172-31-13-183-cnkf9                 0/1     Completed   0          25s\nrook-ceph-osd-prepare-ip-172-31-15-5-qc6pt                   0/1     Completed   0          23s\n
"},{"location":"examples/rook-ceph/#8-configure-ceph-block-storage","title":"8. Configure Ceph block storage","text":"

Before Ceph can provide storage to your cluster, you need to create a ReplicaPool and a StorageClass. In this example, we use the default configuration to create the block storage.
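
The storageclass.yaml shipped with Rook defines, roughly, a replicated CephBlockPool plus a StorageClass that uses the Ceph RBD CSI provisioner; an abridged sketch is shown below (refer to the file in the Rook repo for the full set of parameters):

apiVersion: ceph.rook.io/v1\nkind: CephBlockPool\nmetadata:\nname: replicapool\nnamespace: rook-ceph\nspec:\nfailureDomain: host\nreplicated:\nsize: 3\n---\napiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\nname: rook-ceph-block\nprovisioner: rook-ceph.rbd.csi.ceph.com\nparameters:\nclusterID: rook-ceph\npool: replicapool\n# ... CSI secret and image format parameters omitted ...\nreclaimPolicy: Delete\n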

kubectl apply -f ./csi/rbd/storageclass.yaml\n
"},{"location":"examples/rook-ceph/#9-request-storage","title":"9. Request storage","text":"

Create a new manifest file mongo-pvc.yaml with the following content:

apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\nname: mongo-pvc\nspec:\nstorageClassName: rook-ceph-block\naccessModes:\n- ReadWriteOnce\nresources:\nrequests:\nstorage: 2Gi\n

This will create a Persistent Volume Claim (PVC) requesting 2 GB of block storage from Ceph. Provisioning is done dynamically. You can define the size freely, as long as it fits within the available storage.

kubectl apply -f mongo-pvc.yaml\n

You can now check the status of your PVC:

kubectl get pvc\n

When the PVC gets the requested volume reserved (bound), it should look like this:

kubectl get pvc\n
NAME        STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS      AGE\nmongo-pvc   Bound    pvc-08337736-65dd-49d2-938c-8197a8871739   2Gi        RWO            rook-ceph-block   6s\n
"},{"location":"examples/rook-ceph/#10-deploy-an-example-application","title":"10. Deploy an example application","text":"

Let's deploy a Mongo database to verify the Ceph storage. Create a new file mongo.yaml with the following content:

apiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: mongo\nspec:\nselector:\nmatchLabels:\napp: mongo\ntemplate:\nmetadata:\nlabels:\napp: mongo\nspec:\ncontainers:\n- image: mongo:4.0\nname: mongo\nports:\n- containerPort: 27017\nname: mongo\nvolumeMounts:\n- name: mongo-persistent-storage\nmountPath: /data/db\nvolumes:\n- name: mongo-persistent-storage\npersistentVolumeClaim:\nclaimName: mongo-pvc\n

Deploy the database:

kubectl apply -f mongo.yaml\n
"},{"location":"examples/rook-ceph/#11-access-the-application","title":"11. Access the application","text":"

Open the MongoDB shell using the mongo pod:

kubectl get pods\n
NAME                    READY   STATUS    RESTARTS   AGE\nmongo-b87cbd5cc-4wx8t   1/1     Running   0          76s\n
kubectl exec -it mongo-b87cbd5cc-4wx8t -- mongo\n

Create a DB and insert some data:

> use testDB\nswitched to db testDB\n> db.testDB.insertOne( {name: \"abc\", number: 123  })\n{\n  \"acknowledged\" : true,\n  \"insertedId\" : ObjectId(\"60815690a709d344f83b651d\")\n}\n> db.testDB.insertOne( {name: \"bcd\", number: 234  })\n{\n  \"acknowledged\" : true,\n  \"insertedId\" : ObjectId(\"6081569da709d344f83b651e\")\n}\n

Read the data:

> db.getCollection(\"testDB\").find()\n{ \"_id\" : ObjectId(\"60815690a709d344f83b651d\"), \"name\" : \"abc\", \"number\" : 123 }\n{ \"_id\" : ObjectId(\"6081569da709d344f83b651e\"), \"name\" : \"bcd\", \"number\" : 234 }\n>\n

You can also try to restart the mongo pod or restart the worker nodes to verify that the storage is persistent.
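
For example, you could restart the deployment and check that the previously inserted documents are still there; a sketch (the pod name will be different after the restart, and you read the data again exactly as shown above):

kubectl rollout restart deployment mongo\nkubectl get pods\nkubectl exec -it <new-mongo-pod-name> -- mongo\n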

"},{"location":"examples/rook-ceph/#12-clean-up","title":"12. Clean-up","text":"

You can use Terraform to take down the VMs:

terraform destroy\n

Remember to delete the EBS volumes separately.

"},{"location":"examples/rook-ceph/#conclusions","title":"Conclusions","text":"

You have now created replicated Ceph storage for k0s. All your data is stored on multiple disks at the same time, so you have a fault-tolerant solution. You have also enabled dynamic provisioning: your applications can request the available storage without manually creating persistent volumes each time.

This was just one example of deploying distributed storage to a k0s cluster using an operator. You can easily use other Kubernetes storage solutions with k0s.

"},{"location":"examples/traefik-ingress/","title":"Installing Traefik Ingress Controller","text":"

You can configure k0s with the Traefik Ingress Controller and a MetalLB service load balancer, and deploy the Traefik Dashboard using a service sample. To do this, you leverage the Helm chart extension functionality to add the correct extensions to the k0s.yaml file during cluster configuration.

"},{"location":"examples/traefik-ingress/#1-configure-k0syaml","title":"1. Configure k0s.yaml","text":"

Configure k0s to install Traefik and MetalLB during cluster bootstrapping by adding their Helm charts as extensions in the k0s configuration file (k0s.yaml).

Note:

A good practice is to have a small range of IP addresses that are addressable on your network, preferably outside the assignment pool your DHCP server allocates (though any valid IP range should work locally on your machine). Providing an addressable range allows you to access your load balancer and Ingress services from anywhere on your local network.

extensions:\nhelm:\nrepositories:\n- name: traefik\nurl: https://traefik.github.io/charts\n- name: bitnami\nurl: https://charts.bitnami.com/bitnami\ncharts:\n- name: traefik\nchartname: traefik/traefik\nversion: \"20.5.3\"\nnamespace: default\n- name: metallb\nchartname: bitnami/metallb\nversion: \"2.5.4\"\nnamespace: default\nvalues: |2\nconfigInline:\naddress-pools:\n- name: generic-cluster-pool\nprotocol: layer2\naddresses:\n- 192.168.0.5-192.168.0.10\n
"},{"location":"examples/traefik-ingress/#2-retrieve-the-load-balancer-ip","title":"2. Retrieve the Load Balancer IP","text":"

After you start your cluster, run kubectl get all to confirm the deployment of Traefik and MetalLB. The command should return a response with the metallb and traefik resources, along with a service load balancer that has an assigned EXTERNAL-IP.

kubectl get all\n

Output:

NAME                                                 READY   STATUS    RESTARTS   AGE\npod/metallb-1607085578-controller-864c9757f6-bpx6r   1/1     Running   0          81s\npod/metallb-1607085578-speaker-245c2                 1/1     Running   0          60s\npod/traefik-1607085579-77bbc57699-b2f2t              1/1     Running   0          81s\n\nNAME                         TYPE           CLUSTER-IP       EXTERNAL-IP      PORT(S)                      AGE\nservice/kubernetes           ClusterIP      10.96.0.1        <none>           443/TCP                      96s\nservice/traefik-1607085579   LoadBalancer   10.105.119.102   192.168.0.5      80:32153/TCP,443:30791/TCP   84s\n\nNAME                                        DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE\ndaemonset.apps/metallb-1607085578-speaker   1         1         1       1            1           kubernetes.io/os=linux   87s\n\nNAME                                            READY   UP-TO-DATE   AVAILABLE   AGE\ndeployment.apps/metallb-1607085578-controller   1/1     1            1           87s\ndeployment.apps/traefik-1607085579              1/1     1            1           84s\n\nNAME                                                       DESIRED   CURRENT   READY   AGE\nreplicaset.apps/metallb-1607085578-controller-864c9757f6   1         1         1       81s\nreplicaset.apps/traefik-1607085579-77bbc57699              1         1         1       81s\n

Take note of the EXTERNAL-IP given to the service/traefik-n load balancer. In this example, 192.168.0.5 has been assigned and can be used to access services via the Ingress proxy:

NAME                         TYPE           CLUSTER-IP       EXTERNAL-IP      PORT(S)                      AGE\nservice/traefik-1607085579   LoadBalancer   10.105.119.102   192.168.0.5      80:32153/TCP,443:30791/TCP   84s\n

Receiving a 404 response here is normal, as you've not configured any Ingress resources to respond yet:

curl http://192.168.0.5\n
404 page not found\n
"},{"location":"examples/traefik-ingress/#3-deploy-and-access-the-traefik-dashboard","title":"3. Deploy and access the Traefik Dashboard","text":"

With an available and addressable load balancer present on your cluster, you can now quickly deploy the Traefik dashboard and access it from anywhere on your LAN (assuming that MetalLB is configured with an addressable range).

  1. Create the Traefik Dashboard IngressRoute in a YAML file:

    apiVersion: traefik.containo.us/v1alpha1\nkind: IngressRoute\nmetadata:\nname: dashboard\nspec:\nentryPoints:\n- web\nroutes:\n- match: PathPrefix(`/dashboard`) || PathPrefix(`/api`)\nkind: Rule\nservices:\n- name: api@internal\nkind: TraefikService\n
  2. Deploy the resource:

    kubectl apply -f traefik-dashboard.yaml\n

    Output:

    ingressroute.traefik.containo.us/dashboard created\n

    At this point you should be able to access the dashboard using the EXTERNAL-IP that you noted above by visiting http://192.168.0.5/dashboard/ in your browser.
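
    If you prefer the command line, you can also sanity-check the dashboard endpoint with curl, using the example EXTERNAL-IP from above:

    curl -I http://192.168.0.5/dashboard/\n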

  3. Create a simple whoami Deployment, Service, and Ingress manifest:

    apiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: whoami-deployment\nspec:\nreplicas: 1\nselector:\nmatchLabels:\napp: whoami\ntemplate:\nmetadata:\nlabels:\napp: whoami\nspec:\ncontainers:\n- name: whoami-container\nimage: containous/whoami\n---\napiVersion: v1\nkind: Service\nmetadata:\nname: whoami-service\nspec:\nports:\n- name: http\ntargetPort: 80\nport: 80\nselector:\napp: whoami\n---\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\nname: whoami-ingress\nspec:\nrules:\n- http:\npaths:\n- path: /whoami\npathType: Exact\nbackend:\nservice:\nname: whoami-service\nport:\nnumber: 80\n
  4. Apply the manifests:

    kubectl apply -f whoami.yaml\n

    Output:

    deployment.apps/whoami-deployment created\nservice/whoami-service created\ningress.networking.k8s.io/whoami-ingress created\n
  5. Test the ingress and service:

    curl http://192.168.0.5/whoami\n

    Output:

    Hostname: whoami-deployment-85bfbd48f-7l77c\nIP: 127.0.0.1\nIP: ::1\nIP: 10.244.214.198\nIP: fe80::b049:f8ff:fe77:3e64\nRemoteAddr: 10.244.214.196:34858\nGET /whoami HTTP/1.1\nHost: 192.168.0.5\nUser-Agent: curl/7.68.0\nAccept: */*\nAccept-Encoding: gzip\nX-Forwarded-For: 192.168.0.82\nX-Forwarded-Host: 192.168.0.5\nX-Forwarded-Port: 80\nX-Forwarded-Proto: http\nX-Forwarded-Server: traefik-1607085579-77bbc57699-b2f2t\nX-Real-Ip: 192.168.0.82\n
"},{"location":"examples/traefik-ingress/#further-details","title":"Further details","text":"

With the Traefik Ingress Controller it is possible to use 3rd party tools, such as ngrok, to go further and expose your load balancer to the world. In doing this you enable dynamic certificate provisioning through Let's Encrypt, using either cert-manager or Traefik's own built-in ACME provider.

"},{"location":"examples/oidc/oidc-cluster-configuration/","title":"OpenID Connect integration","text":"

Developers use kubectl to access Kubernetes clusters. By default, kubectl uses a certificate to authenticate to the Kubernetes API. This means that when multiple developers need to access a cluster, the certificate needs to be shared. Sharing credentials to access a Kubernetes cluster is a significant security problem: the certificate is easy to compromise and the consequences can be catastrophic.

In this tutorial, we walk through how to set up your Kubernetes cluster to add Single Sign-On support for kubectl using OpenID Connect (OIDC).

"},{"location":"examples/oidc/oidc-cluster-configuration/#openid-connect-based-authentication","title":"OpenID Connect based authentication","text":"

OpenID Connect can be enabled by modifying k0s configuration (using extraArgs).

"},{"location":"examples/oidc/oidc-cluster-configuration/#configuring-k0s-overview","title":"Configuring k0s: overview","text":"

There is a list of arguments for the kube-apiserver that allow us to manage OIDC-based authentication:

  • --oidc-issuer-url (required): URL of the provider which allows the API server to discover public signing keys. Only URLs which use the https:// scheme are accepted. This is typically the provider's discovery URL without a path, for example \"https://accounts.google.com\" or \"https://login.salesforce.com\". This URL should point to the level below .well-known/openid-configuration. Example: if the discovery URL is https://accounts.google.com/.well-known/openid-configuration, the value should be https://accounts.google.com.
  • --oidc-client-id (required): A client id that all tokens must be issued for. Example: kubernetes.
  • --oidc-username-claim (optional): JWT claim to use as the user name. By default sub, which is expected to be a unique identifier of the end user. Admins can choose other claims, such as email or name, depending on their provider. However, claims other than email will be prefixed with the issuer URL to prevent naming clashes with other plugins. Example: sub.
  • --oidc-username-prefix (optional): Prefix prepended to username claims to prevent clashes with existing names (such as system: users). For example, the value oidc: will create usernames like oidc:jane.doe. If this flag isn't provided and --oidc-username-claim is a value other than email, the prefix defaults to ( Issuer URL )# where ( Issuer URL ) is the value of --oidc-issuer-url. The value - can be used to disable all prefixing. Example: oidc:.
  • --oidc-groups-claim (optional): JWT claim to use as the user's group. If the claim is present it must be an array of strings. Example: groups.
  • --oidc-groups-prefix (optional): Prefix prepended to group claims to prevent clashes with existing names (such as system: groups). For example, the value oidc: will create group names like oidc:engineering and oidc:infra. Example: oidc:.
  • --oidc-required-claim (optional): A key=value pair that describes a required claim in the ID Token. If set, the claim is verified to be present in the ID Token with a matching value. Repeat this flag to specify multiple claims. Example: claim=value.
  • --oidc-ca-file (optional): The path to the certificate for the CA that signed your identity provider's web certificate. Defaults to the host's root CAs. Example: /etc/kubernetes/ssl/kc-ca.pem.

To set up a bare-minimum example, we need to use:

  • oidc-issuer-url
  • oidc-client-id
  • oidc-username-claim
"},{"location":"examples/oidc/oidc-cluster-configuration/#configuring-k0s-prerequisites","title":"Configuring k0s: prerequisites","text":"

You will require:

  • issuer-url
  • client-id
  • username-claim

Please refer to the providers configuration guide or your selected OIDC provider's own documentation (we don't cover all of them in the k0s docs).

"},{"location":"examples/oidc/oidc-cluster-configuration/#configuration-example","title":"Configuration example","text":"
apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nspec:\napi:\nextraArgs:\noidc-issuer-url: <issuer-url>\noidc-client-id: <client-id>\noidc-username-claim: email # we use email token claim field as a username\n

Use this configuration as a starting point. Continue with the configuration guide to finish the k0s cluster installation.

"},{"location":"examples/oidc/oidc-cluster-configuration/#openid-connect-based-authorisation","title":"OpenID Connect based authorisation","text":"

There are two alternative options to implement authorization:

"},{"location":"examples/oidc/oidc-cluster-configuration/#provider-based-role-mapping","title":"Provider based role mapping","text":"

Please refer to the providers configuration guide. Generally speaking, the oidc-groups-claim argument lets you specify which token claim is used as the list of groups for RBAC purposes for a given user. You still need to somehow sync that data between your OIDC provider and the kube-apiserver RBAC system.
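
As an illustration of the Kubernetes side of that mapping (the group name oidc:engineering is hypothetical and assumes --oidc-groups-prefix is set to oidc:), a group coming from the token can be bound to a built-in role like this:

apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\nname: oidc-engineering-view\nsubjects:\n- kind: Group\nname: oidc:engineering\napiGroup: rbac.authorization.k8s.io\nroleRef:\nkind: ClusterRole\nname: view\napiGroup: rbac.authorization.k8s.io\n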

"},{"location":"examples/oidc/oidc-cluster-configuration/#manual-roles-management","title":"Manual roles management","text":"

To use manual role management, you will need to create a Role and RoleBinding for each new user within the k0s cluster. The Role can be shared by all users. Role example:

---\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\nnamespace: default\nname: dev-role\nrules:\n- apiGroups: [\"*\"]\nresources: [\"*\"]\nverbs: [\"*\"]\n

RoleBinding example:

kind: RoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\nname: dev-role-binding\nsubjects:\n- kind: User\nname: <provider side user id>\nroleRef:\nkind: Role\nname: dev-role\napiGroup: rbac.authorization.k8s.io\n

The provided Role example grants full permissions on everything and should be tuned down to your actual requirements.

"},{"location":"examples/oidc/oidc-cluster-configuration/#kubeconfig-management","title":"kubeconfig management","text":"

NB: it's not safe to provide the full content of /var/lib/k0s/pki/admin.conf to the end user. Instead, create a user-specific kubeconfig with limited permissions.

The authorization side of kubeconfig management is described in the provider-specific guides. Use /var/lib/k0s/pki/admin.conf as a template for a cluster-specific kubeconfig.
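
A minimal sketch of assembling such a kubeconfig with kubectl, assuming the cluster CA lives at /var/lib/k0s/pki/ca.crt and using placeholder names and addresses (the oidc user entry is the one created in the provider-specific guide):

kubectl config --kubeconfig=user.kubeconfig set-cluster k0s \\\n--server=https://<controller-ip>:6443 \\\n--certificate-authority=/var/lib/k0s/pki/ca.crt \\\n--embed-certs=true\nkubectl config --kubeconfig=user.kubeconfig set-context k0s-oidc --cluster=k0s --user=oidc\nkubectl config --kubeconfig=user.kubeconfig use-context k0s-oidc\n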

"},{"location":"examples/oidc/oidc-cluster-configuration/#references","title":"References","text":"

  • OAuth2 spec
  • Kubernetes authorization system (RBAC)
  • Kubernetes authentication system

"},{"location":"examples/oidc/oidc-provider-configuration/","title":"Providers","text":"

We use Google Cloud as the provider for the sake of the example. Check your vendor's documentation if you use some other provider.

"},{"location":"examples/oidc/oidc-provider-configuration/#notes-on-stand-alone-providers","title":"Notes on stand-alone providers","text":"

If you are using a stand-alone OIDC provider, you might need to specify the oidc-ca-file argument for the kube-apiserver.

"},{"location":"examples/oidc/oidc-provider-configuration/#google-cloud","title":"Google Cloud","text":"

We use the k8s-oidc-helper tool to create a proper kubeconfig user record.

The issuer URL for Google Cloud is https://accounts.google.com.

"},{"location":"examples/oidc/oidc-provider-configuration/#creating-an-application","title":"Creating an application","text":"
  • Go to the Google Cloud Dashboard
  • Create a new project in your organization
  • Go to the \"Credentials\" page
  • Create \"OAuth consent screen\"
"},{"location":"examples/oidc/oidc-provider-configuration/#creating-a-user-credentials","title":"Creating a user credentials","text":"
  • Go to the Google Cloud Dashboard
  • Go to the \"Credentials\" page
  • Create new credentials. Select \"OAuth client ID\" as a type.
  • Select \"Desktop\" app as an application type.
  • Save client ID and client secret
"},{"location":"examples/oidc/oidc-provider-configuration/#creating-kubeconfig-user-record","title":"Creating kubeconfig user record","text":"

Run the following command and follow the instructions:

k8s-oidc-helper --client-id=<CLIENT_ID> \\\n--client-secret=<CLIENT_SECRET> \\\n--write=true\n
"},{"location":"examples/oidc/oidc-provider-configuration/#using-kubelogin","title":"Using kubelogin","text":"

For other OIDC providers it is possible to use the kubelogin plugin. Please refer to its setup guide for details.

"},{"location":"examples/oidc/oidc-provider-configuration/#google-cloud-example-using-kubelogin","title":"Google Cloud example using kubelogin","text":"
kubectl oidc-login setup \\\n--oidc-issuer-url=https://accounts.google.com \\\n--oidc-client-id=<CLIENT_ID> \\\n--oidc-client-secret=<CLIENT_SECRET>\n\n  kubectl config set-credentials oidc \\\n--exec-api-version=client.authentication.k8s.io/v1beta1 \\\n--exec-command=kubectl \\\n--exec-arg=oidc-login \\\n--exec-arg=get-token \\\n--exec-arg=--oidc-issuer-url=https://accounts.google.com \\\n--exec-arg=--oidc-client-id=<CLIENT_ID>  \\\n--exec-arg=--oidc-client-secret=<CLIENT_SECRET>\n

You can switch the current context to oidc.

kubectl config set-context --current --user=oidc

"},{"location":"internal/publishing_docs_using_mkdocs/","title":"Publishing Docs","text":"

We use mkdocs and mike for publishing docs to docs.k0sproject.io. This guide provides a simple how-to on configuring and deploying newly added docs to our website.

"},{"location":"internal/publishing_docs_using_mkdocs/#requirements","title":"Requirements","text":"

Install mike: https://github.com/jimporter/mike#installation

"},{"location":"internal/publishing_docs_using_mkdocs/#adding-a-new-link-to-the-navigation","title":"Adding A New link to the Navigation","text":"
  • All docs must live under the docs directory (i.e., changes to the main README.md are not reflected on the website).
  • Add a new link under nav in the main mkdocs.yml file:
nav:\n- Overview: README.md\n- Creating A Cluster:\n- Quick Start Guide: create-cluster.md\n- Run in Docker: k0s-in-docker.md\n- Single node set-up: k0s-single-node.md\n- Configuration Reference:\n- Architecture: architecture.md\n- Networking: networking.md\n- Configuration Options: configuration.md\n- Using Cloud Providers: cloud-providers.md\n- Running k0s with Traefik: examples/traefik-ingress.md\n- Running k0s as a service: install.md\n- k0s CLI Help Pages: cli/k0s.md\n- Deploying Manifests: manifests.md\n- FAQ: FAQ.md\n- Troubleshooting: troubleshooting.md\n- Contributing:\n- Overview: contributors/overview.md\n- Workflow: contributors/github_workflow.md\n- Testing: contributors/testing.md\n
  • Once your changes are pushed to main, the \"Publish Docs\" job will start running: https://github.com/k0sproject/k0s/actions?query=workflow%3A%22Publish+docs+via+GitHub+Pages%22
  • You should see the deployment outcome in the gh-pages deployment page: https://github.com/k0sproject/k0s/deployments/activity_log?environment=github-pages
"},{"location":"internal/publishing_docs_using_mkdocs/#testing-docs-locally","title":"Testing docs locally","text":"

We've got a dockerized setup for easily testing docs locally. Simply run make docs-serve-dev. The docs will be available on http://localhost:8000.

Note: If you already have something running locally on port 8000, you can choose another port like so: make docs-serve-dev DOCS_DEV_PORT=9999. The docs will then be available on http://localhost:9999.

"},{"location":"internal/upgrading-calico/","title":"Upgrading Calico","text":"

k0s bundles Kubernetes manifests for Calico. The manifests are retrieved from the official Calico docs.

As fetching and modifying the entire multi-thousand line file is error-prone, you may follow these steps to upgrade Calico to the latest version:

  1. run ./get-calico.sh
  2. check the git diff to see if it looks sensible
  3. re-apply our manual adjustments (documented below)
  4. run make bindata-manifests
  5. compile, pray, and test
  6. commit and create a PR
"},{"location":"internal/upgrading-calico/#manual-adjustments","title":"Manual Adjustments","text":"

Note: All manual adjustments should be fairly obvious from the git diff. This section attempts to provide a sanity checklist to go through and make sure we still have those changes applied. The code blocks in this section are our modifications, not the Calico originals.

To see the diff without CRDs, you can do something like:

git diff ':!static/manifests/calico/CustomResourceDefinition'\n

That'll make it easier to spot any needed changes.

static/manifests/calico/DaemonSet/calico-node.yaml:

  • variable-based support for both vxlan and ipip (search for ipip to find):
{{- if eq .Mode \"ipip\" }}\n# Enable IPIP\n- name: CALICO_IPV4POOL_IPIP\nvalue: {{ .Overlay }}\n# Enable or Disable VXLAN on the default IP pool.\n- name: CALICO_IPV4POOL_VXLAN\nvalue: \"Never\"\n{{- else if eq .Mode \"vxlan\" }}\n# Disable IPIP\n- name: CALICO_IPV4POOL_IPIP\nvalue: \"Never\"\n# Enable VXLAN on the default IP pool.\n- name: CALICO_IPV4POOL_VXLAN\nvalue: {{ .Overlay }}\n- name: FELIX_VXLANPORT\nvalue: \"{{ .VxlanPort }}\"\n- name: FELIX_VXLANVNI\nvalue: \"{{ .VxlanVNI }}\"\n{{- end }}\n
  • iptables auto detect:
# Auto detect the iptables backend\n- name: FELIX_IPTABLESBACKEND\nvalue: \"auto\"\n
  • variable-based WireGuard support:
{{- if .EnableWireguard }}\n- name: FELIX_WIREGUARDENABLED\nvalue: \"true\"\n{{- end }}\n
  • variable-based cluster CIDR:
- name: CALICO_IPV4POOL_CIDR\nvalue: \"{{ .ClusterCIDR }}\"\n
  • custom backend and MTU
# calico-config.yaml\ncalico_backend: \"{{ .Mode }}\"\nveth_mtu: \"{{ .MTU }}\"\n
  • remove bgp from CLUSTER_TYPE
- name: CLUSTER_TYPE\nvalue: \"k8s\"\n
  • disable BIRD checks on liveness and readiness as we don't support BGP by removing -bird-ready and -bird-live from the readiness and liveness probes respectively
"},{"location":"internal/upgrading-calico/#container-image-names","title":"Container image names","text":"

Instead of hardcoded image names and versions, use placeholders to support configuration-level settings. The following placeholders are used:

  • CalicoCNIImage for calico/cni
  • CalicoNodeImage for calico/node
  • CalicoKubeControllersImage for calico/kube-controllers

Also, all containers in the manifests were modified to have the imagePullPolicy field:

imagePullPolicy: {{ .PullPolicy }}\n

Example:

# calico-node.yaml\nimage: {{ .CalicoCNIImage }}\n
"}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"k0s - The Zero Friction Kubernetes","text":"

k0s is an open source, all-inclusive Kubernetes distribution, which is configured with all of the features needed to build a Kubernetes cluster. Due to its simple design, flexible deployment options and modest system requirements, k0s is well suited for

  • Any cloud
  • Bare metal
  • Edge and IoT

k0s drastically reduces the complexity of installing and running a CNCF certified Kubernetes distribution. With k0s new clusters can be bootstrapped in minutes and developer friction is reduced to zero. This allows anyone with no special skills or expertise in Kubernetes to easily get started.

k0s is distributed as a single binary with zero host OS dependencies besides the host OS kernel. It works with any Linux without additional software packages or configuration. Any security vulnerabilities or performance issues can be fixed directly in the k0s distribution, which makes it extremely straightforward to keep clusters up-to-date and secure.

"},{"location":"#what-happened-to-github-stargazers","title":"What happened to Github stargazers?","text":"

In September 2022 we made a human error while creating some build automation scripts and tools for the GitHub repository. Our automation accidentally changed the repo to a private one for a few minutes. That in itself is not a big deal and everything was restored quickly. But the nasty side effect is that it also removed all the stargazers at that point. :(

Before that mishap we had 4776 stargazers, making k0s one of the most popular Kubernetes distros out there.

So if you are reading this and have not yet starred the k0s repo, we would highly appreciate the :star: to get our numbers closer to what they used to be.

"},{"location":"#key-features","title":"Key Features","text":"
  • Certified and 100% upstream Kubernetes
  • Multiple installation methods: single-node, multi-node, airgap and Docker
  • Automatic lifecycle management with k0sctl: upgrade, backup and restore
  • Modest system requirements (1 vCPU, 1 GB RAM)
  • Available as a single binary with no external runtime dependencies besides the kernel
  • Flexible deployment options with control plane isolation as default
  • Scalable from a single node to large, highly available clusters
  • Supports custom Container Network Interface (CNI) plugins (Kube-Router is the default, Calico is offered as a preconfigured alternative)
  • Supports custom Container Runtime Interface (CRI) plugins (containerd is the default)
  • Supports all Kubernetes storage options with Container Storage Interface (CSI), includes OpenEBS host-local storage provider
  • Supports a variety of datastore backends: etcd (default for multi-node clusters), SQLite (default for single node clusters), MySQL, and PostgreSQL
  • Supports x86-64, ARM64 and ARMv7
  • Includes Konnectivity service, CoreDNS and Metrics Server
"},{"location":"#getting-started","title":"Getting Started","text":"

Quick Start Guide for creating a full Kubernetes cluster with a single node.

"},{"location":"#demo","title":"Demo","text":""},{"location":"#community-support","title":"Community Support","text":"
  • Lens Forums - Request for support and help from the Lens and k0s community.
  • GitHub Issues - Submit your issues and feature requests via GitHub.

We welcome your help in building k0s! If you are interested, we invite you to check out the Contributing Guide and the Code of Conduct.

"},{"location":"#commercial-support","title":"Commercial Support","text":"

Mirantis offers technical support, professional services and training for k0s. The support subscriptions include, for example, prioritized support (Phone, Web, Email) and access to verified extensions on top of your k0s cluster.

For any k0s inquiries, please contact us via email info@k0sproject.io.

"},{"location":"CODE_OF_CONDUCT/","title":"k0s Community Code Of Conduct","text":"

Please refer to our contributor code of conduct.

"},{"location":"FAQ/","title":"Frequently asked questions","text":""},{"location":"FAQ/#how-is-k0s-pronounced","title":"How is k0s pronounced?","text":"

kay-zero-ess

"},{"location":"FAQ/#how-do-i-run-a-single-node-cluster","title":"How do I run a single node cluster?","text":"

The cluster can be started with:

k0s controller --single\n

See also the Getting Started tutorial.

"},{"location":"FAQ/#how-do-i-connect-to-the-cluster","title":"How do I connect to the cluster?","text":"

You can find the config in ${DATADIR}/pki/admin.conf (default: /var/lib/k0s/pki/admin.conf). Copy this file and change the localhost entry to the public IP of the controller. Use the modified config to connect with kubectl:

export KUBECONFIG=/path/to/admin.conf\nkubectl ...\n
"},{"location":"FAQ/#why-doesnt-kubectl-get-nodes-list-the-k0s-controllers","title":"Why doesn't kubectl get nodes list the k0s controllers?","text":"

By default, the control plane does not run kubelet at all and will not accept any workloads, so the controller will not show up in the node list in kubectl. If you want your controller to accept workloads and run pods, you can do so with k0s controller --enable-worker (recommended only for test/dev/POC environments).

"},{"location":"FAQ/#is-k0sproject-really-open-source","title":"Is k0sproject really open source?","text":"

Yes, k0sproject is 100% open source. The source code is under the Apache License 2.0 and the documentation is under the Creative Commons License. Mirantis, Inc. is the main contributor and sponsor for this OSS project: building all the binaries from upstream, performing necessary security scans and calculating checksums so that it's easy and safe to use. The use of these ready-made binaries is subject to the Mirantis EULA, and the binaries include only open source software.

"},{"location":"airgap-install/","title":"Airgap install","text":"

You can install k0s in an environment with restricted Internet access. Airgap installation requires an image bundle, which contains all the needed container images. There are two options to get the image bundle:

  • Use a ready-made image bundle, which is created for each k0s release. It can be downloaded from the releases page.
  • Create your own image bundle. In this case, you can easily customize the bundle to also include container images, which are not used by default in k0s.
"},{"location":"airgap-install/#prerequisites","title":"Prerequisites","text":"

In order to create your own image bundle, you need

  • A working cluster with at least one controller, to be used to build the image bundle. For more information, refer to the Quick Start Guide.
  • The containerd CLI management tool ctr, installed on the worker machine (refer to the ContainerD getting-started guide).
"},{"location":"airgap-install/#1-create-your-own-image-bundle-optional","title":"1. Create your own image bundle (optional)","text":"

k0s/containerd uses OCI (Open Container Initiative) bundles for airgap installation. OCI bundles must be uncompressed. As OCI bundles are built specifically for each architecture, create an OCI bundle that uses the same processor architecture (x86-64, ARM64, ARMv7) as on the target system.

k0s offers two methods for creating OCI bundles, one using Docker and the other using a previously set up k0s worker. Be aware, though, that you cannot use the Docker method for the ARM architectures due to a kube-proxy image multiarch manifest problem.

Note: k0s strictly matches image architecture, e.g. arm/v7 images won't work for arm64.

"},{"location":"airgap-install/#docker","title":"Docker","text":"
  1. Pull the images.

    k0s airgap list-images | xargs -I{} docker pull {}\n
  2. Create a bundle.

    docker image save $(k0s airgap list-images | xargs) -o bundle_file\n
"},{"location":"airgap-install/#previously-set-up-k0s-worker","title":"Previously set up k0s worker","text":"

As containerd pulls all the images during a normal k0s worker bootstrap, you can use it to build the OCI bundle of images.

Use the following commands on a machine with an installed k0s worker:

ctr --namespace k8s.io \\\n--address /run/k0s/containerd.sock \\\nimages export bundle_file $(k0s airgap list-images | xargs)\n
"},{"location":"airgap-install/#2a-sync-the-bundle-file-with-the-airgapped-machine-locally","title":"2a. Sync the bundle file with the airgapped machine (locally)","text":"

Copy the bundle_file you created in the previous step or downloaded from the releases page to the target machine into the images directory in the k0s data directory. Copy the bundle only to the worker nodes. Controller nodes don't use it.

# mkdir -p /var/lib/k0s/images\n# cp bundle_file /var/lib/k0s/images/bundle_file\n
"},{"location":"airgap-install/#2b-sync-the-bundle-file-with-the-airgapped-machines-remotely-with-k0sctl","title":"2b. Sync the bundle file with the airgapped machines (remotely with k0sctl)","text":"

As an alternative to the previous step, you can use k0sctl to upload the bundle file to the worker nodes. k0sctl can also be used to upload the k0s binary to all nodes. Take a look at this example (k0sctl.yaml) with one controller and one worker node to upload the bundle file and k0s binary:

apiVersion: k0sctl.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s-cluster\nspec:\nk0s:\nversion: 1.28.1+k0s.0\nhosts:\n- role: controller\nssh:\naddress: <controller-ip-address>\nuser: ubuntu\nkeyPath: /path/.ssh/id_rsa\n\n#  uploadBinary: <boolean>\n#    When true the k0s binaries are cached and uploaded\n#    from the host running k0sctl instead of downloading\n#    directly to the target host.\nuploadBinary: true\n\n#  k0sBinaryPath: <local filepath>\n#    Upload a custom or manually downloaded k0s binary\n#    from a local path on the host running k0sctl to the\n#    target host.\n# k0sBinaryPath: path/to/k0s_binary/k0s\n\n- role: worker\nssh:\naddress: <worker-ip-address>\nuser: ubuntu\nkeyPath: /path/.ssh/id_rsa\nuploadBinary: true\nfiles:\n# This airgap bundle file will be uploaded from the k0sctl\n# host to the specified directory on the target host\n- src: /local/path/to/bundle-file/airgap-bundle-amd64.tar\ndstDir: /var/lib/k0s/images/\nperm: 0755\n
"},{"location":"airgap-install/#3-ensure-pull-policy-in-the-k0syaml-optional","title":"3. Ensure pull policy in the k0s.yaml (optional)","text":"

Use the following k0s.yaml to ensure that containerd does not pull images for k0s components from the Internet at any time.

apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s\nspec:\nimages:\ndefault_pull_policy: Never\n
"},{"location":"airgap-install/#4-set-up-the-controller-and-worker-nodes","title":"4. Set up the controller and worker nodes","text":"

Refer to the Manual Install for information on setting up the controller and worker nodes locally. Alternatively, you can use k0sctl.

Note: During worker start-up, k0s imports all bundles from $K0S_DATA_DIR/images before starting kubelet.

"},{"location":"architecture/","title":"Architecture","text":"

Note: As k0s is a new and dynamic project, the product architecture may occasionally outpace the documentation. The high level concepts and patterns, however, should always apply.

"},{"location":"architecture/#packaging","title":"Packaging","text":"

The k0s package is a single, self-extracting binary that embeds Kubernetes binaries, the benefits of which include:

  • Statically compiled
  • No OS-level dependencies
  • Requires no RPMs, dependencies, snaps, or any other OS-specific packaging
  • Provides a single package for all operating systems
  • Allows full version control for each dependency

"},{"location":"architecture/#control-plane","title":"Control plane","text":"

As a single binary, k0s acts as the process supervisor for all other control plane components. As such, there is no container engine or kubelet running on controllers by default, which means that a cluster user cannot schedule workloads onto controller nodes.

Using k0s you can create, manage, and configure each of the components, running each as a \"naked\" process. Thus, there is no container engine running on the controller node.

"},{"location":"architecture/#storage","title":"Storage","text":"

The Kubernetes control plane typically supports only etcd as its datastore. k0s, however, supports many other datastore options in addition to etcd, which it achieves by including kine. Kine allows the use of a wide variety of backend data stores, such as MySQL, PostgreSQL, SQLite, and dqlite (refer to the spec.storage documentation).
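
For instance, a k0s configuration that switches the datastore to kine backed by MySQL might look roughly like the following sketch (the connection string is a placeholder; check the spec.storage documentation for the exact schema):

apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nspec:\nstorage:\ntype: kine\nkine:\ndataSource: \"mysql://k0s:password@tcp(mysql.example.com:3306)/k0s\"\n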

In the case of k0s-managed etcd, k0s manages the full lifecycle of the etcd cluster. For example, when joining a new controller node with k0s controller \"long-join-token\", k0s automatically adjusts the etcd cluster membership info to allow the new member to join the cluster.

Note: k0s cannot shrink the etcd cluster. As such, before shutting down the k0s controller on a node, that node must first be manually removed from the etcd cluster.
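
k0s ships an etcd subcommand for this; a sketch of removing a member before shutting its controller down (the peer address is a placeholder, and the exact flags may vary between k0s versions):

k0s etcd leave --peer-address <ip-of-the-node-to-remove>\n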

"},{"location":"architecture/#worker-node","title":"Worker node","text":"

As with the control plane, with k0s you can create and manage the core worker components as naked processes on the worker node.

By default, k0s workers use containerd as a high-level runtime and runc as a low-level runtime. Custom runtimes are also supported, refer to Using custom CRI runtime.

"},{"location":"autopilot-multicommand/","title":"Multi-Command Plans","text":"

Autopilot relies on a Plan for defining the Commands that should be executed, the Signal Nodes that each should be run on, and the status of each Command.

A Plan:

  • Defines one or many Commands that specify what actions should be performed.
  • Specifies how Signal Nodes should be discovered per-Command.
  • Saves the status of the Plan execution by resolved Signal Nodes

A Command:

  • An instructional step inside of a Plan that is applied against a Signal Node

A Signal Node:

  • Any node (controller or worker) that can receive updates with Autopilot.
"},{"location":"autopilot-multicommand/#execution","title":"Execution","text":"

The execution of a Plan is the result of processing Commands through a number of Processing States.

When a Plan is executed, each of the Commands are executed in the order of their appearance in the Plan.

  • A Plan transitions to the next Command only when the current Command posts a state of Completed.
  • Any Command that posts one of the recognized Error States will cause the current Command and Plan to abort processing.
    • The status of the Command and Plan will reflect this.
  • A Plan is considered finished only when all of its defined Commands post a Completed state.
"},{"location":"autopilot-multicommand/#status","title":"Status","text":"

The progress and state of each Command is recorded in the Plan status.

  • Every Command in the Plan has an associated status entry with the same index as the Command
    • i.e. the second Command in a Plan has an index of 1, and so does its status.
  • The status of all Commands is taken into consideration when determining if the Plan is finished.
"},{"location":"autopilot-multicommand/#example","title":"Example","text":"

The following is an example of a Plan that has been applied and is currently being processed by autopilot.

(line numbers added for commentary below)

 1: apiVersion: autopilot.k0sproject.io/v1beta2\n2:  kind: Plan\n3:  metadata:\n4:    annotations:\n5:      <omitted>\n6:  spec:\n7:    commands:\n8:    - airgapupdate:\n9:        version: v1.28.1+k0s.0\n10:        platforms:\n11:          linux-amd64:\n12:            url: https://github.com/k0sproject/k0s/releases/download/v1.28.1+k0s.0/k0s-airgap-bundle-v1.28.1+k0s.0-amd64\n13:        workers:\n14:          discovery:\n15:            static:\n16:              nodes:\n17:              - worker0\n18:    - k0supdate:\n19:        version: v1.28.1+k0s.0\n20:        platforms:\n21:          linux-amd64:\n22:            url: https://github.com/k0sproject/k0s/releases/download/v1.28.1+k0s.0/k0s-v1.28.1+k0s.0-amd64\n23:        targets:\n24:          controllers:\n25:            discovery:\n26:              static:\n27:                nodes:\n28:                - controller0\n29:          workers:\n30:            discovery:\n31:              static:\n32:                nodes:\n33:                - worker0\n34:    id: id123\n35:    timestamp: now\n36:  status:\n37:    commands:\n38:    - airgapupdate:\n39:        workers:\n40:        - lastUpdatedTimestamp: \"2022-05-11T19:13:02Z\"\n41:          name: worker0\n42:          state: SignalSent\n43:      id: 0\n44:      state: SchedulableWait\n45:    - id: 1\n46:      k0supdate:\n47:        controllers:\n48:        - lastUpdatedTimestamp: \"2022-05-11T19:13:02Z\"\n49:          name: controller0\n50:          state: SignalPending\n51:        workers:\n52:        - lastUpdatedTimestamp: \"2022-05-11T19:13:02Z\"\n53:          name: worker0\n54:          state: SignalPending\n55:      state: SchedulableWait\n56:    state: SchedulableWait\n
  • Lines 7-33 are the two Commands that make up this plan -- an airgapupdate and k0supdate.
  • Lines 38-55 are the associated status entries for the two Commands.

This Plan excerpt shows that autopilot has successfully processed the Plan and has begun processing the airgapupdate Command. Its status indicates SignalSent, which means that the Signal Node has been sent signaling information to perform an airgap update.

"},{"location":"autopilot-multicommand/#processing-states","title":"Processing States","text":"

The following are the various states that both Plans and Commands adhere to.

stateDiagram-v2\n    [*]-->NewPlan\n    NewPlan-->SchedulableWait\n    NewPlan-->Errors***\n\n    SchedulableWait-->Schedulable\n    SchedulableWait-->Completed\n    Schedulable-->SchedulableWait\n\n    Errors***-->[*]\n    Completed-->[*]

Note that the Errors state is elaborated in detail below in Error States.

"},{"location":"autopilot-multicommand/#newplan","title":"NewPlan","text":"

When a Plan is created with the name autopilot, the NewPlan state processing takes effect.

It is the responsibility of NewPlan to ensure that the status of all the Commands is represented in the Plan status. This Plan status is needed at later points in Plan processing to determine whether the entire Plan is completed.

The main difference between NewPlan and all the other states is that NewPlan will iterate over all commands; the other states deal with the active command.

"},{"location":"autopilot-multicommand/#schedulablewait","title":"SchedulableWait","text":"

Used to evaluate a Command to determine if it can be scheduled for processing. If it is determined that the Command can be processed, the state is set to Schedulable.

"},{"location":"autopilot-multicommand/#schedulable","title":"Schedulable","text":"

The Schedulable state is set by SchedulableWait to indicate that this command should execute. A Command in this state executes whatever logic the Command defines.

At the end of this state, processing should either transition back to SchedulableWait for further processing and completion detection, or transition to an error.

"},{"location":"autopilot-multicommand/#completed","title":"Completed","text":"

The Completed state indicates that the command has finished processing. Once a plan/command is in the Completed state, no further processing will occur on that plan/command.

"},{"location":"autopilot-multicommand/#error-states","title":"Error States","text":"

When plan or command processing goes into one of the designated error states, this is considered fatal and the plan/command processing will terminate.

Error states are generally defined by the Command implementation. The core autopilot functionality is only concerned with the four core states (NewPlan, SchedulableWait, Schedulable, Completed), and treats all other states as errors.

flowchart TD\n    Errors --> InconsistentTargets\n    Errors --> IncompleteTargets\n    Errors --> Restricted\n    Errors --> MissingPlatform\n    Errors --> MissingSignalNode
| Error State | Command | States | Description |
| --- | --- | --- | --- |
| InconsistentTargets | k0supdate | Schedulable | Indicates that a Signal Node probe has failed for any node that was previously discovered during NewPlan. |
| IncompleteTargets | airgapupdate, k0supdate | NewPlan, Schedulable | Indicates that a Signal Node that existed during the discover phase in NewPlan no longer exists (i.e. no ControlNode or Node object). |
| Restricted | airgapupdate, k0supdate | NewPlan | Indicates that a Plan has requested an update of a Signal Node type that contradicts the startup exclusions (the --exclude-from-plans argument). |
| MissingSignalNode | airgapupdate, k0supdate | Schedulable | Indicates that a Signal Node that existed during the discover phase in NewPlan no longer exists (i.e. no matching ControlNode or Node object). |
"},{"location":"autopilot-multicommand/#sequence-example","title":"Sequence: Example","text":"

Using the example above as a reference, this outlines the basic sequence of events of state transitions to the operations performed on each object.

sequenceDiagram\n  PlanStateHandler->>+AirgapUpdateCommand: State: NewPlan\n  AirgapUpdateCommand->>-AirgapUpdateCommand: cmd.NewPlan() -- >SchedulableWait\n  PlanStateHandler->>+K0sUpdateCommand: State: NewPlan\n  K0sUpdateCommand->>-K0sUpdateCommand: cmd.NewPlan() --> SchedulableWait\n  Note over PlanStateHandler,SignalNode(worker0): NewPlan Finished / All Commands\n\n  PlanStateHandler->>+AirgapUpdateCommand: State: SchedulableWait\n  AirgapUpdateCommand->>-AirgapUpdateCommand: cmd.SchedulableWait() --> Schedulable\n  PlanStateHandler->>+AirgapUpdateCommand: State: Schedulable\n  AirgapUpdateCommand->>-SignalNode(worker0): signal_v2(airgap-data) --> SchedulableWait\n  PlanStateHandler->>+AirgapUpdateCommand: State: SchedulableWait\n  AirgapUpdateCommand->>-AirgapUpdateCommand: cmd.SchedulableWait() --> Completed\n  Note over PlanStateHandler,SignalNode(worker0): AirgapUpdate Finished / worker0\n\n  PlanStateHandler->>+K0sUpdateCommand: State: SchedulableWait\n  K0sUpdateCommand->>-K0sUpdateCommand: cmd.SchedulableWait() --> Schedulable\n  PlanStateHandler->>+K0sUpdateCommand: State: Schedulable\n  K0sUpdateCommand->>-SignalNode(controller0): signal_v2(k0s-data) --> SchedulableWait\n  PlanStateHandler->>+K0sUpdateCommand: State: SchedulableWait\n  K0sUpdateCommand->>-K0sUpdateCommand: cmd.SchedulableWait() --> Completed\n  Note over PlanStateHandler,SignalNode(controller0): K0sUpdate Finished / controller0\n\n  PlanStateHandler->>+K0sUpdateCommand: State: SchedulableWait\n  K0sUpdateCommand->>-K0sUpdateCommand: cmd.SchedulableWait() --> Schedulable\n  PlanStateHandler->>+K0sUpdateCommand: State: Schedulable\n  K0sUpdateCommand->>-SignalNode(worker0): signal_v2(k0s-data) --> SchedulableWait\n  PlanStateHandler->>+K0sUpdateCommand: State: SchedulableWait\n  K0sUpdateCommand->>-K0sUpdateCommand: cmd.SchedulableWait() --> Completed\n  Note over PlanStateHandler,SignalNode(worker0): K0sUpdate Finished / worker0\n\n  PlanStateHandler->>PlanStateHandler: Completed
"},{"location":"autopilot/","title":"Autopilot","text":"

Autopilot is a tool for updating your k0s controller and worker nodes using specialized plans. A public update server is hosted on the same domain as the documentation site; see the example below on how to use it. Only a single channel, edge_release, is available, and it exposes the latest released version.

"},{"location":"autopilot/#how-it-works","title":"How it works","text":"
  • You create a Plan YAML
    • Defining the update payload (new version of k0s, URLs for platforms, etc)
    • Add definitions for all the nodes that should receive the update.
      • Either statically, or dynamically using label/field selectors
  • Apply the Plan
    • Applying a Plan is a simple kubectl apply operation (see the sketch after this list).
  • Monitor the progress
    • The applied Plan provides a status that details the progress.
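
A minimal sketch of this workflow, assuming the Plan has been saved locally as plan.yaml (a hypothetical file name):

# Apply the Plan; the object must be named autopilot for autopilot to act on it.\nkubectl apply -f plan.yaml\n# Monitor the progress via the Plan status.\nkubectl get plan autopilot -oyaml\n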
"},{"location":"autopilot/#automatic-updates","title":"Automatic updates","text":"

To enable automatic updates, create an UpdateConfig object:

apiVersion: autopilot.k0sproject.io/v1beta2\nkind: UpdateConfig\nmetadata:\nname: example\nnamespace: default\nspec:\nchannel: edge_release\nupdateServer: https://docs.k0sproject.io/\nupgradeStrategy:\ncron: \"0 12 * * TUE,WED\" # Check for updates at 12:00 on Tuesday and Wednesday.\n
"},{"location":"autopilot/#safeguards","title":"Safeguards","text":"

There are a number of safeguards in place to avoid breaking a cluster.

"},{"location":"autopilot/#stateless-component","title":"Stateless Component","text":"
  • The autopilot component was designed not to require any heavy state or massive synchronization. Controllers can disappear, and backup controllers can resume the autopilot operations.
"},{"location":"autopilot/#workers-update-only-after-controllers","title":"Workers Update Only After Controllers","text":"
  • The versioning that Kubelet and the Kubernetes API server adhere to requires that Kubelets should not be of a newer version than the API server.
  • When a Plan that includes both controller and worker nodes is applied, autopilot updates all of the controller nodes first. Only when all controllers have updated successfully will worker nodes receive their update instructions.
"},{"location":"autopilot/#plans-are-immutable","title":"Plans are Immutable","text":"
  • When you apply a Plan, autopilot evaluates all of the controllers and workers that should be included into the Plan, and tracks them in the status. After this point, no additional changes to the plan (other than status) will be recognized.
    • This helps in largely dynamic worker node environments where nodes that may have been matched by the selector discovery method no longer exist by the time the update is ready to be scheduled.
"},{"location":"autopilot/#controller-quorum-safety","title":"Controller Quorum Safety","text":"
  • Prior to scheduling a controller update, autopilot queries the API server of all controllers to ensure that they report a successful /ready
  • Only once all controllers are /ready will the current controller get sent update signaling.
  • In the event that any controller reports a non-ready, the Plan transitions into an InconsistentTargets state, and the Plan execution ends.
"},{"location":"autopilot/#controllers-update-sequentially","title":"Controllers Update Sequentially","text":"
  • Despite having the configuration options for controllers to set concurrency, only one controller will be updated at a time.
"},{"location":"autopilot/#update-payload-verification","title":"Update Payload Verification","text":"
  • Each update object payload can provide an optional sha256 hash of the update content (specified in url), which is compared against the update content after it downloads (see the example below).
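
For example, the hash to place in the Plan can be computed with standard tooling (a sketch; the file name is a placeholder):

# Compute the SHA256 checksum of the downloaded k0s binary.\nsha256sum k0s-v1.28.1+k0s.0-amd64\n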
"},{"location":"autopilot/#configuration","title":"Configuration","text":"

Autopilot relies on a Plan object for its instructions on what to update.

Here is an example Autopilot Plan:

apiVersion: autopilot.k0sproject.io/v1beta2\nkind: Plan\nmetadata:\nname: autopilot\n\nspec:\nid: id1234\ntimestamp: now\n\ncommands:\n- k0supdate:\nversion: v1.28.1+k0s.0\nplatforms:\nlinux-amd64:\nurl: https://github.com/k0sproject/k0s/releases/download/v1.28.1+k0s.0/k0s-v1.28.1+k0s.0-amd64\nsha256: '0000000000000000000000000000000000000000000000000000000000000000'\ntargets:\ncontrollers:\ndiscovery:\nstatic:\nnodes:\n- ip-172-31-44-131\n- ip-172-31-42-134\n- ip-172-31-39-65\nworkers:\nlimits:\nconcurrent: 5\ndiscovery:\nselector:\nlabels: environment=staging\nfields: metadata.name=worker2\n
"},{"location":"autopilot/#core-fields","title":"Core Fields","text":""},{"location":"autopilot/#apiversion-string-required","title":"apiVersion <string> (required)","text":"
  • The current version of the Autopilot API is v1beta2, with a full group-version of autopilot.k0sproject.io/v1beta2
"},{"location":"autopilot/#metadataname-string-required","title":"metadata.name <string> (required)","text":"
  • The name of the plan should always be autopilot
    • Note: Plans will not execute if they don't follow this convention.
"},{"location":"autopilot/#spec-fields","title":"Spec Fields","text":""},{"location":"autopilot/#specid-string-optional","title":"spec.id <string> (optional)","text":"
  • An identifier that can be provided by the creator for informational and tracking purposes.
"},{"location":"autopilot/#spectimestamp-string-optional","title":"spec.timestamp <string> (optional)","text":"
  • A timestamp value that can be provided by the creator for informational purposes. Autopilot does nothing with this information.
"},{"location":"autopilot/#speccommands-required","title":"spec.commands[] (required)","text":"
  • The commands array contains all of the commands that should be performed as a part of the plan.
"},{"location":"autopilot/#k0supdate-command","title":"k0supdate Command","text":""},{"location":"autopilot/#speccommandsk0supdateversion-string-required","title":"spec.commands[].k0supdate.version <string> (required)","text":"
  • The version of the binary being updated. This version is used to compare against the installed version before and after update to ensure success.
"},{"location":"autopilot/#speccommandsk0supdateplatformsurl-string-required","title":"spec.commands[].k0supdate.platforms.*.url <string> (required)","text":"
  • A URL specifying where the updated binary should be downloaded from for this specific platform.
    • The naming of platforms is a combination of $GOOS and $GOARCH, separated by a hyphen (-)
      • eg: linux-amd64, linux-arm64, linux-arm
    • Note: The main supported platform is linux. Autopilot may work on other platforms, however this has not been tested.
"},{"location":"autopilot/#speccommandsk0supdateplatformssha256-string-optional","title":"spec.commands[].k0supdate.platforms.*.sha256 <string> (optional)","text":"
  • If a SHA256 hash is provided for the binary, the completed download will be verified against it.
"},{"location":"autopilot/#speccommandsk0supdatetargetscontrollers-object-optional","title":"spec.commands[].k0supdate.targets.controllers <object> (optional)","text":"
  • This object provides the details of how controllers should be updated.
"},{"location":"autopilot/#speccommandsk0supdatetargetscontrollerslimitsconcurrent-int-fixed-as-1","title":"spec.commands[].k0supdate.targets.controllers.limits.concurrent <int> (fixed as 1)","text":"
  • The configuration allows for specifying the number of concurrent controller updates through the plan spec; however, for controller targets this is always fixed to 1.
  • By ensuring that only one controller updates at a time, we aim to avoid scenarios where quorum may be disrupted.
"},{"location":"autopilot/#speccommandsk0supdatetargetsworkers-object-optional","title":"spec.commands[].k0supdate.targets.workers <object> (optional)","text":"
  • This object provides the details of how workers should be updated.
"},{"location":"autopilot/#speccommandsk0supdatetargetsworkerslimitsconcurrent-int-optional-default-1","title":"spec.commands[].k0supdate.targets.workers.limits.concurrent <int> (optional, default = 1)","text":"
  • Specifying a concurrent value for worker targets will allow for that number of workers to be updated at a time. If no value is provided, 1 is assumed.
"},{"location":"autopilot/#airgapupdate-command","title":"airgapupdate Command","text":""},{"location":"autopilot/#speccommandsairgapupdateversion-string-required","title":"spec.commands[].airgapupdate.version <string> (required)","text":"
  • The version of the airgap bundle being updated.
"},{"location":"autopilot/#speccommandsairgapupdateplatformsurl-string-required","title":"spec.commands[].airgapupdate.platforms.*.url <string> (required)","text":"
  • A URL specifying where the updated binary should be downloaded from for this specific platform.
    • The naming of platforms is a combination of $GOOS and $GOARCH, separated by a hyphen (-)
      • eg: linux-amd64, linux-arm64, linux-arm
    • Note: The main supported platform is linux. Autopilot may work on other platforms, however this has not been tested.
"},{"location":"autopilot/#speccommandsairgapupdateplatformssha256-string-optional","title":"spec.commands[].airgapupdate.platforms.*.sha256 <string> (optional)","text":"
  • If a SHA256 hash is provided for the binary, the completed download will be verified against it.
"},{"location":"autopilot/#speccommandsairgapupdatetargetsworkers-object-optional","title":"spec.commands[].airgapupdate.targets.workers <object> (optional)","text":"
  • This object provides the details of how workers should be updated.
"},{"location":"autopilot/#speccommandsairgapupdatetargetsworkerslimitsconcurrent-int-optional-default-1","title":"spec.commands[].airgapupdate.targets.workers.limits.concurrent <int> (optional, default = 1)","text":"
  • Specifying a concurrent value for worker targets will allow for that number of workers to be updated at a time. If no value is provided, 1 is assumed.
"},{"location":"autopilot/#static-discovery","title":"Static Discovery","text":"

This defines the static discovery method used for this set of targets (controllers, workers). The static discovery method relies on a fixed set of hostnames defined in .nodes.

It is expected that a Node (workers) or ControlNode (controllers) object exists with the same name.

  static:\nnodes:\n- ip-172-31-44-131\n- ip-172-31-42-134\n- ip-172-31-39-65\n
"},{"location":"autopilot/#speccommandsk0supdatetargetsdiscoverystaticnodes-string-required-for-static","title":"spec.commands[].k0supdate.targets.*.discovery.static.nodes[] <string> (required for static)","text":"
  • A list of hostnames that should be included in the target set (controllers, workers).
"},{"location":"autopilot/#selector-target-discovery","title":"Selector Target Discovery","text":"

The selector target discovery method relies on a dynamic query to the Kubernetes API using labels and fields to produce a set of hosts that should be updated.

Providing both labels and fields in the selector definition will result in a logical AND of both operands.

  selector:\nlabels: environment=staging\nfields: metadata.name=worker2\n

Specifying an empty selector will result in all nodes being selected for this target set.

  selector: {}\n
"},{"location":"autopilot/#speccommandsk0supdatetargetsdiscoveryselectorlabels-string-optional","title":"spec.commands[].k0supdate.targets.*.discovery.selector.labels <string> (optional)","text":"
  • A collection of name/value labels that should be used for finding the appropriate nodes for the update of this target set.
"},{"location":"autopilot/#speccommandsk0supdatetargetsdiscoveryselectorfields-string-optional","title":"spec.commands[].k0supdate.targets.*.discovery.selector.fields <string> (optional)","text":"
  • A collection of name/value fields that should be used for finding the appropriate nodes for the update of this target set.
    • Note: Currently only the field metadata.name is available as a query field.
"},{"location":"autopilot/#status-reporting","title":"Status Reporting","text":"

After a Plan has been applied, its progress can be viewed in the .status of the autopilot Plan.

    kubectl get plan autopilot -oyaml\n

An example of a Plan status:

  status:\nstate: SchedulableWait\ncommands:\n- state: SchedulableWait\nk0supdate:\ncontrollers:\n- lastUpdatedTimestamp: \"2022-04-07T15:52:44Z\"\nname: controller0\nstate: SignalCompleted\n- lastUpdatedTimestamp: \"2022-04-07T15:52:24Z\"\nname: controller1\nstate: SignalCompleted\n- lastUpdatedTimestamp: \"2022-04-07T15:52:24Z\"\nname: controller2\nstate: SignalPending\nworkers:\n- lastUpdatedTimestamp: \"2022-04-07T15:52:24Z\"\nname: worker0\nstate: SignalPending\n- lastUpdatedTimestamp: \"2022-04-07T15:52:24Z\"\nname: worker1\nstate: SignalPending\n- lastUpdatedTimestamp: \"2022-04-07T15:52:24Z\"\nname: worker2\nstate: SignalPending\n

This status indicates that:

  • The overall status of the update is SchedulableWait, meaning that autopilot is waiting for the next opportunity to process a command.
  • There are three controller nodes
    • Two controllers have SignalCompleted successfully
    • One is waiting to be signalled (SignalPending)
  • There are also three worker nodes
    • All are awaiting signaling updates (SignalPending)
"},{"location":"autopilot/#plan-status","title":"Plan Status","text":"

The Plan status at .status.state represents the overall status of the autopilot update operation. There are a number of statuses available:

| Status | Description | Ends Plan? |
| --- | --- | --- |
| IncompleteTargets | There are nodes in the resolved Plan that do not have associated Node (worker) or ControlNode (controller) objects. | Yes |
| InconsistentTargets | A controller has reported itself as not-ready during the selection of the next controller to update. | Yes |
| Schedulable | Indicates that the Plan can be re-evaluated to determine which node to update next. | No |
| SchedulableWait | Scheduling operations are in progress, and no further update scheduling should occur. | No |
| Completed | The Plan has run successfully to completion. | Yes |
| Restricted | The Plan included node types (controller or worker) that violate the --exclude-from-plans restrictions. | Yes |
"},{"location":"autopilot/#node-status","title":"Node Status","text":"

Similar to the Plan Status, the individual nodes can have their own statuses:

| Status | Description |
| --- | --- |
| SignalPending | The node is available and awaiting an update signal. |
| SignalSent | Update signaling has been successfully applied to this node. |
| MissingPlatform | This node is a platform for which an update has not been provided. |
| MissingSignalNode | This node does not have an associated Node (worker) or ControlNode (controller) object. |
"},{"location":"autopilot/#updateconfig","title":"UpdateConfig","text":""},{"location":"autopilot/#updateconfig-core-fields","title":"UpdateConfig Core Fields","text":""},{"location":"autopilot/#apiversion-string-required-field","title":"apiVersion <string> (required field)","text":"
  • API version. The current version of the Autopilot API is v1beta2, with a full group-version of autopilot.k0sproject.io/v1beta2
"},{"location":"autopilot/#metadataname-string-required-field","title":"metadata.name <string> (required field)","text":"
  • Name of the config.
"},{"location":"autopilot/#spec","title":"Spec","text":""},{"location":"autopilot/#specchannel-string-optional","title":"spec.channel <string> (optional)","text":"
  • Update channel to use. Supported values: stable (default), unstable.
"},{"location":"autopilot/#specupdateserver-string-optional","title":"spec.updateServer <string> (optional)","text":"
  • Update server URL.
"},{"location":"autopilot/#specupgradestrategycron-string-optional","title":"spec.upgradeStrategy.cron <string> (optional)","text":"
  • Schedule to check for updates in crontab format.
"},{"location":"autopilot/#specplanspec-string-optional","title":"spec.planSpec <string> (optional)","text":"
  • Describes the behavior of the autopilot-generated Plan.
"},{"location":"autopilot/#example","title":"Example","text":"
apiVersion: autopilot.k0sproject.io/v1beta2\nkind: UpdateConfig\nmetadata:\nname: example\nspec:\nchannel: stable\nupdateServer: https://updates.k0sproject.io/\nupgradeStrategy:\ncron: \"0 12 * * TUE,WED\" # Check for updates at 12:00 on Tuesday and Wednesday.\n# Optional. Specifies a created Plan object\nplanSpec:\ncommands:\n- k0supdate: # optional\nforceupdate: true # optional\ntargets:\ncontrollers:\ndiscovery:\nstatic:\nnodes:\n- ip-172-31-44-131\n- ip-172-31-42-134\n- ip-172-31-39-65\nworkers:\nlimits:\nconcurrent: 5\ndiscovery:\nselector:\nlabels: environment=staging\nfields: metadata.name=worker2\nairgapupdate: # optional\nworkers:\nlimits:\nconcurrent: 5\ndiscovery:\nselector:\nlabels: environment=staging\nfields: metadata.name=worker2\n
"},{"location":"autopilot/#faq","title":"FAQ","text":""},{"location":"autopilot/#q-how-do-i-apply-the-plan-and-controlnode-crds","title":"Q: How do I apply the Plan and ControlNode CRDs?","text":"

A: These CRD definitions are embedded in the autopilot binary and applied on startup. No additional action is needed.

"},{"location":"autopilot/#q-how-will-controlnode-instances-get-removed","title":"Q: How will ControlNode instances get removed?","text":"

A: ControlNode instances are created by autopilot controllers as they start up. When a controller disappears, its associated ControlNode instance is not removed automatically; it is the responsibility of the operator/administrator to clean it up.
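
As a sketch of that maintenance (the node name is a placeholder), stale objects can be inspected and removed with kubectl:

# List the ControlNode objects and delete the one for the removed controller.\nkubectl get controlnodes\nkubectl delete controlnode <controller-name>\n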

"},{"location":"autopilot/#q-i-upgraded-my-workers-and-now-kubelets-are-no-longer-reporting","title":"Q: I upgraded my workers, and now Kubelets are no longer reporting","text":"

You probably upgraded your workers to an API version greater than what is available on the API server.

https://kubernetes.io/releases/version-skew-policy/

Make sure that your controllers are at the desired version first before upgrading workers.

"},{"location":"backup/","title":"Backup/Restore overview","text":"

k0s has integrated support for backing up cluster state and configuration. The k0s backup utility aims to back up and restore the k0s-managed parts of the cluster.

The backups created by the k0s backup command contain the following pieces of your cluster:

  • certificates (the content of the <data-dir>/pki directory)
  • etcd snapshot, if the etcd datastore is used
  • Kine/SQLite snapshot, if the Kine/SQLite datastore is used
  • k0s.yaml
  • any custom defined manifests under the <data-dir>/manifests
  • any image bundles located under the <data-dir>/images
  • any helm configuration

Parts NOT covered by the backup utility:

  • PersistentVolumes of any running application
  • datastore, in case something other than etcd or Kine/SQLite is used
  • any configuration to the cluster introduced by manual changes (e.g. changes that weren't saved under the <data-dir>/manifests)

Any of the backup/restore related operations MUST be performed on the controller node.

"},{"location":"backup/#backuprestore-a-k0s-node-locally","title":"Backup/restore a k0s node locally","text":""},{"location":"backup/#backup-local","title":"Backup (local)","text":"

To create a backup, run the following command on the controller node:

k0s backup --save-path=<directory>\n

The directory used for the save-path value must exist and be writable. The default value is the current working directory. The command creates a backup archive using the following naming convention: k0s_backup_<ISODatetimeString>.tar.gz

Because of the DateTime in the file name, it is guaranteed that none of the previously created archives will be overwritten.

To output the backup archive to stdout, use - as the save path.
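
For example (a sketch; the destination path is a placeholder):

# Stream the backup archive to stdout and redirect it wherever needed.\nk0s backup --save-path - > /mnt/backups/k0s_backup.tar.gz\n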

"},{"location":"backup/#restore-local","title":"Restore (local)","text":"

To restore cluster state from the archive use the following command on the controller node:

k0s restore /tmp/k0s_backup_2021-04-26T19_51_57_000Z.tar.gz\n

The command will fail if the data directory for the current controller has data that overlaps with the backup archive content.

The command uses the archived k0s.yaml as the cluster configuration description.

If your cluster is HA, join the rest of the controller nodes to the cluster after restoring a single controller node. For example, the steps for an N-node cluster would be:

  • Restore the backup on a fresh machine
  • Run a controller there
  • Join the N-1 new machines to the cluster the same way as for the first setup (see the sketch below).
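
A rough sketch of rejoining the remaining controllers; the token path is a placeholder:

# On the restored controller: create a join token for an additional controller.\nsudo k0s token create --role=controller > controller-token\n# On each fresh machine: install and start a controller using that token.\nsudo k0s install controller --token-file /path/to/controller-token\nsudo k0s start\n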

To read the backup archive from stdin, use - as the file path.

"},{"location":"backup/#encrypting-backups-local","title":"Encrypting backups (local)","text":"

By using - as the save or restore path, it is possible to pipe the backup archive through an encryption utility such as GnuPG or OpenSSL.

Note that unencrypted data will still briefly exist as temporary files on the local file system during backup archive generation.

"},{"location":"backup/#encrypting-backups-using-gnupg","title":"Encrypting backups using GnuPG","text":"

Follow the instructions for your operating system to install the gpg command if it is not already installed.

This tutorial only covers the bare minimum for example purposes. For secure key management practices and advanced usage refer to the GnuPG user manual.

To generate a new key-pair, use:

gpg --gen-key\n

The key will be stored in your key ring.

gpg --list-keys\n

This will output a list of keys:

/home/user/.gnupg/pubring.gpg\n------------------------------\npub   4096R/BD33228F 2022-01-13\nuid                  Example User <user@example.com>\nsub   4096R/2F78C251 2022-01-13\n

To export the private key for decrypting the backup on another host, note the key ID (\"BD33228F\" in this example) in the list and use:

gpg --export-secret-keys --armor BD33228F > k0s.key\n

To create an encrypted k0s backup:

k0s backup --save-path - | gpg --encrypt --recipient user@example.com > backup.tar.gz.gpg\n
"},{"location":"backup/#restoring-encrypted-backups-using-gnupg","title":"Restoring encrypted backups using GnuPG","text":"

You must have the private key in your gpg keychain. To import the key that was exported in the previous example, use:

gpg --import k0s.key\n

To restore the encrypted backup, use:

gpg --decrypt backup.tar.gz.gpg | k0s restore -\n
"},{"location":"backup/#backuprestore-a-k0s-cluster-using-k0sctl","title":"Backup/restore a k0s cluster using k0sctl","text":"

With k0sctl you can perform cluster level backup and restore remotely with one command.

"},{"location":"backup/#backup-remote","title":"Backup (remote)","text":"

To create a backup, run the following command:

k0sctl backup\n

k0sctl connects to the cluster nodes to create a backup. The backup file is stored in the current working directory.

"},{"location":"backup/#restore-remote","title":"Restore (remote)","text":"

To restore cluster state from the archive use the following command:

k0sctl apply --restore-from /path/to/backup_file.tar.gz\n

The control plane load balancer address (externalAddress) needs to remain the same between backup and restore. This is caused by the fact that all worker node components connect to this address and cannot currently be re-configured.

"},{"location":"cis_benchmark/","title":"Kube-bench Security Benchmark","text":"

Kube-bench is an open source tool which can be used to verify security best practices as defined in CIS Kubernetes Benchmark. It provides a number of tests to help harden your k0s clusters. By default, k0s will pass Kube-bench benchmarks with some exceptions, which are shown below.

"},{"location":"cis_benchmark/#run","title":"Run","text":"

Follow the Kube-bench quick start instructions.

After installing Kube-bench on the host that is running the k0s cluster, run the following command:

kube-bench run --config-dir docs/kube-bench/cfg/ --benchmark k0s-1.0\n
"},{"location":"cis_benchmark/#summary-of-disabled-checks","title":"Summary of disabled checks","text":""},{"location":"cis_benchmark/#master-node-security-configuration","title":"Master Node Security Configuration","text":"

The current configuration has a total of 8 master checks disabled:

  1. id: 1.2.10 - EventRateLimit requires external yaml config. It is left for the users to configure it

    type: skip\ntext: \"Ensure that the admission control plugin EventRateLimit is set (Manual)\"\n
  2. id: 1.2.12 - By default this isn't passed to the apiserver for air-gap functionality

    type: skip\ntext: \"Ensure that the admission control plugin AlwaysPullImages is set (Manual)\"\n
  3. id: 1.2.22 - For the sake of simplicity, all audit configuration is skipped in k0s. It is left for the users to configure it

    type: skip\ntext: \"Ensure that the --audit-log-path argument is set (Automated)\"\n
  4. id: 1.2.23 - For the sake of simplicity, all audit configuration is skipped in k0s. It is left for the users to configure it

    type: skip\ntext: \"Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)\"\n
  5. id: 1.2.24 - For the sake of simplicity, all audit configuration is skipped in k0s. It is left for the users to configure it

    type: skip\ntext: \"Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)\"\n
  6. id: 1.2.25 - For the sake of simplicity, all audit configuration is skipped in k0s. It is left for the users to configure it

    type: skip\ntext: \"Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)\"\n
  7. id: 1.2.33 - By default it is not enabled. Left for the users to decide

    type: skip\ntext: \"Ensure that the --encryption-provider-config argument is set as appropriate (Manual)\"\n
  8. id: 1.2.34 - By default it is not enabled. Left for the users to decide

    type: skip\ntext: \"Ensure that encryption providers are appropriately configured (Manual)\"\n
"},{"location":"cis_benchmark/#worker-node-security-configuration","title":"Worker Node Security Configuration","text":"

and 4 node checks disabled:

  1. id: 4.1.1 - not applicable since k0s does not use kubelet service file

    type: skip\ntext: \"Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)\"\n
  2. id: 4.1.2 - not applicable since k0s does not use kubelet service file

    type: skip\ntext: \"Ensure that the kubelet service file ownership is set to root:root (Automated)\"\n
  3. id: 4.2.6 - k0s does not set this. See https://github.com/kubernetes/kubernetes/issues/66693

    type: skip\ntext: \"Ensure that the --protect-kernel-defaults argument is set to true (Automated)\"\n
  4. id: 4.2.10 - k0s doesn't set this up because certs get auto rotated

    type: skip\ntext: \"Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)\"\n
"},{"location":"cis_benchmark/#control-plane-configuration","title":"Control Plane Configuration","text":"

and 3 checks disabled for the control plane:

  1. id: 3.1.1 - For the purpose of being fully automated, k0s skips this check

    type: skip\ntext: \"Client certificate authentication should not be used for users (Manual)\"\n
  2. id: 3.2.1 - the out-of-the-box configuration does not have any audit policy configuration, but users can customize it in the spec.api.extraArgs section of the config

    type: skip\ntext: \"Ensure that a minimal audit policy is created (Manual)\"\n
  3. id: 3.2.2 - Same as previous

    type: skip\ntext: \"Ensure that the audit policy covers key security concerns (Manual)\"\n
"},{"location":"cis_benchmark/#kubernetes-policies","title":"Kubernetes Policies","text":"

Policy checks are also disabled. The checks are manual, and it is up to the end user to decide on them.

"},{"location":"cloud-providers/","title":"Cloud providers","text":"

k0s builds Kubernetes components in providerless mode, meaning that cloud providers are not built into k0s-managed Kubernetes components. As such, you must externally configure the cloud providers to enable their support in your k0s cluster (for more information on running Kubernetes with cloud providers, refer to the Kubernetes documentation).

"},{"location":"cloud-providers/#external-cloud-providers","title":"External Cloud Providers","text":""},{"location":"cloud-providers/#enable-cloud-provider-support-in-kubelet","title":"Enable cloud provider support in kubelet","text":"

Even when all components are built in providerless mode, you still need to enable cloud provider mode for kubelet. To do this, run the workers with --enable-cloud-provider=true.

When deploying with k0sctl, you can add this into the installFlags of worker hosts.

spec:\nhosts:\n- ssh:\naddress: 10.0.0.1\nuser: root\nkeyPath: ~/.ssh/id_rsa\ninstallFlags:\n- --enable-cloud-provider\n- --kubelet-extra-args=\"--cloud-provider=external\"\nrole: worker\n
"},{"location":"cloud-providers/#deploy-the-cloud-provider","title":"Deploy the cloud provider","text":"

The easiest way to deploy cloud provider controllers is to run them on the k0s cluster itself.

Use the manifest deployer built into k0s to deploy your cloud provider as a k0s-managed stack: simply drop all required manifests into the /var/lib/k0s/manifests/aws/ directory, and k0s will handle the deployment.
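
For example (a sketch; the manifest file name is a placeholder):

# Create a stack directory for the cloud provider and drop the manifests into it.\nsudo mkdir -p /var/lib/k0s/manifests/aws/\nsudo cp aws-cloud-controller-manager.yaml /var/lib/k0s/manifests/aws/\n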

Note: The prerequisites for the various cloud providers can vary (for example, several require that configuration files be present on all of the nodes). Refer to your chosen cloud provider's documentation as necessary.

"},{"location":"cloud-providers/#k0s-cloud-provider","title":"k0s Cloud Provider","text":"

Alternatively, k0s provides its own lightweight cloud provider that can be used to statically assign ExternalIP values to worker nodes via Kubernetes annotations. This is beneficial for those who need to expose worker nodes externally via static IP assignments.

To enable this functionality, add the parameter --enable-k0s-cloud-provider=true to all controllers, and --enable-cloud-provider=true to all workers.

Adding a static IP address to a node using kubectl:

kubectl annotate \\\nnode <node> \\\nk0sproject.io/node-ip-external=<external IP>\n

Both IPv4 and IPv6 addresses are supported.

"},{"location":"cloud-providers/#defaults","title":"Defaults","text":"

The default node refresh interval is 2m, which can be overridden using the --k0s-cloud-provider-update-frequency=<duration> parameter when launching the controller(s).

The default port that the cloud provider binds to can be overridden using the --k0s-cloud-provider-port=<int> parameter when launching the controller(s).
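
For example, both defaults can be overridden when installing the controller (a sketch; the interval and port values are only illustrative):

sudo k0s install controller --enable-k0s-cloud-provider=true --k0s-cloud-provider-update-frequency=1m --k0s-cloud-provider-port=10258\n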

"},{"location":"commercial-support/","title":"Commercial support","text":"

Commercial support for k0s is offered by Mirantis Inc.

Mirantis can provide various levels of support, ranging from DevCare (9-to-5) all the way to OpsCare+ with a fully managed service.

On top of our normal release and support model, our commercial customers have access to critical security patches even for released versions that fall outside of the open source maintained releases.¹ Commercial support also includes support for k0s-related tooling such as k0sctl.

If you are interested in commercial support for k0s, check out our support description and contact us for further details.

  1. This is assuming there is a compatible release of the upstream project with the fix.

"},{"location":"configuration-validation/","title":"Configuration validation","text":"

The k0s command-line interface can validate configuration syntax:

k0s validate config --config path/to/config/file\n

The validate config sub-command can validate the following:

  1. YAML formatting
  2. SAN addresses
  3. Network providers
  4. Worker profiles
"},{"location":"configuration/","title":"Configuration options","text":""},{"location":"configuration/#using-a-configuration-file","title":"Using a configuration file","text":"

k0s can be installed without a config file. In that case the default configuration will be used. You can, though, create and run your own non-default configuration (used by the k0s controller nodes).

k0s supports providing only partial configurations. If a partial configuration is provided, k0s will use the defaults for any missing values.
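
For example, a minimal partial configuration that only switches the network provider might look as follows (a sketch); all other values fall back to their defaults:

apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\n  name: k0s\nspec:\n  network:\n    provider: calico\n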

  1. Generate a yaml config file that uses the default settings.

    mkdir -p /etc/k0s\nk0s config create > /etc/k0s/k0s.yaml\n
  2. Modify the new YAML config file according to your needs; refer to the Configuration file reference below. You can remove the default values if desired, as k0s supports partial configs too.

  3. Install k0s with your new config file.

    sudo k0s install controller -c /etc/k0s/k0s.yaml\n
  4. If you need to modify your existing configuration later on, you can change the config file even while k0s is running, but remember to restart k0s to apply the configuration changes.

    sudo k0s stop\nsudo k0s start\n
"},{"location":"configuration/#configuring-k0s-via-k0sctl","title":"Configuring k0s via k0sctl","text":"

k0sctl can deploy your configuration options at cluster creation time. Your options should be placed in the spec.k0s.config section of the k0sctl configuration file. See the section on how to install k0s via k0sctl and the k0sctl README for more information.

"},{"location":"configuration/#configuration-file-reference","title":"Configuration file reference","text":"

CAUTION: As many of the available options affect items deep in the stack, you should fully understand the correlation between the configuration file components and your specific environment before making any changes.

A YAML config file follows, with defaults as generated by the k0s config create command:

apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s\nspec:\napi:\naddress: 192.168.68.104\nexternalAddress: my-lb-address.example.com\nk0sApiPort: 9443\nport: 6443\nsans:\n- 192.168.68.104\ncontrollerManager: {}\nextensions:\nhelm:\nconcurrencyLevel: 5\ncharts: null\nrepositories: null\nstorage:\ncreate_default_storage_class: false\ntype: external_storage\ninstallConfig:\nusers:\netcdUser: etcd\nkineUser: kube-apiserver\nkonnectivityUser: konnectivity-server\nkubeAPIserverUser: kube-apiserver\nkubeSchedulerUser: kube-scheduler\nkonnectivity:\nadminPort: 8133\nagentPort: 8132\nnetwork:\ncalico: null\nclusterDomain: cluster.local\ndualStack: {}\nkubeProxy:\nmetricsBindAddress: 0.0.0.0:10249\nmode: iptables\nkuberouter:\nautoMTU: true\nhairpin: Enabled\nipMasq: false\nmetricsPort: 8080\nmtu: 0\npeerRouterASNs: \"\"\npeerRouterIPs: \"\"\nnodeLocalLoadBalancing:\nenabled: false\nenvoyProxy:\napiServerBindPort: 7443\nimage:\nimage: docker.io/envoyproxy/envoy-distroless\nversion: v1.24.1\nkonnectivityServerBindPort: 7132\ntype: EnvoyProxy\npodCIDR: 10.244.0.0/16\nprovider: kuberouter\nserviceCIDR: 10.96.0.0/12\nscheduler: {}\nstorage:\netcd:\nexternalCluster: null\npeerAddress: 192.168.68.104\ntype: etcd\ntelemetry:\nenabled: true\nfeatureGates:\n- name: feature_XXX\nenabled: true\ncomponents: [\"kubelet\", \"kube-api\", \"kube-scheduler\"]\n- name: feature_YYY\nenabled: true\n-\nname: feature_ZZZ\nenabled: false\n
"},{"location":"configuration/#spec-key-detail","title":"spec Key Detail","text":""},{"location":"configuration/#specapi","title":"spec.api","text":"Element Description externalAddress The loadbalancer address (for k0s controllers running behind a loadbalancer). Configures all cluster components to connect to this address and also configures this address for use when joining new nodes to the cluster. address Local address on which to bind an API. Also serves as one of the addresses pushed on the k0s create service certificate on the API. Defaults to first non-local address found on the node. sans List of additional addresses to push to API servers serving the certificate. extraArgs Map of key-values (strings) for any extra arguments to pass down to Kubernetes api-server process. port\u00b9 Custom port for kube-api server to listen on (default: 6443) k0sApiPort\u00b9 Custom port for k0s-api server to listen on (default: 9443)

¹ If port and k0sApiPort are used with the externalAddress element, the loadbalancer serving at externalAddress must listen on the same ports.

"},{"location":"configuration/#specstorage","title":"spec.storage","text":"Element Description type Type of the data store (valid values:etcd or kine). Note: Type etcd will cause k0s to create and manage an elastic etcd cluster within the controller nodes. etcd.peerAddress Node address used for etcd cluster peering. etcd.extraArgs Map of key-values (strings) for any extra arguments to pass down to etcd process. kine.dataSource kine datasource URL."},{"location":"configuration/#specnetwork","title":"spec.network","text":"Element Description provider Network provider (valid values: calico, kuberouter, or custom). For custom, you can push any network provider (default: kuberouter). Be aware that it is your responsibility to configure all of the CNI-related setups, including the CNI provider itself and all necessary host levels setups (for example, CNI binaries). Note: Once you initialize the cluster with a network provider the only way to change providers is through a full cluster redeployment. podCIDR Pod network CIDR to use in the cluster. serviceCIDR Network CIDR to use for cluster VIP services. clusterDomain Cluster Domain to be passed to the kubelet and the coredns configuration."},{"location":"configuration/#specnetworkcalico","title":"spec.network.calico","text":"Element Description mode vxlan (default), ipip or bird overlay Overlay mode: Always (default), CrossSubnet or Never (requires mode=vxlan to disable calico overlay-network). vxlanPort The UDP port for VXLAN (default: 4789). vxlanVNI The virtual network ID for VXLAN (default: 4096). mtu MTU for overlay network (default: 0, which causes Calico to detect optimal MTU during bootstrap). wireguard Enable wireguard-based encryption (default: false). Your host system must be wireguard ready (refer to the Calico documentation for details). flexVolumeDriverPath The host path for Calicos flex-volume-driver(default: /usr/libexec/k0s/kubelet-plugins/volume/exec/nodeagent~uds). Change this path only if the default path is unwriteable (refer to Project Calico Issue #2712 for details). Ideally, you will pair this option with a custom volumePluginDir in the profile you use for your worker nodes. ipAutodetectionMethod Use to force Calico to pick up the interface for pod network inter-node routing (default: \"\", meaning not set, so that Calico will instead use its defaults). For more information, refer to the Calico documentation. envVars Map of key-values (strings) for any calico-node environment variable."},{"location":"configuration/#specnetworkcalicoenvvars","title":"spec.network.calico.envVars","text":"

An environment variable's value must be a string, e.g.:

spec:\nnetwork:\nprovider: calico\ncalico:\nenvVars:\nTEST_BOOL_VAR: \"true\"\nTEST_INT_VAR: \"42\"\nTEST_STRING_VAR: test\n

k0s runs Calico with some predefined variables, which can be overwritten by setting a new value in spec.network.calico.envVars:

CALICO_IPV4POOL_CIDR: \"{{ spec.network.podCIDR }}\"\nCALICO_DISABLE_FILE_LOGGING: \"true\"\nFELIX_DEFAULTENDPOINTTOHOSTACTION: \"ACCEPT\"\nFELIX_LOGSEVERITYSCREEN: \"info\"\nFELIX_HEALTHENABLED: \"true\"\nFELIX_PROMETHEUSMETRICSENABLED: \"true\"\nFELIX_FEATUREDETECTOVERRIDE: \"ChecksumOffloadBroken=true\"\n

FELIX_FEATUREDETECTOVERRIDE: ChecksumOffloadBroken=true disables VXLAN offloading because of projectcalico/calico#4727.

In SingleStack mode there are additional vars:

FELIX_IPV6SUPPORT: \"false\"\n

In DualStack mode there are additional vars:

CALICO_IPV6POOL_NAT_OUTGOING: \"true\"\nFELIX_IPV6SUPPORT: \"true\"\nIP6: \"autodetect\"\nCALICO_IPV6POOL_CIDR: \"{{ spec.network.dualStack.IPv6podCIDR }}\"\n
"},{"location":"configuration/#specnetworkkuberouter","title":"spec.network.kuberouter","text":"Element Description autoMTU Autodetection of used MTU (default: true). mtu Override MTU setting, if autoMTU must be set to false). metricsPort Kube-router metrics server port. Set to 0 to disable metrics (default: 8080). peerRouterIPs Comma-separated list of global peer addresses. peerRouterASNs Comma-separated list of global peer ASNs. hairpin Hairpin mode, supported modes Enabled: enabled cluster wide, Allowed: must be allowed per service using annotations, Disabled: doesn't work at all (default: Enabled) hairpinMode Deprecated Use hairpin instead. If both hairpin and hairpinMode are defined, this is ignored. If only hairpinMode is configured explicitly activates hairpinMode (https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode). ipMasq IP masquerade for traffic originating from the pod network, and destined outside of it (default: false)

Note: Kube-router allows many networking aspects to be configured per node, service, and pod (for more information, refer to the Kube-router user guide).

"},{"location":"configuration/#specnetworkkubeproxy","title":"spec.network.kubeProxy","text":"Element Description disabled Disable kube-proxy altogether (default: false). mode Kube proxy operating mode, supported modes iptables, ipvs, userspace (default: iptables) iptables Kube proxy iptables settings ipvs Kube proxy ipvs settings nodePortAddresses Kube proxy nodePortAddresses

Default kube-proxy iptables settings:

iptables:\nmasqueradeAll: false\nmasqueradeBit: null\nminSyncPeriod: 0s\nsyncPeriod: 0s\n

Default kube-proxy ipvs settings:

ipvs:\nexcludeCIDRs: null\nminSyncPeriod: 0s\nscheduler: \"\"\nstrictARP: false\nsyncPeriod: 0s\ntcpFinTimeout: 0s\ntcpTimeout: 0s\nudpTimeout: 0s\n
"},{"location":"configuration/#specnetworknodelocalloadbalancing","title":"spec.network.nodeLocalLoadBalancing","text":"

Configuration options related to k0s's node-local load balancing feature.

Note: This feature is experimental! Expect instabilities and/or breaking changes.

| Element | Description |
| --- | --- |
| enabled | Indicates if node-local load balancing should be used to access Kubernetes API servers from worker nodes. Default: false. |
| type | The type of the node-local load balancer to deploy on worker nodes. Default: EnvoyProxy. (This is the only option for now.) |
| envoyProxy | Configuration options related to the \"EnvoyProxy\" type of load balancing. |
"},{"location":"configuration/#specnetworknodelocalloadbalancingenvoyproxy","title":"spec.network.nodeLocalLoadBalancing.envoyProxy","text":"

Configuration options required for using Envoy as the backing implementation for node-local load balancing.

Note: This type of load balancing is not supported on ARMv7 workers.

| Element | Description |
| --- | --- |
| image | The OCI image that's being used for the Envoy Pod. |
| imagePullPolicy | The pull policy being used for the Envoy Pod. Defaults to spec.images.default_pull_policy if omitted. |
| apiServerBindPort | Port number on a worker's loopback interface on which to bind the Envoy load balancer for the Kubernetes API server. Default: 7443. |
| konnectivityServerBindPort | Port number on a worker's loopback interface on which to bind the Envoy load balancer for the konnectivity server. Default: 7132. |
"},{"location":"configuration/#speccontrollermanager","title":"spec.controllerManager","text":"
| Element | Description |
| --- | --- |
| extraArgs | Map of key-values (strings) for any extra arguments you want to pass down to the Kubernetes controller manager process. |
"},{"location":"configuration/#specscheduler","title":"spec.scheduler","text":"
| Element | Description |
| --- | --- |
| extraArgs | Map of key-values (strings) for any extra arguments you want to pass down to the Kubernetes scheduler process. |
"},{"location":"configuration/#specworkerprofiles","title":"spec.workerProfiles","text":"

Worker profiles are used to manage worker-specific configuration in a centralized manner. A ConfigMap is generated for each worker profile. Based on the --profile argument given to the k0s worker, the configuration in the corresponding ConfigMap is picked up during startup.
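
For example, a worker can be pointed at a profile by name when it is installed (a sketch; the profile name and token path are placeholders):

sudo k0s install worker --profile custom-role --token-file /path/to/worker-token\n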

The worker profiles are defined as an array. Each element has following properties:

| Property | Description |
| --- | --- |
| name | String; name to use as profile selector for the worker process. |
| values | Object; Kubelet configuration overrides, see below for details. |
"},{"location":"configuration/#specworkerprofilesvalues-kubelet-configuration-overrides","title":"spec.workerProfiles[].values (Kubelet configuration overrides)","text":"

The Kubelet configuration overrides of a profile override the defaults defined by k0s.

Note that there are several fields that cannot be overridden:

  • clusterDNS
  • clusterDomain
  • apiVersion
  • kind
  • staticPodURL
"},{"location":"configuration/#specfeaturegates","title":"spec.featureGates","text":"

Available components are:

  • kube-apiserver
  • kube-controller-manager
  • kubelet
  • kube-scheduler
  • kube-proxy

If components are omitted, the feature gate propagates to all of the above components.

Modifies extraArgs.

"},{"location":"configuration/#example","title":"Example","text":"
spec:\nfeatureGates:\n- name: feature-gate-0\nenabled: true\ncomponents: [\"kube-apiserver\", \"kube-controller-manager\", \"kubelet\", \"kube-scheduler\"]\n- name: feature-gate-1\nenabled: true\n- name: feature-gate-2\nenabled: false\n
"},{"location":"configuration/#kubelet-feature-gates-example","title":"Kubelet feature gates example","text":"

Below is an example of a k0s config with kubelet feature gates enabled:

spec:\nfeatureGates:\n- name: DevicePlugins\nenabled: true\ncomponents: [\"kubelet\"]\n- name: Accelerators\nenabled: true\ncomponents: [\"kubelet\"]\n- name: AllowExtTrafficLocalEndpoints\nenabled: false\n
"},{"location":"configuration/#configuration-examples","title":"Configuration examples","text":""},{"location":"configuration/#custom-volumeplugindir","title":"Custom volumePluginDir","text":"
spec:\nworkerProfiles:\n- name: custom-pluginDir\nvalues:\nvolumePluginDir: /var/libexec/k0s/kubelet-plugins/volume/exec\n
"},{"location":"configuration/#eviction-policy","title":"Eviction Policy","text":"
spec:\nworkerProfiles:\n- name: custom-eviction\nvalues:\nevictionHard:\nmemory.available: \"500Mi\"\nnodefs.available: \"1Gi\"\nimagefs.available: \"100Gi\"\nevictionMinimumReclaim:\nmemory.available: \"0Mi\"\nnodefs.available: \"500Mi\"\nimagefs.available: \"2Gi\"\n
"},{"location":"configuration/#unsafe-sysctls","title":"Unsafe Sysctls","text":"
spec:\nworkerProfiles:\n- name: custom-eviction\nvalues:\nallowedUnsafeSysctls:\n- fs.inotify.max_user_instances\n
"},{"location":"configuration/#specimages","title":"spec.images","text":"

Nodes under the images key all have the same basic structure:

spec:\nimages:\ncoredns:\nimage: quay.io/coredns/coredns\nversion: v1.7.0\n

If you want the list of default images and their versions to be included, use k0s config create --include-images.

"},{"location":"configuration/#available-keys","title":"Available keys","text":"
  • spec.images.konnectivity
  • spec.images.metricsserver
  • spec.images.kubeproxy
  • spec.images.coredns
  • spec.images.pause
  • spec.images.calico.cni
  • spec.images.calico.flexvolume
  • spec.images.calico.node
  • spec.images.calico.kubecontrollers
  • spec.images.kuberouter.cni
  • spec.images.kuberouter.cniInstaller
  • spec.images.repository¹

¹ If spec.images.repository is set and not empty, every image will be pulled from images.repository.

If spec.images.default_pull_policy is set and not empty, it will be used as a pull policy for each bundled image.
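
For example, a sketch that sets a cluster-wide pull policy (the standard Kubernetes values Always, IfNotPresent, and Never apply):

spec:\n  images:\n    default_pull_policy: IfNotPresent\n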

"},{"location":"configuration/#image-example","title":"Image example","text":"
images:\nrepository: \"my.own.repo\"\nkonnectivity:\nimage: calico/kube-controllers\nversion: v3.16.2\nmetricsserver:\nimage: registry.k8s.io/metrics-server/metrics-server\nversion: v0.6.4\n

At runtime, the image names are calculated as my.own.repo/calico/kube-controllers:v3.16.2 and my.own.repo/metrics-server/metrics-server:v0.6.4. This only affects the image pull location, and thus omitting an image specification here will not disable component deployment.

"},{"location":"configuration/#specextensionshelm","title":"spec.extensions.helm","text":"

spec.extensions.helm is the config file key in which you configure the list of Helm repositories and charts to deploy during cluster bootstrap (for more information, refer to Helm Charts).

"},{"location":"configuration/#specextensionsstorage","title":"spec.extensions.storage","text":"

spec.extensions.storage controls the bundled storage provider. With the default value external, no storage is deployed.

To enable the embedded host-local storage provider, use the following configuration:

spec:\nextensions:\nstorage:\ntype: openebs_local_storage\n
"},{"location":"configuration/#speckonnectivity","title":"spec.konnectivity","text":"

The spec.konnectivity key is the config file key in which you configure Konnectivity-related settings.

  • agentPort agent port to listen on (default 8132)
  • adminPort admin port to listen on (default 8133)
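
For example, to state the defaults listed above explicitly in k0s.yaml (a sketch; adjust the ports to your environment):

spec:
  konnectivity:
    agentPort: 8132
    adminPort: 8133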
"},{"location":"configuration/#spectelemetry","title":"spec.telemetry","text":"

To improve the end-user experience, k0s is configured by default to collect telemetry data from clusters and send it to the k0s development team. To disable the telemetry function, change the enabled setting to false.

The telemetry interval is ten minutes.

spec:\ntelemetry:\nenabled: true\n
"},{"location":"configuration/#disabling-controller-components","title":"Disabling controller components","text":"

k0s allows you to completely disable some of the system components. This lets users build a minimal Kubernetes control plane and use whatever components they need to fulfill their control plane needs. Disabling the system components happens through a command line flag for the controller process:

--disable-components strings                     disable components (valid items: api-config,autopilot,control-api,coredns,csr-approver,endpoint-reconciler,helm,konnectivity-server,kube-controller-manager,kube-proxy,kube-scheduler,metrics-server,network-provider,node-role,system-rbac,worker-config)\n

Note: As of k0s 1.26, the kubelet-config component has been replaced by the worker-config component. k0s will issue a warning when the old component name is being used. It is scheduled for removal in k0s 1.27. Please update to the new component name.

If you use k0sctl, add the flag for the first controller at spec.hosts.installFlags in the config file when installing the cluster, for example:

spec:\nhosts:\n- role: controller\ninstallFlags:\n- --disable-components=metrics-server\n

As seen from the component list, the only always-on component is the Kubernetes api-server; without it, k0s serves no purpose.

"},{"location":"conformance-testing/","title":"Kubernetes conformance testing for k0s","text":"

We run the conformance testing for the last RC build of a release. Follow the instructions in the conformance testing repository.

In a nutshell, you need to:

  • Setup k0s on some VMs/bare metal boxes
  • Download the sonobuoy tool, if you do not already have it
  • Run the conformance tests with something like sonobuoy run --mode=certified-conformance
  • Wait for a couple of hours
  • Collect results
"},{"location":"containerd_config/","title":"Containerd config","text":"

See runtime.

"},{"location":"custom-ca/","title":"Install using custom CA certificates and SA key pair","text":"

k0s generates all needed certificates automatically in the <data-dir>/pki directory (/var/lib/k0s/pki, by default).

Sometimes, however, there is a need to have the CA certificates and SA key pair in advance. To make this work, just put the files in <data-dir>/pki and <data-dir>/pki/etcd:

export LIFETIME=365\nmkdir -p /var/lib/k0s/pki/etcd\ncd /var/lib/k0s/pki\nopenssl genrsa -out ca.key 2048\nopenssl req -x509 -new -nodes -key ca.key -sha256 -days $LIFETIME -out ca.crt -subj \"/CN=Custom CA\"\nopenssl genrsa -out sa.key 2048\nopenssl rsa -in sa.key -outform PEM -pubout -out sa.pub\ncd ./etcd\nopenssl genrsa -out ca.key 2048\nopenssl req -x509 -new -nodes -key ca.key -sha256 -days $LIFETIME -out ca.crt -subj \"/CN=Custom CA\"\n

Then you can install k0s as usual.

"},{"location":"custom-ca/#pre-generated-tokens","title":"Pre-generated tokens","text":"

It's possible to generate join tokens in advance without having a running cluster.

k0s token pre-shared --role worker --cert /var/lib/k0s/pki/ca.crt --url https://<controller-ip>:6443/\n

The command above generates a join token and a Secret. A Secret should be deployed to the cluster to authorize the token. For example, you can put the Secret under the manifest directory and it will be deployed automatically.
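
As a hedged illustration of the last point, assuming the generated Secret manifest ended up in a local file (the file and stack directory names below are hypothetical), it can be placed under the manifest deployer path on a controller so that it gets deployed automatically:

sudo mkdir -p /var/lib/k0s/manifests/pre-shared-tokens
sudo cp pre-shared-token-secret.yaml /var/lib/k0s/manifests/pre-shared-tokens/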

"},{"location":"custom-cri-runtime/","title":"Custom cri runtime","text":"

See runtime.

"},{"location":"dockershim/","title":"Dockershim deprecation - what does it mean for K0s?","text":"

Back in December 2020, Kubernetes announced the deprecation of the dockershim from version 1.24 onwards. As a consequence, k0s 1.24 and above don't support the dockershim either.

"},{"location":"dockershim/#what-is-dockershim-and-why-was-it-deprecated","title":"What is dockershim and why was it deprecated?","text":"

The dockershim is a transparent library that intercepts API calls to the Kubernetes API and handles their operation through the Docker API. Early versions of Kubernetes used this shim in order to allow containers to run on Docker. Later versions of Kubernetes started creating containers via the CRI (Container Runtime Interface). Since CRI has become the de-facto default runtime interface for Kubernetes, maintaining the dockershim turned into a heavy burden for the Kubernetes maintainers, and so the decision to deprecate the built-in dockershim support came into being.

"},{"location":"dockershim/#so-whats-going-to-happen-to-dockershim","title":"So what's going to happen to dockershim?","text":"

Dockershim is not gone. It's only changed ownership. Mirantis has agreed to maintain dockershim (now called cri-dockerd). See: The Future of Dockershim is cri-dockerd.

From Kubernetes version 1.24 onwards, containers run via CRI out of the box, but if you want to continue using Docker, you are free to do so using cri-dockerd.

In order to continue to use the Docker engine with Kubernetes v1.24+, you will have to migrate all worker nodes to use cri-dockerd.

"},{"location":"dockershim/#migrating-to-cri-dockerd","title":"Migrating to CRI-Dockerd","text":"

This migration guide assumes that you've been running k0s with docker on version 1.23 and below.

The following steps will need to be done on ALL k0s worker nodes, or single-node controllers. Basically, any node that runs containers will need to be migrated using the process detailed below.

Please note that there are currently some pitfalls around container metrics when using CRI-dockerd.

"},{"location":"dockershim/#cordon-and-drain-the-node","title":"Cordon and drain the node","text":"

Get a list of all nodes (k0s is still version 1.23, which still includes the dockershim):

sudo k0s kubectl get nodes -o wide\n\nNAME                                        STATUS   ROLES           AGE   VERSION       INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION    CONTAINER-RUNTIME\nip-10-0-49-188.eu-west-1.compute.internal   Ready    control-plane   52m   v1.28.1+k0s   10.0.49.188   <none>        Ubuntu 20.04.4 LTS   5.13.0-1022-aws   docker://20.10.16\nip-10-0-62-250.eu-west-1.compute.internal   Ready    <none>          12s   v1.28.1+k0s   10.0.62.250   <none>        Ubuntu 20.04.4 LTS   5.13.0-1017-aws   docker://20.10.16\n

Cordon and drain the nodes (migrate them one by one):

sudo k0s kubectl cordon ip-10-0-62-250.eu-west-1.compute.internal \nsudo k0s kubectl drain ip-10-0-62-250.eu-west-1.compute.internal --ignore-daemonsets\n
sudo k0s kubectl get nodes -o wide\n\nNAME                                        STATUS                     ROLES           AGE     VERSION       INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION    CONTAINER-RUNTIME\nip-10-0-49-188.eu-west-1.compute.internal   Ready                      control-plane   56m     v1.28.1+k0s   10.0.49.188   <none>        Ubuntu 20.04.4 LTS   5.13.0-1022-aws   docker://20.10.16\nip-10-0-62-250.eu-west-1.compute.internal   Ready,SchedulingDisabled   <none>          3m40s   v1.28.1+k0s   10.0.62.250   <none>        Ubuntu 20.04.4 LTS   5.13.0-1017-aws   docker://20.10.16\n

Stop k0s on the node:

sudo k0s stop\n
"},{"location":"dockershim/#installing-cri-dockerd","title":"Installing CRI-Dockerd","text":"

Download the Latest cri-dockerd deb package:

cd /tmp\n\n# Get the deb file name for ubuntu-jammy\nOS=\"ubuntu-jammy\"\nPKG=$(curl -s https://api.github.com/repos/Mirantis/cri-dockerd/releases/latest | grep ${OS} | grep http | cut -d '\"' -f 4)\n\nwget ${PKG} -O cri-dockerd-latest.deb\n\nsudo dpkg -i cri-dockerd-latest.deb\n\nSelecting previously unselected package cri-dockerd.\n(Reading database ... 164618 files and directories currently installed.)\nPreparing to unpack cri-dockerd-latest.deb ...\nUnpacking cri-dockerd (0.2.1~3-0~ubuntu-jammy) ...\nSetting up cri-dockerd (0.2.1~3-0~ubuntu-jammy) ...\nCreated symlink /etc/systemd/system/multi-user.target.wants/cri-docker.service \u2192 /lib/systemd/system/cri-docker.service.\nCreated symlink /etc/systemd/system/sockets.target.wants/cri-docker.socket \u2192 /lib/systemd/system/cri-docker.socket.\n

Verify the correct version:

which cri-dockerd\n/usr/bin/cri-dockerd\n\ncri-dockerd --version\ncri-dockerd 0.2.1 (HEAD)\n

Make sure cri-dockerd is started:

sudo systemctl status cri-docker.service\n\u25cf cri-docker.service - CRI Interface for Docker Application Container Engine\n     Loaded: loaded (/lib/systemd/system/cri-docker.service; enabled; vendor preset: enabled)\nActive: active (running) since Wed 2022-05-25 14:27:31 UTC; 1min 23s ago\nTriggeredBy: \u25cf cri-docker.socket\n       Docs: https://docs.mirantis.com\n   Main PID: 1404151 (cri-dockerd)\nTasks: 9\nMemory: 15.3M\n     CGroup: /system.slice/cri-docker.service\n             \u2514\u25001404151 /usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=\n
"},{"location":"dockershim/#configure-k0s-to-use-dockershim","title":"Configure K0s to use dockershim","text":"

Replace the Docker socket with the cri-dockerd socket in the k0s systemd service file (the step below should be run AFTER upgrading k0s to version 1.24):

sudo sed -i -e 's_--cri-socket=docker:unix:///var/run/docker.sock_--cri-socket docker:unix:///var/run/cri-dockerd.sock_' /etc/systemd/system/k0sworker.service\nsudo systemctl daemon-reload\n
"},{"location":"dockershim/#start-k0s-with-cri-dockerd","title":"Start k0s with cri-dockerd","text":"
sudo k0s start\n

Verify the running pods via docker ps:

docker ps --format \"table {{.ID}}\\t{{.Names}}\\t{{.State}}\\t{{.Status}}\\t{{.Image}}\"\n\nCONTAINER ID   NAMES                                                                                                STATE     STATUS          IMAGE\n1b9b4624ddfd   k8s_konnectivity-agent_konnectivity-agent-5jpd7_kube-system_1b3101ea-baeb-4a22-99a2-088d7ca5be85_1   running   Up 51 minutes   quay.io/k0sproject/apiserver-network-proxy-agent\n414758a8a951   k8s_kube-router_kube-router-qlkgg_kube-system_9a1b67bf-5347-4acd-98ac-f9a67f2db730_1                 running   Up 51 minutes   3a67679337a5\nb81960bb304c   k8s_kube-proxy_kube-proxy-tv95n_kube-system_164dc9f8-f47c-4f6c-acb7-ede5dbcd63cd_1                   running   Up 51 minutes   quay.io/k0sproject/kube-proxy\nfb888cbc5ae0   k8s_POD_kube-router-qlkgg_kube-system_9a1b67bf-5347-4acd-98ac-f9a67f2db730_0                         running   Up 51 minutes   registry.k8s.io/pause:3.1\n382d0a938c9d   k8s_POD_konnectivity-agent-5jpd7_kube-system_1b3101ea-baeb-4a22-99a2-088d7ca5be85_0                  running   Up 51 minutes   registry.k8s.io/pause:3.1\n72d4a47b5609   k8s_POD_kube-proxy-tv95n_kube-system_164dc9f8-f47c-4f6c-acb7-ede5dbcd63cd_0                          running   Up 51 minutes   registry.k8s.io/pause:3.1\n

On the controller, you'll be able to see the worker started with the new docker container runtime:

sudo k0s kubectl get nodes -o wide\n\nNAME                                        STATUS                     ROLES           AGE    VERSION       INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION    CONTAINER-RUNTIME\nip-10-0-49-188.eu-west-1.compute.internal   Ready                      control-plane   117m   v1.28.1+k0s   10.0.49.188   <none>        Ubuntu 20.04.4 LTS   5.13.0-1022-aws   docker://20.10.16\nip-10-0-62-250.eu-west-1.compute.internal   Ready,SchedulingDisabled   <none>          64m    v1.28.1+k0s   10.0.62.250   <none>        Ubuntu 20.04.4 LTS   5.13.0-1017-aws   docker://20.10.16\n
"},{"location":"dockershim/#uncordon-the-node","title":"Uncordon the Node","text":"
sudo k0s kubectl uncordon ip-10-0-62-250.eu-west-1.compute.internal\n\nnode/ip-10-0-62-250.eu-west-1.compute.internal uncordoned\n

You should now see the node Ready for scheduling, with the Docker runtime:

sudo k0s kubectl get nodes -o wide\n\nNAME                                        STATUS   ROLES           AGE    VERSION       INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION    CONTAINER-RUNTIME\nip-10-0-49-188.eu-west-1.compute.internal   Ready    control-plane   119m   v1.28.1+k0s   10.0.49.188   <none>        Ubuntu 20.04.4 LTS   5.13.0-1022-aws   docker://20.10.16\nip-10-0-62-250.eu-west-1.compute.internal   Ready    <none>          66m    v1.28.1+k0s   10.0.62.250   <none>        Ubuntu 20.04.4 LTS   5.13.0-1017-aws   docker://20.10.16\n
"},{"location":"dual-stack/","title":"Dual-stack Networking","text":"

Note: Dual stack networking setup requires that you configure Calico or a custom CNI as the CNI provider.

Use the following k0s.yaml as a template to enable dual-stack networking. This configuration will set up the bundled Calico CNI, enable feature gates for the Kubernetes components, and set up kube-controller-manager.

spec:\nnetwork:\npodCIDR: \"10.244.0.0/16\"\nserviceCIDR: \"10.96.0.0/12\"\nprovider: calico\ncalico:\nmode: \"bird\"\ndualStack:\nenabled: true\nIPv6podCIDR: \"fd00::/108\"\nIPv6serviceCIDR: \"fd01::/108\"\n
"},{"location":"dual-stack/#cni-settings-calico","title":"CNI Settings: Calico","text":"

For cross-pod connectivity, use BIRD for the backend. Calico does not support tunneling for IPv6, so the VXLAN and IPIP backends do not work.

Note: In any Calico backend mode other than BIRD, pods can only reach pods on the same node.

"},{"location":"dual-stack/#cni-settings-external-cni","title":"CNI Settings: External CNI","text":"

Although the k0s.yaml dualStack section enables all of the necessary feature gates for the Kubernetes components, when using an external CNI it must be set up to support IPv6 as well.

"},{"location":"dual-stack/#additional-resources","title":"Additional Resources","text":"
  • https://kubernetes.io/docs/concepts/services-networking/dual-stack/
  • https://kubernetes.io/docs/tasks/network/validate-dual-stack/
  • https://www.projectcalico.org/dual-stack-operation-with-calico-on-kubernetes/
  • https://docs.projectcalico.org/networking/ipv6
"},{"location":"dynamic-configuration/","title":"Dynamic configuration","text":"

k0s comes with the option to enable dynamic configuration for cluster level components. This covers all the components other than etcd (or sqlite) and the Kubernetes api-server. This option enables k0s configuration directly via Kubernetes API as opposed to using a configuration file for all cluster configuration.

This feature has to be enabled for every controller in the cluster using the --enable-dynamic-config flag in k0s controller or k0s install controller commands. Having both types of controllers in the same cluster will cause a conflict.
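
For example, when installing a controller as a service, the flag can be passed like this (the config path is illustrative):

sudo k0s install controller --enable-dynamic-config -c /etc/k0s/k0s.yaml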

"},{"location":"dynamic-configuration/#dynamic-vs-static-configuration","title":"Dynamic vs. static configuration","text":"

The existing and enabled-by-default method is what we call static configuration. In this mode the k0s process reads the config from the given YAML file (or uses the default config if none is given by the user) and configures every component accordingly. This means that for any configuration change the cluster admin has to restart all controllers in the cluster and have matching configs on each controller node.

In dynamic configuration mode, the first controller to boot up when the cluster is created uses the given config YAML as a bootstrap configuration and stores it in the Kubernetes API. All the other controllers will find the config existing in the API and will use it as the source of truth for configuring all the components except for etcd and kube-apiserver. After the initial cluster bootstrap, the source of truth for all controllers is the configuration object in the Kubernetes API.

"},{"location":"dynamic-configuration/#cluster-configuration-vs-controller-node-configuration","title":"Cluster configuration vs. controller node configuration","text":"

In the k0s configuration options there are some options that are cluster-wide and some that are specific to each controller node in the cluster. The following list outlines which options are controller node specific and have to be configured only via the local file:

  • spec.api - these options configure how the local Kubernetes API server is set up
  • spec.storage - these options configure how the local storage (etcd or sqlite) is set up

In the case of an HA control plane, all the controllers need this part of the configuration, as otherwise they will not be able to get the storage and the Kubernetes API server running.

"},{"location":"dynamic-configuration/#configuration-location","title":"Configuration location","text":"

The cluster-wide configuration is stored in the Kubernetes API as a custom resource called clusterconfig. There's currently only one instance, named k0s. You can edit the configuration with whatever means you prefer, for example with:

k0s config edit\n

This will open the configuration object for editing in your system's default editor.

"},{"location":"dynamic-configuration/#configuration-reconciliation","title":"Configuration reconciliation","text":"

The dynamic configuration uses the typical operator pattern for operation. The k0s controller detects when the object changes and reconciles the configuration changes into how the different components are configured. So, say you want to change the MTU setting for kube-router CNI networking, you'd change the config to contain e.g.:

    kuberouter:\nmtu: 1350\nautoMTU: false\n

This will change the kube-router related ConfigMap and thus make kube-router use different MTU settings for new pods.

"},{"location":"dynamic-configuration/#configuration-options","title":"Configuration options","text":"

The configuration object is a 1-to-1 mapping with the existing configuration YAML. All the configuration options EXCEPT options under spec.api and spec.storage are dynamically reconciled.

As with any Kubernetes cluster, there are certain things that just cannot be changed on-the-fly. This is the list of non-changeable options:

  • network.podCIDR
  • network.serviceCIDR
  • network.provider
"},{"location":"dynamic-configuration/#configuration-status","title":"Configuration status","text":"

The dynamic configuration reconciler operator will write status events for all the changes it detects. To see all dynamic config related events, use:

k0s config status\n
LAST SEEN   TYPE      REASON                OBJECT              MESSAGE\n64s         Warning   FailedReconciling     clusterconfig/k0s   failed to validate config: [invalid pod CIDR invalid ip address]\n59s         Normal    SuccessfulReconcile   clusterconfig/k0s   Succesfully reconciler cluster config\n69s         Warning   FailedReconciling     clusterconfig/k0s   cannot change CNI provider from kuberouter to calico\n
"},{"location":"environment-variables/","title":"Environment variables","text":"

k0s install does not support environment variables.

Setting environment variables for components used by k0s depends on the used init system. The environment variables set in k0scontroller or k0sworker service will be inherited by k0s components, such as etcd, containerd, konnectivity, etc.

Component specific environment variables can be set in k0scontroller or k0sworker service. For example: for CONTAINERD_HTTPS_PROXY, the prefix CONTAINERD_ will be stripped and converted to HTTPS_PROXY in the containerd process.

Components that follow their own env prefix convention, such as ETCD_xxx, are handled specially, i.e. the prefix is not stripped. For example, ETCD_MAX_WALS will still be ETCD_MAX_WALS in the etcd process.

The proxy envs HTTP_PROXY, HTTPS_PROXY and NO_PROXY are always overridden by component-specific environment variables, so ETCD_HTTPS_PROXY will still be converted to HTTPS_PROXY in the etcd process.

"},{"location":"environment-variables/#systemd","title":"SystemD","text":"

Create a drop-in directory and add a config file with the desired environment variable:

mkdir -p /etc/systemd/system/k0scontroller.service.d\ntee -a /etc/systemd/system/k0scontroller.service.d/http-proxy.conf <<EOT\n[Service]\nEnvironment=HTTP_PROXY=192.168.33.10:3128\nEOT\n
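
The same drop-in mechanism can carry the component-prefixed variables described earlier; a sketch with illustrative values:

tee -a /etc/systemd/system/k0scontroller.service.d/component-env.conf <<EOT
[Service]
# CONTAINERD_ prefix is stripped: containerd sees HTTPS_PROXY
Environment=CONTAINERD_HTTPS_PROXY=192.168.33.10:3128
# ETCD_ prefix is kept as-is: etcd sees ETCD_MAX_WALS
Environment=ETCD_MAX_WALS=5
EOT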
"},{"location":"environment-variables/#openrc","title":"OpenRC","text":"

Export the desired environment variable, overriding the service configuration in the /etc/conf.d directory:

echo 'export HTTP_PROXY=\"192.168.33.10:3128\"' > /etc/conf.d/k0scontroller\n
"},{"location":"experimental-windows/","title":"Run k0s worker nodes in Windows","text":"

IMPORTANT: Windows support for k0s is under active development and must be considered experimental.

"},{"location":"experimental-windows/#prerequisites","title":"Prerequisites","text":"

The cluster must be running at least one worker node and control plane on Linux. You can use Windows to run additional worker nodes.

"},{"location":"experimental-windows/#run-k0s","title":"Run k0s","text":"

Note: The k0s.exe supervises kubelet.exe and kube-proxy.exe.

During the first run, the Calico install script is created as C:\bootstrap.ps1. This bootstrap script downloads the Calico binaries, builds the pause container and sets up vSwitch settings.

Install Mirantis Container Runtime on the Windows node(s), as it is required for the initial Calico set up.

k0s worker --cri-socket=docker:tcp://127.0.0.1:2375 --cidr-range=<cidr_range> --cluster-dns=<clusterdns> --api-server=<k0s api> <token>\n

You must initiate the cluster control plane with the correct config.

"},{"location":"experimental-windows/#configuration","title":"Configuration","text":""},{"location":"experimental-windows/#strict-affinity","title":"Strict-affinity","text":"

You must enable strict affinity to run the Windows node.

If the spec.network.calico.withWindowsNodes field is set to true (it is set to false by default) the additional calico related manifest /var/lib/k0s/manifests/calico/calico-IPAMConfig-ipamconfig.yaml is created with the following values:

---\napiVersion: crd.projectcalico.org/v1\nkind: IPAMConfig\nmetadata:\nname: default\nspec:\nstrictAffinity: true\n

Alternatively, you can manually execute calicoctl:

calicoctl ipam configure --strictaffinity=true\n
"},{"location":"experimental-windows/#network-connectivity-in-aws","title":"Network connectivity in AWS","text":"

Disable the Change Source/Dest. Check option for the network interface attached to your EC2 instance. In AWS, the console option for the network interface is in the Actions menu.

"},{"location":"experimental-windows/#hacks","title":"Hacks","text":"

k0s offers the following CLI arguments in lieu of a formal means of passing cluster settings from the control plane to the worker:

  • cidr-range
  • cluster-dns
  • api-server
"},{"location":"experimental-windows/#useful-commands","title":"Useful commands","text":""},{"location":"experimental-windows/#run-pod-with-cmdexe-shell","title":"Run pod with cmd.exe shell","text":"
kubectl run win --image=hello-world:nanoserver --command=true -i --attach=true -- cmd.exe\n
"},{"location":"experimental-windows/#manifest-for-pod-with-iis-web-server","title":"Manifest for pod with IIS web-server","text":"
apiVersion: v1\nkind: Pod\nmetadata:\nname: iis\nspec:\ncontainers:\n- name: iis\nimage: mcr.microsoft.com/windows/servercore/iis\nimagePullPolicy: IfNotPresent\n
"},{"location":"extensions/","title":"Cluster extensions","text":"

k0s allows users to extend cluster functionality with extensions.

At the moment, the only supported type of extension is Helm-based charts.

The default configuration has no extensions.

"},{"location":"extensions/#helm-based-extensions","title":"Helm based extensions","text":""},{"location":"extensions/#configuration-example","title":"Configuration example","text":"
helm:\nrepositories:\n- name: stable\nurl: https://charts.helm.sh/stable\n- name: prometheus-community\nurl: https://prometheus-community.github.io/helm-charts\ncharts:\n- name: prometheus-stack\nchartname: prometheus-community/prometheus\nversion: \"11.16.8\"\nvalues: |\nstorageSpec:\nemptyDir:\nmedium: Memory\nnamespace: default\n# We don't need to specify the repo in the repositories section for OCI charts\n- name: oci-chart\nchartname: oci://registry:8080/chart\nversion: \"0.0.1\"\nvalues: \"\"\nnamespace: default\n# Other way is to use local tgz file with chart\n# the file must exist all controller nodes\n- name: tgz-chart\nchartname: /tmp/chart.tgz\nversion: \"0.0.1\"\nvalues: \"\"\nnamespace: default\n

By using the configuration above, the cluster would:

  • add stable and prometheus-community chart repositories
  • install the prometheus-community/prometheus chart of the specified version to the default namespace.

The chart installation is implemented using the helm.k0sproject.io/Chart CRD. For every given Helm extension the cluster creates a Chart CRD instance. The cluster has a controller which monitors the Chart CRDs and supports the following operations:

  • install
  • upgrade
  • delete

For security reasons, the cluster operates only on Chart CRDs instantiated in the kube-system namespace; however, the target namespace can be any namespace.

"},{"location":"extensions/#crd-definition","title":"CRD definition","text":"
apiVersion: helm.k0sproject.io/v1beta1\nkind: Chart\nmetadata:\ncreationTimestamp: \"2020-11-10T14:17:53Z\"\ngeneration: 2\nlabels:\nk0s.k0sproject.io/stack: helm\nname: k0s-addon-chart-test-addon\nnamespace: kube-system\nresourceVersion: \"627\"\nselfLink: /apis/helm.k0sproject.io/v1beta1/namespaces/kube-system/charts/k0s-addon-chart-test-addon\nuid: ebe59ed4-1ff8-4d41-8e33-005b183651ed\nspec:\nchartName: prometheus-community/prometheus\nnamespace: default\nvalues: |\nstorageSpec:\nemptyDir:\nmedium: Memory\nversion: 11.16.8\nstatus:\nappVersion: 2.21.0\nnamespace: default\nreleaseName: prometheus-1605017878\nrevision: 2\nupdated: 2020-11-10 14:18:08.235656 +0000 UTC m=+41.871656901\nversion: 11.16.8\n

The Chart.spec defines the chart information.

The Chart.status keeps the information about the last operation performed by the operator.

"},{"location":"external-runtime-deps/","title":"External runtime dependencies","text":"

k0s is packaged as a single binary, which includes all the needed components. All the binaries are statically linked which means that in typical use cases there's an absolute minimum of external runtime dependencies.

However, depending on the node role and cluster configuration, some of the underlying components may have specific dependencies, like OS level tools, packages and libraries. This page aims to provide a comprehensive overview.

The following command checks for known requirements on a host (currently only available on Linux):

k0s sysinfo\n
"},{"location":"external-runtime-deps/#a-unique-machine-id-for-multi-node-setups","title":"A unique machine ID for multi-node setups","text":"

Whenever k0s is run in a multi-node setup (i.e. the --single command line flag isn't used), k0s requires a machine ID: a unique host identifier that is somewhat stable across reboots. For Linux, this ID is read from the files /var/lib/dbus/machine-id or /etc/machine-id. For Windows, it's taken from the registry key HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Cryptography\\MachineGuid. If neither of the OS specific sources yield a result, k0s will fallback to use a machine ID based on the hostname.

When running k0s on top of virtualized or containerized environments, you need to ensure that hosts get their own unique IDs, even if they have been created from the same image.
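
On systemd-based distributions, one way to regenerate a cloned machine ID is sketched below (treat this as an example and consult your distro's documentation):

sudo rm -f /etc/machine-id /var/lib/dbus/machine-id
sudo systemd-machine-id-setup
sudo dbus-uuidgen --ensure    # recreates /var/lib/dbus/machine-id if needed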

"},{"location":"external-runtime-deps/#linux-specific","title":"Linux specific","text":""},{"location":"external-runtime-deps/#linux-kernel-configuration","title":"Linux kernel configuration","text":"

Needless to say, as k0s operates Kubernetes worker nodes, there's a certain number of Linux kernel modules and configuration options that need to be present on the system. This basically stems from the need to run containers and to be able to set up networking for them.

The needed kernel configuration items are listed below. All of them are available in kernel versions 4.3 and above. If running on older kernels, check whether the distro in use has backported the required features; if it has, it might still meet the requirements. k0s will check the Linux kernel release as part of its pre-flight checks and issue a warning if it's below 3.10.

The list covers ONLY the k0s/kubernetes components\u2019 needs on worker nodes. Your own workloads may require more.

  • CONFIG_CGROUPS: Control Group support
    • CONFIG_CGROUP_FREEZER: Freezer cgroup subsystem
    • CONFIG_CGROUP_PIDS: PIDs cgroup subsystem kubernetes/kubeadm#2335 (comment)
    • CONFIG_CGROUP_DEVICE: Device controller for cgroups
    • CONFIG_CPUSETS: Cpuset support
    • CONFIG_CGROUP_CPUACCT: Simple CPU accounting cgroup subsystem
    • CONFIG_MEMCG: Memory Resource Controller for Control Groups
    • (optional) CONFIG_CGROUP_HUGETLB: HugeTLB Resource Controller for Control Groups kubernetes/kubeadm#2335 (comment)
    • CONFIG_CGROUP_SCHED: Group CPU scheduler
      • CONFIG_FAIR_GROUP_SCHED: Group scheduling for SCHED_OTHER kubernetes/kubeadm#2335 (comment)
        • (optional) CONFIG_CFS_BANDWIDTH: CPU bandwidth provisioning for FAIR_GROUP_SCHED Required if CPU CFS quota enforcement is enabled for containers that specify CPU limits (--cpu-cfs-quota).
    • (optional) CONFIG_BLK_CGROUP: Block IO controller kubernetes/kubernetes#92287 (comment)
  • CONFIG_NAMESPACES: Namespaces support
    • CONFIG_UTS_NS: UTS namespace
    • CONFIG_IPC_NS: IPC namespace
    • CONFIG_PID_NS: PID namespace
    • CONFIG_NET_NS: Network namespace
  • CONFIG_NET: Networking support
    • CONFIG_INET: TCP/IP networking
    • CONFIG_NETFILTER: Network packet filtering framework (Netfilter)
      • (optional) CONFIG_NETFILTER_ADVANCED: Advanced netfilter configuration
      • CONFIG_NETFILTER_XTABLES: Netfilter Xtables support
        • CONFIG_NETFILTER_XT_TARGET_REDIRECT: REDIRECT target support
        • CONFIG_NETFILTER_XT_MATCH_COMMENT: \"comment\" match support
  • CONFIG_EXT4_FS: The Extended 4 (ext4) filesystem
  • CONFIG_PROC_FS: /proc file system support

Note: As part of its pre-flight checks, k0s will try to inspect and validate the kernel configuration. In order for that to succeed, the configuration needs to be accessible at runtime. There are some typical places that k0s will check. A bullet-proof way to ensure the accessibility is to enable CONFIG_IKCONFIG_PROC, and, if enabled as a module, to load the configs module: modprobe configs.
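
For instance, with CONFIG_IKCONFIG_PROC enabled, the running kernel's configuration can be inspected directly (a sketch; the grep pattern is just an example):

sudo modprobe configs              # only needed if IKCONFIG is built as a module
zgrep CONFIG_CGROUPS /proc/config.gz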

"},{"location":"external-runtime-deps/#control-groups-cgroups","title":"Control Groups (cgroups)","text":"

Both cgroup v1 and cgroup v2 are supported.

Required cgroup controllers:

  • cpu
  • cpuacct
  • cpuset
  • memory
  • devices
  • freezer
  • pids

Optional cgroup controllers:

  • hugetlb (kubernetes/kubeadm#2335 (comment))
  • blkio (kubernetes/kubernetes#92287 (comment)) containerd and cri-o will use blkio to track disk I/O and throttling in both cgroup v1 and v2.
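
To check which controllers are available on a host, the standard kernel interfaces can be inspected (a sketch):

cat /sys/fs/cgroup/cgroup.controllers   # cgroup v2 (unified hierarchy)
cat /proc/cgroups                       # cgroup v1 controller list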
"},{"location":"external-runtime-deps/#external-hard-dependencies","title":"External hard dependencies","text":"

There are very few external tools that are needed or used.

"},{"location":"external-runtime-deps/#mountumount","title":"mount/umount","text":"

When setting up pods, kubelet will call mount binary on the host. Similarly when destroying pods it will call umount. mount and umount are only needed on worker nodes where kubelet runs.

"},{"location":"external-runtime-deps/#external-soft-dependencies","title":"External soft dependencies","text":"

There are a few external tools that may be needed or used under specific circumstances:

"},{"location":"external-runtime-deps/#containerd-and-apparmor","title":"containerd and AppArmor","text":"

In order to use containerd in conjunction with AppArmor, it must be enabled in the kernel and the /sbin/apparmor_parser executable must be installed on the host, otherwise containerd will disable AppArmor support.

"},{"location":"external-runtime-deps/#iptables","title":"iptables","text":"

iptables may be executed to detect if there are any existing iptables rules and whether those are in legacy or nft mode. If iptables is not found, k0s will assume that there are no pre-existing iptables rules.
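
On most recent distributions, the variant in use is visible in the version string, for example:

iptables --version
# e.g. "iptables v1.8.7 (nf_tables)" or "... (legacy)"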

"},{"location":"external-runtime-deps/#useradd-adduser","title":"useradd / adduser","text":"

During k0s install, the external tool useradd will be used on the controllers to create system user accounts for k0s. If it does not exist, k0s will fall back to busybox's adduser.

"},{"location":"external-runtime-deps/#userdel-deluser","title":"userdel / deluser","text":"

k0s reset will execute either userdel or deluser to clean up system user accounts.

"},{"location":"external-runtime-deps/#modprobe","title":"modprobe","text":"

On k0s workers, modprobe will be executed to load missing kernel modules if they are not detected.

"},{"location":"external-runtime-deps/#id","title":"id","text":"

The external /usr/bin/id will be executed as a fallback if the local user lookup fails, in case NSS is used.

"},{"location":"external-runtime-deps/#other-dependencies-in-previous-versions-of-k0s","title":"Other dependencies in previous versions of k0s","text":"
  • up until k0s v1.21.9+k0s.0: iptables Required for worker nodes. Resolved by @ncopa in #1046 by adding iptables and friends to k0s's embedded binaries.
  • up until k0s v1.21.7+k0s.0: find, du and nice Required for worker nodes. Resolved upstream by @ncopa in kubernetes/kubernetes#96115, contained in Kubernetes 1.21.8 (5b13c8f68d4) and 1.22.0 (d45ba645a8f).
"},{"location":"external-runtime-deps/#windows-specific","title":"Windows specific","text":"

TBD.

"},{"location":"helm-charts/","title":"Helm Charts","text":"

Defining your extensions as Helm charts is one of two methods you can use to run k0s with your preferred extensions (the other being through the use of Manifest Deployer).

k0s supports two methods for deploying applications using Helm charts:

  • Use Helm command in runtime to install applications. Refer to the Helm Quickstart Guide for more information.
  • Insert Helm charts directly into the k0s configuration file, k0s.yaml. This method does not require a separate install of the helm tool and the charts automatically deploy at the k0s bootstrap phase.
"},{"location":"helm-charts/#helm-charts-in-k0s-configuration","title":"Helm charts in k0s configuration","text":"

Adding Helm charts into the k0s configuration file gives you a declarative way in which to configure the cluster. k0s controller manages the setup of Helm charts that are defined as extensions in the k0s configuration file.

"},{"location":"helm-charts/#wait-for-install","title":"Wait for install","text":"

Each chart is processed the same way as the Helm CLI tool does, with the following options:

  • --wait
  • --wait-for-jobs
  • --timeout 10m

It is possible to customize the timeout by using the timeout field.

"},{"location":"helm-charts/#chart-configuration","title":"Chart configuration","text":"Field Default value Description name - Release name chartname - chartname in form \"repository/chartname\" or path to tgz file version - version to install timeout 10m timeout to wait for release install values - yaml as a string, custom chart values namespace - namespace to install chart into order 0 order to apply manifest. For equal values, alphanum ordering is used"},{"location":"helm-charts/#example","title":"Example","text":"

In the example, Prometheus is configured from the \"stable\" Helm chart repository. Add the following to k0s.yaml and restart k0s, after which Prometheus should start automatically with k0s.

spec:\nextensions:\nhelm:\nconcurrencyLevel: 5\nrepositories:\n- name: stable\nurl: https://charts.helm.sh/stable\n- name: prometheus-community\nurl: https://prometheus-community.github.io/helm-charts\n- name: helm-repo-with-auth\nurl: https://can-be-your-own-gitlab-ce-instance.org/api/v4/projects/PROJECTID/packages/helm/main\nusername: access-token-name-as-username\npassword: access-token-value-as-password\ncharts:\n- name: prometheus-stack\nchartname: prometheus-community/prometheus\nversion: \"14.6.1\"\ntimeout: 20m\norder: 1\nvalues: |\nalertmanager:\npersistentVolume:\nenabled: false\nserver:\npersistentVolume:\nenabled: false\nnamespace: default\n# We don't need to specify the repo in the repositories section for OCI charts\n- name: oci-chart\nchartname: oci://registry:8080/chart\nversion: \"0.0.1\"\norder: 2\nvalues: \"\"\nnamespace: default\n# Other way is to use local tgz file with chart\n# the file must exist on all controller nodes\n- name: tgz-chart\nchartname: /tmp/chart.tgz\nversion: \"0.0.1\"\norder: 2 values: \"\"\nnamespace: default\n

Example extensions that you can use with Helm charts include:

  • Ingress controllers: Nginx ingress, Traefik ingress (refer to the k0s documentation for Installing the Traefik Ingress Controller)
  • Volume storage providers: OpenEBS, Rook, Longhorn
  • Monitoring: Prometheus, Grafana
"},{"location":"helm-charts/#helm-debug-logging","title":"Helm debug logging","text":"

Running k0s controller with --debug=true enables helm debug logging.

"},{"location":"high-availability/","title":"Control Plane High Availability","text":"

You can create high availability for the control plane by distributing the control plane across multiple nodes and installing a load balancer on top. Etcd can be colocated with the controller nodes (the default in k0s) to achieve a highly available datastore at the same time.

Note: In this context, even a 2-node control plane is considered HA, even though it's not really HA from the etcd point of view. The same requirement for a LB still applies.

"},{"location":"high-availability/#network-considerations","title":"Network considerations","text":"

You should plan to allocate the control plane nodes into different zones. This helps avoid a control plane outage in case one zone fails.

For etcd high availability it's recommended to configure 3 or 5 controller nodes. For more information, refer to the etcd documentation.

"},{"location":"high-availability/#load-balancer","title":"Load Balancer","text":"

Control plane high availability requires a TCP load balancer, which acts as a single point of contact to access the controllers. The load balancer needs to allow and route traffic to each controller through the following ports:

  • 6443 (for Kubernetes API)
  • 8132 (for Konnectivity)
  • 9443 (for controller join API)

The load balancer can be implemented in many different ways and k0s doesn't have any additional requirements. You can use for example HAProxy, NGINX or your cloud provider's load balancer.

"},{"location":"high-availability/#example-configuration-haproxy","title":"Example configuration: HAProxy","text":"

Add the following lines to the end of the haproxy.cfg:

frontend kubeAPI\n    bind :6443\n    mode tcp\n    default_backend kubeAPI_backend\nfrontend konnectivity\n    bind :8132\n    mode tcp\n    default_backend konnectivity_backend\nfrontend controllerJoinAPI\n    bind :9443\n    mode tcp\n    default_backend controllerJoinAPI_backend\n\nbackend kubeAPI_backend\n    mode tcp\n    server k0s-controller1 <ip-address1>:6443 check check-ssl verify none\n    server k0s-controller2 <ip-address2>:6443 check check-ssl verify none\n    server k0s-controller3 <ip-address3>:6443 check check-ssl verify none\nbackend konnectivity_backend\n    mode tcp\n    server k0s-controller1 <ip-address1>:8132 check check-ssl verify none\n    server k0s-controller2 <ip-address2>:8132 check check-ssl verify none\n    server k0s-controller3 <ip-address3>:8132 check check-ssl verify none\nbackend controllerJoinAPI_backend\n    mode tcp\n    server k0s-controller1 <ip-address1>:9443 check check-ssl verify none\n    server k0s-controller2 <ip-address2>:9443 check check-ssl verify none\n    server k0s-controller3 <ip-address3>:9443 check check-ssl verify none\n\nlisten stats\n   bind *:9000\n   mode http\n   stats enable\n   stats uri /\n

The last block, \"listen stats\", is optional, but it can be helpful. It enables HAProxy statistics with a separate dashboard to monitor, for example, the health of each backend server. You can access it using a web browser:

http://<ip-addr>:9000\n

Restart HAProxy to apply the configuration changes.
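
On a systemd-based host this could be, for example (assuming the service is named haproxy):

sudo systemctl restart haproxy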

"},{"location":"high-availability/#k0s-configuration","title":"k0s configuration","text":"

First and foremost, all controllers should utilize the same CA certificates and SA key pair:

/var/lib/k0s/pki/ca.key\n/var/lib/k0s/pki/ca.crt\n/var/lib/k0s/pki/sa.key\n/var/lib/k0s/pki/sa.pub\n/var/lib/k0s/pki/etcd/ca.key\n/var/lib/k0s/pki/etcd/ca.crt\n

To generate these certificates, you have two options: either generate them manually using the instructions provided here and then share them across the controller nodes, or utilize k0sctl for automated generation and sharing.

The second important aspect is that the load balancer address must be configured in k0s, either by using k0s.yaml on each controller or by using k0sctl to automatically deploy all controllers with the same configuration:

"},{"location":"high-availability/#configuration-using-k0syaml-for-each-controller","title":"Configuration using k0s.yaml (for each controller)","text":"

Note that you need to put your load balancer's public IP address in two places.

spec:\napi:\nexternalAddress: <load balancer public ip address>\nsans:\n- <load balancer public ip address>\n
"},{"location":"high-availability/#configuration-using-k0sctlyaml-for-k0sctl","title":"Configuration using k0sctl.yaml (for k0sctl)","text":"

Add the following lines to the end of the k0sctl.yaml. Note that you need to put your load balancer's public IP address in two places.

  k0s:\nconfig:\nspec:\napi:\nexternalAddress: <load balancer public ip address>\nsans:\n- <load balancer public ip address>\n

For greater detail about k0s configuration, refer to the Full configuration file reference.

"},{"location":"install/","title":"Quick Start Guide","text":"

On completion of the Quick Start you will have a full Kubernetes cluster with a single node that includes both the controller and the worker. Such a setup is ideal for environments that do not require high-availability and multiple nodes.

"},{"location":"install/#prerequisites","title":"Prerequisites","text":"

Note: Before proceeding, make sure to review the System Requirements.

Though the Quick Start material is written for Debian/Ubuntu, you can use it for any Linux distro that is running either a Systemd or OpenRC init system.

"},{"location":"install/#install-k0s","title":"Install k0s","text":"
  1. Download k0s

    Run the k0s download script to download the latest stable version of k0s and make it executable from /usr/bin/k0s.

    curl -sSLf https://get.k0s.sh | sudo sh\n
  2. Install k0s as a service

    The k0s install sub-command installs k0s as a system service on the local host that is running one of the supported init systems: Systemd or OpenRC. You can execute the install for workers, controllers or single node (controller+worker) instances.

    Run the following command to install a single node k0s that includes the controller and worker functions with the default configuration:

    sudo k0s install controller --single\n

    The k0s install controller sub-command accepts the same flags and parameters as the k0s controller. Refer to manual install for a custom config file example.

    It is possible to set environment variables with the install command:

    sudo k0s install controller -e ETCD_UNSUPPORTED_ARCH=arm\n

    The system service can be reinstalled with the --force flag:

    sudo k0s install controller --single --force\nsudo systemctl daemon-reload\n
  3. Start k0s as a service

    To start the k0s service, run:

    sudo k0s start\n

    The k0s service will start automatically after the node restart.

    A minute or two typically passes before the node is ready to deploy applications.

  4. Check service, logs and k0s status

    To get general information about your k0s instance's status, run:

    $ sudo k0s status\nVersion: v1.28.1+k0s.0\nProcess ID: 436\nRole: controller\nWorkloads: true\nInit System: linux-systemd\n
  5. Access your cluster using kubectl

    Note: k0s includes the Kubernetes command-line tool kubectl.

    Use kubectl to deploy your application or to check your node status:

    $ sudo k0s kubectl get nodes\nNAME   STATUS   ROLES    AGE    VERSION\nk0s    Ready    <none>   4m6s   v1.28.1+k0s\n
"},{"location":"install/#uninstall-k0s","title":"Uninstall k0s","text":"

The removal of k0s is a three-step process.

  1. Stop the service.

    sudo k0s stop\n
  2. Execute the k0s reset command.

    The k0s reset command cleans up the installed system service, data directories, containers, mounts and network namespaces.

    sudo k0s reset\n
  3. Reboot the system.

A few small k0s fragments persist even after the reset (for example, iptables rules). As such, you should reboot after running the k0s reset command.

"},{"location":"install/#next-steps","title":"Next Steps","text":"
  • Install using k0sctl: Deploy multi-node clusters using just one command
  • Manual Install: (Advanced) Manually deploy multi-node clusters
  • Control plane configuration options: Networking and datastore configuration
  • Worker node configuration options: Node labels and kubelet arguments
  • Support for cloud providers: Load balancer or storage configuration
  • Installing the Traefik Ingress Controller: Ingress deployment information
  • Airgap/Offline installation: Airgap deployment
"},{"location":"k0s-in-docker/","title":"Run k0s in Docker","text":"

You can create a k0s cluster on top of docker. In such a scenario, by default, both controller and worker nodes are run in the same container to provide an easy local testing \"cluster\".

"},{"location":"k0s-in-docker/#prerequisites","title":"Prerequisites","text":"

You will require a Docker environment running on a Mac, Windows, or Linux system.

"},{"location":"k0s-in-docker/#container-images","title":"Container images","text":"

The k0s containers are published both on Docker Hub and GitHub. For reasons of simplicity, the examples given here use Docker Hub (GitHub requires a separate authentication that is not covered). Alternative links include:

  • docker.io/k0sproject/k0s:1.28.1-k0s.0
  • ghcr.io/k0sproject/k0s:1.28.1-k0s.0

Note: Due to Docker Hub tag validation scheme, we have to use - as the k0s version separator instead of the usual +. So for example k0s version v1.28.1+k0s.0 is tagged as docker.io/k0sproject/k0s:v1.28.1-k0s.0.

"},{"location":"k0s-in-docker/#start-k0s","title":"Start k0s","text":""},{"location":"k0s-in-docker/#1-initiate-k0s","title":"1. Initiate k0s","text":"

You can run your own k0s in Docker:

docker run -d --name k0s --hostname k0s --privileged -v /var/lib/k0s -p 6443:6443 docker.io/k0sproject/k0s:latest\n

Note: If you are using Docker Desktop as the runtime, starting from version 4.3.0 it uses cgroups v2 in the VM that runs the engine. This means you have to add some extra flags to the above command to get kubelet and containerd to properly work with cgroups v2:

--cgroupns=host -v /sys/fs/cgroup:/sys/fs/cgroup:rw\n
"},{"location":"k0s-in-docker/#2-optional-create-additional-workers","title":"2. (Optional) Create additional workers","text":"

You can attach multiple worker nodes to the cluster and then distribute your application containers across separate workers.

For each required worker:

  1. Acquire a join token for the worker:

    token=$(docker exec -t -i k0s k0s token create --role=worker)\n
  2. Run the container to create and join the new worker:

    docker run -d --name k0s-worker1 --hostname k0s-worker1 --privileged -v /var/lib/k0s docker.io/k0sproject/k0s:latest k0s worker $token\n
"},{"location":"k0s-in-docker/#3-access-your-cluster","title":"3. Access your cluster","text":"

Access your cluster using kubectl:

docker exec k0s kubectl get nodes\n

Alternatively, grab the kubeconfig file with docker exec k0s cat /var/lib/k0s/pki/admin.conf and paste it into Lens.
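
For example, you can also point a locally installed kubectl at the cluster (the output file name is arbitrary; port 6443 is published to the host in the run command above):

docker exec k0s cat /var/lib/k0s/pki/admin.conf > k0s-kubeconfig
KUBECONFIG=$PWD/k0s-kubeconfig kubectl get nodes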

"},{"location":"k0s-in-docker/#use-docker-compose-alternative","title":"Use Docker Compose (alternative)","text":"

As an alternative you can run k0s using Docker Compose:

version: \"3.9\"\nservices:\nk0s:\ncontainer_name: k0s\nimage: docker.io/k0sproject/k0s:latest\ncommand: k0s controller --config=/etc/k0s/config.yaml --enable-worker\nhostname: k0s\nprivileged: true\nvolumes:\n- \"/var/lib/k0s\"\ntmpfs:\n- /run\n- /var/run\nports:\n- \"6443:6443\"\nnetwork_mode: \"bridge\"\nenvironment:\nK0S_CONFIG: |-\napiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s\n# Any additional configuration goes here ...\n
"},{"location":"k0s-in-docker/#known-limitations","title":"Known limitations","text":""},{"location":"k0s-in-docker/#no-custom-docker-networks","title":"No custom Docker networks","text":"

Currently, k0s nodes cannot be run if the containers are configured to use custom networks (for example, with --net my-net). This is because Docker sets up a custom DNS service within the network, which creates issues with CoreDNS. No completely reliable workarounds are available, however no issues should arise from running k0s cluster(s) on a bridge network.

"},{"location":"k0s-in-docker/#next-steps","title":"Next Steps","text":"
  • Install using k0sctl: Deploy multi-node clusters using just one command
  • Control plane configuration options: Networking and datastore configuration
  • Worker node configuration options: Node labels and kubelet arguments
  • Support for cloud providers: Load balancer or storage configuration
  • Installing the Traefik Ingress Controller: Ingress deployment information
"},{"location":"k0s-multi-node/","title":"Manual Install (Advanced)","text":"

You can manually set up k0s nodes by creating a multi-node cluster that is locally managed on each node. This involves several steps: first install each node separately, and then connect the nodes together using access tokens.

"},{"location":"k0s-multi-node/#prerequisites","title":"Prerequisites","text":"

Note: Before proceeding, make sure to review the System Requirements.

Though the Manual Install material is written for Debian/Ubuntu, you can use it for any Linux distro that is running either a Systemd or OpenRC init system.

You can speed up the use of the k0s command by enabling shell completion.
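
For example, assuming a Bash environment with bash-completion installed and that the k0s completion sub-command is available (see the shell completion documentation), the completion script can be installed like this:

k0s completion bash | sudo tee /etc/bash_completion.d/k0s >/dev/null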

"},{"location":"k0s-multi-node/#install-k0s","title":"Install k0s","text":""},{"location":"k0s-multi-node/#1-download-k0s","title":"1. Download k0s","text":"

Run the k0s download script to download the latest stable version of k0s and make it executable from /usr/bin/k0s.

curl -sSLf https://get.k0s.sh | sudo sh\n

The download script accepts the following environment variables:

  • K0S_VERSION=v1.28.1+k0s.0 - Select the version of k0s to be installed
  • DEBUG=true - Output commands and their arguments at execution.

Note: If you require environment variables and use sudo, you can do:

curl -sSLf https://get.k0s.sh | sudo K0S_VERSION=v1.28.1+k0s.0 sh\n
"},{"location":"k0s-multi-node/#2-bootstrap-a-controller-node","title":"2. Bootstrap a controller node","text":"

Create a configuration file:

mkdir -p /etc/k0s\nk0s config create > /etc/k0s/k0s.yaml\n

Note: For information on settings modification, refer to the configuration documentation.

sudo k0s install controller -c /etc/k0s/k0s.yaml\n
sudo k0s start\n

The k0s process acts as a "supervisor" for all of the control plane components. In a few moments the control plane will be up and running.

"},{"location":"k0s-multi-node/#3-create-a-join-token","title":"3. Create a join token","text":"

You need a token to join workers to the cluster. The token embeds information that enables mutual trust between the worker and controller(s) and allows the node to join the cluster as a worker.

To get a token, run the following command on one of the existing controller nodes:

sudo k0s token create --role=worker\n

The resulting output is a long token string, which you can use to add a worker to the cluster.

For enhanced security, run the following command to set an expiration time for the token:

sudo k0s token create --role=worker --expiry=100h > token-file\n
"},{"location":"k0s-multi-node/#4-add-workers-to-the-cluster","title":"4. Add workers to the cluster","text":"

To join the worker, run k0s in the worker mode with the join token you created:

sudo k0s install worker --token-file /path/to/token/file\n
sudo k0s start\n
"},{"location":"k0s-multi-node/#about-tokens","title":"About tokens","text":"

The join tokens are base64-encoded kubeconfigs for several reasons:

  • Well-defined structure
  • Capable of direct use as bootstrap auth configs for kubelet
  • Embedding of CA info for mutual trust

The bearer token embedded in the kubeconfig is a bootstrap token. For controller join tokens and worker join tokens k0s uses different usage attributes to ensure that k0s can validate the token role on the controller side.

"},{"location":"k0s-multi-node/#5-add-controllers-to-the-cluster","title":"5. Add controllers to the cluster","text":"

Note: Either etcd or an external data store (MySQL or Postgres) via kine must be in use to add new controller nodes to the cluster. Pay strict attention to the high availability configuration and make sure the configuration is identical for all controller nodes.

To create a join token for the new controller, run the following command on an existing controller:

sudo k0s token create --role=controller --expiry=1h > token-file\n

On the new controller, run:

sudo k0s install controller --token-file /path/to/token/file -c /etc/k0s/k0s.yaml\n

An important note here is that each controller in the cluster must have the k0s.yaml, otherwise some cluster nodes will use the default config values, which will lead to inconsistent behavior. If your configuration file includes IP addresses (node address, sans, etcd peerAddress), remember to update them accordingly for this specific controller node.

sudo k0s start\n
"},{"location":"k0s-multi-node/#6-check-k0s-status","title":"6. Check k0s status","text":"

To get general information about your k0s instance's status:

 sudo k0s status\n
Version: v1.28.1+k0s.0\nProcess ID: 2769\nParent Process ID: 1\nRole: controller\nInit System: linux-systemd\nService file: /etc/systemd/system/k0scontroller.service\n
"},{"location":"k0s-multi-node/#7-access-your-cluster","title":"7. Access your cluster","text":"

Use the Kubernetes 'kubectl' command-line tool that comes with the k0s binary to deploy your application or check your node status:

sudo k0s kubectl get nodes\n
NAME   STATUS   ROLES    AGE    VERSION\nk0s    Ready    <none>   4m6s   v1.28.1+k0s\n

You can also access your cluster easily with Lens, simply by copying the kubeconfig and pasting it to Lens:

sudo cat /var/lib/k0s/pki/admin.conf\n

Note: To access the cluster from an external network, you must replace localhost in the kubeconfig with the host IP address of your controller.
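
One way to do this substitution in a copy of the kubeconfig (the placeholder and output file name are illustrative):

sudo cat /var/lib/k0s/pki/admin.conf | sed 's/localhost/<controller-ip>/' > k0s.kubeconfig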

"},{"location":"k0s-multi-node/#next-steps","title":"Next Steps","text":"
  • Install using k0sctl: Deploy multi-node clusters using just one command
  • Control plane configuration options: Networking and datastore configuration
  • Worker node configuration options: Node labels and kubelet arguments
  • Support for cloud providers: Load balancer or storage configuration
  • Installing the Traefik Ingress Controller: Ingress deployment information
"},{"location":"k0s-single-node/","title":"K0s single node","text":"

See the Quick Start Guide.

"},{"location":"k0sctl-install/","title":"Install using k0sctl","text":"

k0sctl is a command-line tool for bootstrapping and managing k0s clusters. k0sctl connects to the provided hosts using SSH and gathers information on the hosts, with which it forms a cluster by configuring the hosts, deploying k0s, and then connecting the k0s nodes together.

With k0sctl, you can create multi-node clusters in a manner that is automatic and easily repeatable. This method is recommended for production cluster installation.

Note: The k0sctl install method is required for automatic upgrades.

"},{"location":"k0sctl-install/#prerequisites","title":"Prerequisites","text":"

You can execute k0sctl on any system that supports the Go language. Pre-compiled k0sctl binaries are available on the k0sctl releases page.

Note: For target host prerequisites information, refer to the k0s System Requirements.

"},{"location":"k0sctl-install/#install-k0s","title":"Install k0s","text":""},{"location":"k0sctl-install/#1-install-k0sctl-tool","title":"1. Install k0sctl tool","text":"

k0sctl is a single binary; instructions for downloading and installing it are available in the k0sctl GitHub repository.
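For example, on a Linux x64 build machine the installation could look roughly like this (the version number and asset name are illustrative; check the releases page for the current ones):

# Download a k0sctl release binary and place it on the PATH.
curl -sSLf -o k0sctl https://github.com/k0sproject/k0sctl/releases/download/v0.15.0/k0sctl-linux-x64
chmod +x k0sctl
sudo mv k0sctl /usr/local/bin/k0sctl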

"},{"location":"k0sctl-install/#2-configure-the-cluster","title":"2. Configure the cluster","text":"
  1. Run the following command to create a k0sctl configuration file:

    k0sctl init > k0sctl.yaml\n

    This action creates a k0sctl.yaml file in the current directory:

    apiVersion: k0sctl.k0sproject.io/v1beta1\nkind: Cluster\nmetadata:\nname: k0s-cluster\nspec:\nhosts:\n- role: controller\nssh:\naddress: 10.0.0.1 # replace with the controller's IP address\nuser: root\nkeyPath: ~/.ssh/id_rsa\n- role: worker\nssh:\naddress: 10.0.0.2 # replace with the worker's IP address\nuser: root\nkeyPath: ~/.ssh/id_rsa\n
  2. Provide each host with a valid IP address that is reachable by k0sctl, along with the connection details for an SSH connection.

Note: Refer to the k0sctl documentation for k0sctl configuration specifications.

"},{"location":"k0sctl-install/#3-deploy-the-cluster","title":"3. Deploy the cluster","text":"

Run k0sctl apply to perform the cluster deployment:

k0sctl apply --config k0sctl.yaml\n
\u2800\u28ff\u28ff\u2847\u2800\u2800\u2880\u28f4\u28fe\u28ff\u281f\u2801\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u287f\u281b\u2801\u2800\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u2800\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588\n\u2800\u28ff\u28ff\u2847\u28e0\u28f6\u28ff\u287f\u280b\u2800\u2800\u2800\u28b8\u28ff\u2847\u2800\u2800\u2800\u28e0\u2800\u2800\u2880\u28e0\u2846\u28b8\u28ff\u28ff\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2588\u2588\u2588          \u2588\u2588\u2588    \u2588\u2588\u2588\n\u2800\u28ff\u28ff\u28ff\u28ff\u28df\u280b\u2800\u2800\u2800\u2800\u2800\u28b8\u28ff\u2847\u2800\u28b0\u28fe\u28ff\u2800\u2800\u28ff\u28ff\u2847\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u2800\u2588\u2588\u2588          \u2588\u2588\u2588    \u2588\u2588\u2588\n\u2800\u28ff\u28ff\u284f\u283b\u28ff\u28f7\u28e4\u2840\u2800\u2800\u2800\u2838\u281b\u2801\u2800\u2838\u280b\u2801\u2800\u2800\u28ff\u28ff\u2847\u2808\u2809\u2809\u2809\u2809\u2809\u2809\u2809\u2809\u28b9\u28ff\u28ff\u2800\u2588\u2588\u2588          \u2588\u2588\u2588    \u2588\u2588\u2588\n\u2800\u28ff\u28ff\u2847\u2800\u2800\u2819\u28bf\u28ff\u28e6\u28c0\u2800\u2800\u2800\u28e0\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28ff\u28ff\u2847\u28b0\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28fe\u28ff\u28ff\u2800\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588    \u2588\u2588\u2588    \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\n\nINFO k0sctl 0.0.0 Copyright 2021, Mirantis Inc.\nINFO Anonymized telemetry will be sent to Mirantis.\nINFO By continuing to use k0sctl you agree to these terms:\nINFO https://k0sproject.io/licenses/eula\nINFO ==> Running phase: Connect to hosts\nINFO [ssh] 10.0.0.1:22: connected\nINFO [ssh] 10.0.0.2:22: connected\nINFO ==> Running phase: Detect host operating systems\nINFO [ssh] 10.0.0.1:22: is running Ubuntu 20.10\nINFO [ssh] 10.0.0.2:22: is running Ubuntu 20.10\nINFO ==> Running phase: Prepare hosts\nINFO [ssh] 10.0.0.1:22: installing kubectl\nINFO ==> Running phase: Gather host facts\nINFO [ssh] 10.0.0.1:22: discovered 10.12.18.133 as private address\nINFO ==> Running phase: Validate hosts\nINFO ==> Running phase: Gather k0s facts\nINFO ==> Running phase: Download K0s on the hosts\nINFO [ssh] 10.0.0.2:22: downloading k0s 0.11.0\nINFO [ssh] 10.0.0.1:22: downloading k0s 0.11.0\nINFO ==> Running phase: Configure K0s\nWARN [ssh] 10.0.0.1:22: generating default configuration\nINFO [ssh] 10.0.0.1:22: validating configuration\nINFO [ssh] 10.0.0.1:22: configuration was changed\nINFO ==> Running phase: Initialize K0s Cluster\nINFO [ssh] 10.0.0.1:22: installing k0s controller\nINFO [ssh] 10.0.0.1:22: waiting for the k0s service to start\nINFO [ssh] 10.0.0.1:22: waiting for kubernetes api to respond\nINFO ==> Running phase: Install workers\nINFO [ssh] 10.0.0.1:22: generating token\nINFO [ssh] 10.0.0.2:22: writing join token\nINFO [ssh] 10.0.0.2:22: installing k0s worker\nINFO [ssh] 10.0.0.2:22: starting service\nINFO [ssh] 10.0.0.2:22: waiting for node to become ready\nINFO ==> Running phase: Disconnect from hosts\nINFO ==> Finished in 2m2s\nINFO k0s cluster version 0.11.0 is now installed\nINFO Tip: To access the cluster you can now fetch the admin kubeconfig using:\nINFO      k0sctl kubeconfig\n
"},{"location":"k0sctl-install/#4-access-the-cluster","title":"4. Access the cluster","text":"

To access your k0s cluster, use k0sctl to generate a kubeconfig for the purpose.

k0sctl kubeconfig > kubeconfig\n

With the kubeconfig, you can access your cluster using either kubectl or Lens.

kubectl get pods --kubeconfig kubeconfig -A\n
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE\nkube-system   calico-kube-controllers-5f6546844f-w8x27   1/1     Running   0          3m50s\nkube-system   calico-node-vd7lx                          1/1     Running   0          3m44s\nkube-system   coredns-5c98d7d4d8-tmrwv                   1/1     Running   0          4m10s\nkube-system   konnectivity-agent-d9xv2                   1/1     Running   0          3m31s\nkube-system   kube-proxy-xp9r9                           1/1     Running   0          4m4s\nkube-system   metrics-server-6fbcd86f7b-5frtn            1/1     Running   0          3m51s\n
"},{"location":"k0sctl-install/#known-limitations","title":"Known limitations","text":"
  • k0sctl does not perform any discovery of hosts, and thus it only operates on the hosts listed in the provided configuration.
  • k0sctl can only add more nodes to the cluster. It cannot remove existing nodes.
"},{"location":"k0sctl-install/#next-steps","title":"Next Steps","text":"
  • Control plane configuration options: Networking and datastore configuration
  • Worker node configuration options: Node labels and kubelet arguments
  • Support for cloud providers: Load balancer or storage configuration
  • Installing the Traefik Ingress Controller: Ingress deployment information
"},{"location":"manifests/","title":"Manifest Deployer","text":"

Included with k0s, Manifest Deployer is one of two methods you can use to run k0s with your preferred extensions (the other being by defining your extensions as Helm charts).

"},{"location":"manifests/#overview","title":"Overview","text":"

Manifest Deployer runs on the controller nodes and provides an easy way to automatically deploy manifests at runtime.

By default, k0s reads all manifests under /var/lib/k0s/manifests and ensures that their state matches the cluster state. Moreover, on removal of a manifest file, k0s will automatically prune all of its associated resources.

The use of Manifest Deployer is quite similar to using the kubectl apply command. The main difference between the two is that Manifest Deployer constantly monitors the directory for changes, so you do not need to manually apply changes that are made to the manifest files.

"},{"location":"manifests/#note","title":"Note","text":"
  • Each directory that is a direct descendant of /var/lib/k0s/manifests is considered to be its own \"stack\". Nested directories (further subfolders), however, are excluded from the stack mechanism and thus are not automatically deployed by the Manifest Deployer.
  • k0s uses the independent stack mechanism for some of its internal in-cluster components, as well as for other resources. Be sure to only touch the manifests that are not managed by k0s.
  • Explicitly define the namespace in the manifests (Manifest Deployer does not have a default namespace).
"},{"location":"manifests/#example","title":"Example","text":"

To try Manifest Deployer, create a new folder under /var/lib/k0s/manifests and then create a manifest file (such as nginx.yaml) with the following content:

apiVersion: v1\nkind: Namespace\nmetadata:\nname: nginx\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: nginx-deployment\nnamespace: nginx\nspec:\nselector:\nmatchLabels:\napp: nginx\nreplicas: 3\ntemplate:\nmetadata:\nlabels:\napp: nginx\nspec:\ncontainers:\n- name: nginx\nimage: nginx:latest\nports:\n- containerPort: 80\n

New pods will appear soon thereafter.

sudo k0s kubectl get pods --namespace nginx\n
NAME                                READY   STATUS    RESTARTS   AGE\nnginx-deployment-66b6c48dd5-8zq7d   1/1     Running   0          10m\nnginx-deployment-66b6c48dd5-br4jv   1/1     Running   0          10m\nnginx-deployment-66b6c48dd5-sqvhb   1/1     Running   0          10m\n
"},{"location":"networking/","title":"Networking","text":""},{"location":"networking/#in-cluster-networking","title":"In-cluster networking","text":"

k0s supports two Container Network Interface (CNI) providers out of the box, Kube-router and Calico. In addition, k0s can support your own CNI configuration.

"},{"location":"networking/#notes","title":"Notes","text":"
  • When deploying k0s with the default settings, all pods on a node can communicate with all pods on all nodes. No configuration changes are needed to get started.
  • Once you initialize the cluster with a network provider, the only way to change providers is through a full cluster redeployment.
"},{"location":"networking/#kube-router","title":"Kube-router","text":"

Kube-router is built into k0s, and so by default the distribution uses it for network provisioning. Kube-router uses the standard Linux networking stack and toolset, and you can set up CNI networking without any overlays by using BGP as the main mechanism for in-cluster networking.

  • Supports armv7 (among many other archs)
  • Uses slightly fewer resources (~15%)
  • Does NOT support dual-stack (IPv4/IPv6) networking
  • Does NOT support Windows nodes
  • Does NOT activate hairpin mode by default
"},{"location":"networking/#calico","title":"Calico","text":"

In addition to Kube-router, k0s also offers Calico as an alternative, built-in network provider. Calico is a layer 3 container networking solution that routes packets to pods. It supports, for example, pod-specific network policies that help to secure Kubernetes clusters in demanding use cases. Calico uses the VXLAN overlay network by default, and you can configure it to support IP-in-IP (ipip).

  • Does NOT support armv7
  • Uses slightly more resources
  • Supports dual-stack (IPv4/IPv6) networking
  • Supports Windows nodes
"},{"location":"networking/#custom-cni-configuration","title":"Custom CNI configuration","text":"

You can opt out of having k0s manage the network setup and choose instead to use any network plugin that adheres to the CNI specification. To do so, configure custom as the network provider in the k0s configuration file (k0s.yaml). You can do this, for example, by pushing network provider manifests into /var/lib/k0s/manifests, from where k0s controllers will collect them for deployment into the cluster (for more information, refer to Manifest Deployer).
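A minimal sketch of the relevant k0s.yaml snippet, assuming spec.network.provider is the field used for provider selection:

spec:
  network:
    provider: custom   # k0s stops managing CNI; supply your own plugin manifests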

"},{"location":"networking/#controller-worker-communication","title":"Controller-Worker communication","text":"

One goal of k0s is to allow for the deployment of an isolated control plane, which may prevent the establishment of an IP route between controller nodes and the pod network. Thus, to enable this communication path (which is mandated by conformance tests), k0s deploys Konnectivity service to proxy traffic from the API server (control plane) into the worker nodes. This ensures that we can always fulfill all the Kubernetes API functionalities, but still operate the control plane in total isolation from the workers.

Note: To allow the Konnectivity agents running on the worker nodes to establish the connection, configure your firewalls to allow outbound access on port 8132. Moreover, allow outbound access on port 6443 so that the worker nodes can reach the Kube API.
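As an illustration, on a controller host running firewalld the two ports could be opened like this so that workers can reach them (a sketch, not a complete firewall policy):

# Allow Konnectivity (8132) and the Kubernetes API (6443) through the controller's firewall.
sudo firewall-cmd --permanent --add-port=8132/tcp
sudo firewall-cmd --permanent --add-port=6443/tcp
sudo firewall-cmd --reload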

"},{"location":"networking/#required-ports-and-protocols","title":"Required ports and protocols","text":"Protocol Port Service Direction Notes TCP 2380 etcd peers controller <-> controller TCP 6443 kube-apiserver Worker, CLI => controller Authenticated Kube API using Kube TLS client certs, ServiceAccount tokens with RBAC TCP 179 kube-router worker <-> worker BGP routing sessions between peers UDP 4789 Calico worker <-> worker Calico VXLAN overlay TCP 10250 kubelet Master, Worker => Host * Authenticated kubelet API for the master node kube-apiserver (and heapster/metrics-server addons) using TLS client certs TCP 9443 k0s-api controller <-> controller k0s controller join API, TLS with token auth TCP 8132 konnectivity worker <-> controller Konnectivity is used as \"reverse\" tunnel between kube-apiserver and worker kubelets"},{"location":"networking/#iptables","title":"iptables","text":"

iptables can work in two distinct modes, legacy and nftables. k0s autodetects the mode and prefers nftables. To check which mode k0s is configured with, run ls -lah /var/lib/k0s/bin/: the iptables link target reveals the mode that k0s selected. k0s uses the same detection logic as the other Kubernetes components, but to ensure all components have picked up the same mode you can check each of them:

  • kube-proxy: nsenter -t $(pidof kube-proxy) -m iptables -V
  • kube-router: nsenter -t $(pidof kube-router) -m /sbin/iptables -V
  • calico: nsenter -t $(pidof -s calico-node) -m iptables -V

There are known incompatibilities between different iptables versions. k0s ships (in /var/lib/k0s/bin) a version of iptables that is tested to interoperate with all the other Kubernetes components it ships with. However, if other tooling on your hosts (firewalls, etc.) uses iptables and the host's iptables version differs from the one that k0s (and the other Kubernetes components) ship with, networking issues may occur. This is because iptables, being user-space tooling, does not provide strong version compatibility guarantees.
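A quick way to spot such a mismatch is to compare the host's iptables with the one bundled by k0s:

# Host iptables version vs. the iptables shipped in /var/lib/k0s/bin.
iptables -V
/var/lib/k0s/bin/iptables -V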

"},{"location":"networking/#firewalld-k0s","title":"Firewalld & k0s","text":"

If you are using firewalld on your hosts, you need to ensure it is configured to use the same FirewallBackend as k0s and the other Kubernetes components. Otherwise, networking will be broken in various ways.

Here's an example configuration for a tested working networking setup:

[root@rhel-test ~]# firewall-cmd --list-all\npublic (active)\ntarget: default\n  icmp-block-inversion: no\n  interfaces: eth0\n  sources: 10.244.0.0/16 10.96.0.0/12\n  services: cockpit dhcpv6-client ssh\n  ports: 80/tcp 6443/tcp 8132/tcp 10250/tcp 179/tcp 179/udp\n  protocols: forward: no\n  masquerade: yes\n  forward-ports: source-ports: icmp-blocks: rich rules:\n
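The listing above can be approximated with commands along these lines (10.244.0.0/16 and 10.96.0.0/12 are the default pod and service CIDRs; adjust them if your cluster configuration differs):

# Trust the pod and service networks and enable masquerading in the active zone.
sudo firewall-cmd --permanent --add-source=10.244.0.0/16
sudo firewall-cmd --permanent --add-source=10.96.0.0/12
sudo firewall-cmd --permanent --add-masquerade
sudo firewall-cmd --reload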
"},{"location":"nllb/","title":"Node-local load balancing","text":"

Note: This feature is experimental! Expect instabilities and/or breaking changes.

For clusters that don't have an externally managed load balancer for the k0s control plane, there is another option to get a highly available control plane, at least from within the cluster. K0s calls this \"node-local load balancing\". In contrast to an externally managed load balancer, node-local load balancing takes place exclusively on the worker nodes. It does not contribute to making the control plane highly available to the outside world (e.g. humans interacting with the cluster using management tools such as Lens or kubectl), but rather makes the cluster itself internally resilient to controller node outages.

"},{"location":"nllb/#technical-functionality","title":"Technical functionality","text":"

The k0s worker process manages a load balancer on each worker node's loopback interface and configures the relevant components to use that load balancer. This allows for requests from worker components to the control plane to be distributed among all currently available controller nodes, rather than being directed to the controller node that has been used to join a particular worker into the cluster. This improves the reliability and fault tolerance of the cluster in case a controller node becomes unhealthy.

Envoy is the only load balancer that is supported so far. Please note that Envoy is not available on ARMv7, so node-local load balancing is currently unavailable on that platform.

"},{"location":"nllb/#enabling-in-a-cluster","title":"Enabling in a cluster","text":"

In order to use node-local load balancing, the cluster needs to comply with the following:

  • The cluster doesn't use an externally managed load balancer, i.e. the cluster configuration doesn't specify a non-empty spec.api.externalAddress.
  • K0s isn't running as a single node, i.e. it isn't started using the --single flag.
  • The cluster should have multiple controller nodes. Node-local load balancing also works with a single controller node, but is only useful in conjunction with a highly available control plane.

Add the following to the cluster configuration (k0s.yaml):

spec:\nnetwork:\nnodeLocalLoadBalancing:\nenabled: true\ntype: EnvoyProxy\n

Or alternatively, if using k0sctl, add the following to the k0sctl configuration (k0sctl.yaml):

spec:\nk0s:\nconfig:\nspec:\nnetwork:\nnodeLocalLoadBalancing:\nenabled: true\ntype: EnvoyProxy\n

All newly added worker nodes will then use node-local load balancing. The k0s worker process on worker nodes that are already running must be restarted for the new configuration to take effect.
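For workers that were installed as a service with k0s install worker, the restart can be as simple as:

# Restart the k0s worker process so it picks up node-local load balancing.
sudo systemctl restart k0sworker.service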

"},{"location":"nllb/#full-example-using-k0sctl","title":"Full example using k0sctl","text":"

The following example shows a full k0sctl configuration file featuring three controllers and two workers with node-local load balancing enabled:

apiVersion: k0sctl.k0sproject.io/v1beta1\nkind: Cluster\nmetadata:\nname: k0s-cluster\nspec:\nk0s:\nversion: v1.28.1+k0s.0\nconfig:\nspec:\nnetwork:\nnodeLocalLoadBalancing:\nenabled: true\ntype: EnvoyProxy\nhosts:\n- role: controller\nssh:\naddress: 10.81.146.254\nkeyPath: k0s-ssh-private-key.pem\nport: 22\nuser: k0s\n- role: controller\nssh:\naddress: 10.81.146.184\nkeyPath: k0s-ssh-private-key.pem\nport: 22\nuser: k0s\n- role: controller\nssh:\naddress: 10.81.146.113\nkeyPath: k0s-ssh-private-key.pem\nport: 22\nuser: k0s\n- role: worker\nssh:\naddress: 10.81.146.198\nkeyPath: k0s-ssh-private-key.pem\nport: 22\nuser: k0s\n- role: worker\nssh:\naddress: 10.81.146.51\nkeyPath: k0s-ssh-private-key.pem\nport: 22\nuser: k0s\n

Save the above configuration into a file called k0sctl.yaml and apply it in order to bootstrap the cluster:

$ k0sctl apply\n\u28ff\u28ff\u2847\u2800\u2800\u2880\u28f4\u28fe\u28ff\u281f\u2801\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u287f\u281b\u2801\u2800\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u2800\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588\n\u28ff\u28ff\u2847\u28e0\u28f6\u28ff\u287f\u280b\u2800\u2800\u2800\u28b8\u28ff\u2847\u2800\u2800\u2800\u28e0\u2800\u2800\u2880\u28e0\u2846\u28b8\u28ff\u28ff\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2588\u2588\u2588          \u2588\u2588\u2588    \u2588\u2588\u2588\n\u28ff\u28ff\u28ff\u28ff\u28df\u280b\u2800\u2800\u2800\u2800\u2800\u28b8\u28ff\u2847\u2800\u28b0\u28fe\u28ff\u2800\u2800\u28ff\u28ff\u2847\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u2800\u2588\u2588\u2588          \u2588\u2588\u2588    \u2588\u2588\u2588\n\u28ff\u28ff\u284f\u283b\u28ff\u28f7\u28e4\u2840\u2800\u2800\u2800\u2838\u281b\u2801\u2800\u2838\u280b\u2801\u2800\u2800\u28ff\u28ff\u2847\u2808\u2809\u2809\u2809\u2809\u2809\u2809\u2809\u2809\u28b9\u28ff\u28ff\u2800\u2588\u2588\u2588          \u2588\u2588\u2588    \u2588\u2588\u2588\n\u28ff\u28ff\u2847\u2800\u2800\u2819\u28bf\u28ff\u28e6\u28c0\u2800\u2800\u2800\u28e0\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28ff\u28ff\u2847\u28b0\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28fe\u28ff\u28ff\u2800\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588    \u2588\u2588\u2588    \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\nk0sctl 0.15.0 Copyright 2022, k0sctl authors.\nBy continuing to use k0sctl you agree to these terms:\nhttps://k0sproject.io/licenses/eula\nlevel=info msg=\"==> Running phase: Connect to hosts\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: connected\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: connected\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: connected\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: connected\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: connected\"\nlevel=info msg=\"==> Running phase: Detect host operating systems\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: is running Alpine Linux v3.17\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: is running Alpine Linux v3.17\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: is running Alpine Linux v3.17\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: is running Alpine Linux v3.17\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: is running Alpine Linux v3.17\"\nlevel=info msg=\"==> Running phase: Acquire exclusive host lock\"\nlevel=info msg=\"==> Running phase: Prepare hosts\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: installing packages (curl)\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: installing packages (curl, iptables)\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: installing packages (curl)\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: installing packages (curl, iptables)\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: installing packages (curl)\"\nlevel=info msg=\"==> Running phase: Gather host facts\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: using k0s-controller-1 as hostname\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: using k0s-worker-1 as hostname\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: using k0s-worker-0 as hostname\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: using k0s-controller-2 as hostname\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: using k0s-controller-0 as hostname\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: discovered eth0 as private interface\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: discovered eth0 
as private interface\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: discovered eth0 as private interface\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: discovered eth0 as private interface\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: discovered eth0 as private interface\"\nlevel=info msg=\"==> Running phase: Download k0s binaries to local host\"\nlevel=info msg=\"==> Running phase: Validate hosts\"\nlevel=info msg=\"==> Running phase: Gather k0s facts\"\nlevel=info msg=\"==> Running phase: Validate facts\"\nlevel=info msg=\"==> Running phase: Upload k0s binaries to hosts\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.28.1+k0s.0\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.28.1+k0s.0\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.28.1+k0s.0\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.28.1+k0s.0\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.28.1+k0s.0\"\nlevel=info msg=\"==> Running phase: Configure k0s\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: validating configuration\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: validating configuration\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: validating configuration\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: configuration was changed\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: configuration was changed\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: configuration was changed\"\nlevel=info msg=\"==> Running phase: Initialize the k0s cluster\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: installing k0s controller\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: waiting for the k0s service to start\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: waiting for kubernetes api to respond\"\nlevel=info msg=\"==> Running phase: Install controllers\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: generating token\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: writing join token\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: installing k0s controller\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: starting service\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: waiting for the k0s service to start\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: waiting for kubernetes api to respond\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: generating token\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: writing join token\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: installing k0s controller\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: starting service\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: waiting for the k0s service to start\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: waiting for kubernetes api to respond\"\nlevel=info msg=\"==> Running phase: Install workers\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: validating api connection to https://10.81.146.254:6443\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: validating api connection to https://10.81.146.254:6443\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: generating token\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: writing join token\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: writing join token\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: installing k0s worker\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: installing k0s worker\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: starting service\"\nlevel=info msg=\"[ssh] 
10.81.146.51:22: starting service\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: waiting for node to become ready\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: waiting for node to become ready\"\nlevel=info msg=\"==> Running phase: Release exclusive host lock\"\nlevel=info msg=\"==> Running phase: Disconnect from hosts\"\nlevel=info msg=\"==> Finished in 3m30s\"\nlevel=info msg=\"k0s cluster version v1.28.1+k0s.0 is now installed\"\nlevel=info msg=\"Tip: To access the cluster you can now fetch the admin kubeconfig using:\"\nlevel=info msg=\"     k0sctl kubeconfig\"\n

The cluster with its two worker nodes should be available by now. Set up the kubeconfig file in order to interact with it:

k0sctl kubeconfig > k0s-kubeconfig\nexport KUBECONFIG=$(pwd)/k0s-kubeconfig\n

The three controllers are available and provide API Server endpoints:

$ kubectl -n kube-node-lease get \\\nlease/k0s-ctrl-k0s-controller-0 \\\nlease/k0s-ctrl-k0s-controller-1 \\\nlease/k0s-ctrl-k0s-controller-2 \\\nlease/k0s-endpoint-reconciler\nNAME                        HOLDER                                                             AGE\nk0s-ctrl-k0s-controller-0   9ec2b221890e5ed6f4cc70377bfe809fef5be541a2774dc5de81db7acb2786f1   2m37s\nk0s-ctrl-k0s-controller-1   fe45284924abb1bfce674e5a9aa8d647f17c81e53bbab17cf28288f13d5e8f97   2m18s\nk0s-ctrl-k0s-controller-2   5ab43278e63fc863b2a7f0fe1aab37316a6db40c5a3d8a17b9d35b5346e23b3d   2m9s\nk0s-endpoint-reconciler     9ec2b221890e5ed6f4cc70377bfe809fef5be541a2774dc5de81db7acb2786f1   2m37s\n\n$ kubectl -n default get endpoints\nNAME         ENDPOINTS                                                  AGE\nkubernetes   10.81.146.113:6443,10.81.146.184:6443,10.81.146.254:6443   2m49s\n

The first controller is the current k0s leader. The two worker nodes can be listed, too:

$ kubectl get nodes -owide\nNAME           STATUS   ROLES    AGE     VERSION       INTERNAL-IP     EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION   CONTAINER-RUNTIME\nk0s-worker-0   Ready    <none>   2m16s   v1.28.1+k0s   10.81.146.198   <none>        Alpine Linux v3.17   5.15.83-0-virt   containerd://1.7.1\nk0s-worker-1   Ready    <none>   2m15s   v1.28.1+k0s   10.81.146.51    <none>        Alpine Linux v3.17   5.15.83-0-virt   containerd://1.7.1\n

There is one node-local load balancer pod running for each worker node:

$ kubectl -n kube-system get pod -owide -l app.kubernetes.io/managed-by=k0s,app.kubernetes.io/component=nllb\nNAME                READY   STATUS    RESTARTS   AGE   IP              NODE           NOMINATED NODE   READINESS GATES\nnllb-k0s-worker-0   1/1     Running   0          81s   10.81.146.198   k0s-worker-0   <none>           <none>\nnllb-k0s-worker-1   1/1     Running   0          85s   10.81.146.51    k0s-worker-1   <none>           <none>\n

The cluster is using node-local load balancing and is able to tolerate the outage of one controller node. Shut down the first controller to simulate a failure condition:

$ ssh -i k0s-ssh-private-key.pem k0s@10.81.146.254 'echo \"Powering off $(hostname) ...\" && sudo poweroff'\nPowering off k0s-controller-0 ...\n

Node-local load balancing provides high availability from within the cluster, not from the outside. The generated kubeconfig file lists the first controller's IP as the Kubernetes API server address by default. As this controller is gone by now, a subsequent call to kubectl will fail:

$ kubectl get nodes\nUnable to connect to the server: dial tcp 10.81.146.254:6443: connect: no route to host\n

Changing the server address in k0s-kubeconfig from the first controller to another one makes the cluster accessible again. Pick one of the other controller IP addresses and put that into the kubeconfig file. The addresses are listed both in k0sctl.yaml and in the output of kubectl -n default get endpoints above.

$ ssh -i k0s-ssh-private-key.pem k0s@10.81.146.184 hostname\nk0s-controller-1\n\n$ sed -i s#https://10\\\\.81\\\\.146\\\\.254:6443#https://10.81.146.184:6443#g k0s-kubeconfig\n\n$ kubectl get nodes -owide\nNAME           STATUS   ROLES    AGE     VERSION       INTERNAL-IP     EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION   CONTAINER-RUNTIME\nk0s-worker-0   Ready    <none>   3m35s   v1.28.1+k0s   10.81.146.198   <none>        Alpine Linux v3.17   5.15.83-0-virt   containerd://1.7.1\nk0s-worker-1   Ready    <none>   3m34s   v1.28.1+k0s   10.81.146.51    <none>        Alpine Linux v3.17   5.15.83-0-virt   containerd://1.7.1\n\n$ kubectl -n kube-system get pods -owide -l app.kubernetes.io/managed-by=k0s,app.kubernetes.io/component=nllb\nNAME                READY   STATUS    RESTARTS   AGE     IP              NODE           NOMINATED NODE   READINESS GATES\nnllb-k0s-worker-0   1/1     Running   0          2m31s   10.81.146.198   k0s-worker-0   <none>           <none>\nnllb-k0s-worker-1   1/1     Running   0          2m35s   10.81.146.51    k0s-worker-1   <none>           <none>\n

The first controller is no longer active. Its IP address is not listed in the default/kubernetes Endpoints resource and its k0s controller lease is orphaned:

$ kubectl -n default get endpoints\nNAME         ENDPOINTS                               AGE\nkubernetes   10.81.146.113:6443,10.81.146.184:6443   3m56s\n\n$ kubectl -n kube-node-lease get \\\nlease/k0s-ctrl-k0s-controller-0 \\\nlease/k0s-ctrl-k0s-controller-1 \\\nlease/k0s-ctrl-k0s-controller-2 \\\nlease/k0s-endpoint-reconciler\nNAME                        HOLDER                                                             AGE\nk0s-ctrl-k0s-controller-0                                                                      4m47s\nk0s-ctrl-k0s-controller-1   fe45284924abb1bfce674e5a9aa8d647f17c81e53bbab17cf28288f13d5e8f97   4m28s\nk0s-ctrl-k0s-controller-2   5ab43278e63fc863b2a7f0fe1aab37316a6db40c5a3d8a17b9d35b5346e23b3d   4m19s\nk0s-endpoint-reconciler     5ab43278e63fc863b2a7f0fe1aab37316a6db40c5a3d8a17b9d35b5346e23b3d   4m47s\n

Despite that controller being unavailable, the cluster remains operational. The third controller has become the new k0s leader. Workloads will run just fine:

$ kubectl -n default run nginx --image=nginx\npod/nginx created\n\n$ kubectl -n default get pods -owide\nNAME    READY   STATUS    RESTARTS   AGE   IP           NODE           NOMINATED NODE   READINESS GATES\nnginx   1/1     Running   0          16s   10.244.0.5   k0s-worker-1   <none>           <none>\n\n$ kubectl -n default logs nginx\n/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration\n/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/\n/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh\n10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf\n10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf\n/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh\n/docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh\n/docker-entrypoint.sh: Configuration complete; ready for start up\n[notice] 1#1: using the \"epoll\" event method\n[notice] 1#1: nginx/1.23.3\n[notice] 1#1: built by gcc 10.2.1 20210110 (Debian 10.2.1-6)\n[notice] 1#1: OS: Linux 5.15.83-0-virt\n[notice] 1#1: getrlimit(RLIMIT_NOFILE): 1048576:1048576\n[notice] 1#1: start worker processes\n[notice] 1#1: start worker process 28\n
"},{"location":"podsecurity/","title":"Pod Security Standards","text":"

Since Pod Security Policies have been removed in Kubernetes v1.25, Kubernetes offers Pod Security Standards \u2013 a new way to enhance cluster security.

To enable PSS in k0s you need to create an admission controller config file:

```yaml\napiVersion: apiserver.config.k8s.io/v1\nkind: AdmissionConfiguration\nplugins:\n- name: PodSecurity\n  configuration:\n    apiVersion: pod-security.admission.config.k8s.io/v1beta1\n    kind: PodSecurityConfiguration\n    # Defaults applied when a mode label is not set.\n    defaults:\n      enforce: \"privileged\"\n      enforce-version: \"latest\"\n    exemptions:\n      # Don't forget to exempt namespaces or users that are responsible for deploying\n      # cluster components, because they need to run privileged containers\n      usernames: [\"admin\"]\n      namespaces: [\"kube-system\"]\n```\n

Add these extra arguments to the k0s configuration:

```yaml\napiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nspec:\n  api:\n    extraArgs:\n      admission-control-config-file: /path/to/admission/control/config.yaml\n```\n
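Besides the cluster-wide defaults, Pod Security Standards can also be tuned per namespace with the standard pod-security.kubernetes.io labels. For example (namespace name is illustrative):

# Enforce the "baseline" profile for a single namespace.
kubectl label namespace my-app pod-security.kubernetes.io/enforce=baseline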
"},{"location":"raspberry-pi4/","title":"Create a Raspberry Pi 4 cluster","text":""},{"location":"raspberry-pi4/#prerequisites","title":"Prerequisites","text":"

This guide assumes that you use a Raspberry Pi 4 Model B computer and a sufficiently large SD card of at least 32 GB. We will be using Ubuntu Linux for this guide, although k0s should run quite fine on other 64-bit Linux distributions for the Raspberry Pi as well. Please file a Bug if you encounter any obstacles.

"},{"location":"raspberry-pi4/#set-up-the-system","title":"Set up the system","text":""},{"location":"raspberry-pi4/#prepare-sd-card-and-boot-up-the-raspberry-pi","title":"Prepare SD card and boot up the Raspberry Pi","text":"

Install Ubuntu Server 22.04.1 LTS 64-bit for Raspberry Pi. Ubuntu provides a step by step guide for the installation process. They use Raspberry Pi Imager, a specialized imaging utility that you can use to write the Ubuntu image, amongst others, to your SD cards. Follow that guide to get a working installation. (You can skip part 5 of the guide, since we won't need a Desktop Environment to run k0s.)

Alternatively, you can also opt to download the Ubuntu server image for Raspberry Pi manually and write it to an SD card using a tool like dd:

wget https://cdimage.ubuntu.com/releases/22.04.1/release/ubuntu-22.04.1-preinstalled-server-arm64+raspi.img.xz\nunxz ubuntu-22.04.1-preinstalled-server-arm64+raspi.img.xz\ndd if=ubuntu-22.04.1-preinstalled-server-arm64+raspi.img of=/dev/mmcblk0 bs=4M status=progress\n

Note: The manual process is more prone to accidental data loss than the guided one via Raspberry Pi Imager. Be sure to choose the correct device names. The previous content of the SD card will be wiped. Moreover, the partition written to the SD card needs to be resized to make the full capacity of the card available to Ubuntu. This can be achieved, for example, in this way:

growpart /dev/mmcblk0 2\nresize2fs /dev/mmcblk0p2\n

Ubuntu uses cloud-init to allow for automated customizations of the system configuration. The cloud-init configuration files are located on the boot partition of the SD card. You can mount that partition and modify them, e.g. to provision network configuration, users, authorized SSH keys, additional packages, and also an automatic installation of k0s.
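As a sketch, a cloud-init user-data snippet that fetches k0s on first boot might look like this (purely illustrative; combine it with your own users, SSH keys and network settings):

#cloud-config
runcmd:
  # Install the latest stable k0s via the official download script.
  - curl -sSLf https://get.k0s.sh | sh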

After you have prepared the SD card, plug it into the Raspberry Pi and boot it up. Once cloud-init has finished bootstrapping the system, the default login credentials are set to user ubuntu with password ubuntu (which you will be prompted to change on first login).

"},{"location":"raspberry-pi4/#review-network-configurations","title":"Review network configurations","text":"

Note: For network configuration purposes, this documentation assumes that all of your computers are connected on the same subnet.

Review k0s's required ports and protocols to ensure that your network and firewall configurations allow necessary traffic for the cluster.

Review the Ubuntu Server Networking Configuration documentation to ensure that all systems have a static IP address on the network, or that the network is providing a static DHCP lease for the nodes. If the network should be managed via cloud-init, please refer to their documentation.

"},{"location":"raspberry-pi4/#optional-provision-ssh-keys","title":"(Optional) Provision SSH keys","text":"

Ubuntu Server deploys and enables OpenSSH via cloud-init by default. Confirm, though, that the SSH key of whichever user you will deploy the cluster with on the build system is copied to each node's root user. Before you start, the configuration should be such that the current user can run:

ssh root@${HOST}\n

Where ${HOST} is any node, and the login succeeds with no further prompts.
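If a key is not yet in place, it can usually be copied with ssh-copy-id (assuming password-based root logins are temporarily allowed on the nodes):

# Copy the build machine's public key to a node's root account.
ssh-copy-id root@${HOST}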

"},{"location":"raspberry-pi4/#optional-create-a-swap-file","title":"(Optional) Create a swap file","text":"

While having a swap file is technically optional, it can help to ease memory pressure when running memory-intensive workloads or on Raspberry Pis with less than 8 GB of RAM.

  1. To create a swap file:

    fallocate -l 2G /swapfile && \\\nchmod 0600 /swapfile && \\\nmkswap /swapfile && \\\nswapon -a\n
  2. Ensure that swap usage is not too aggressive by setting vm.swappiness=10 via sudo sysctl (the default is generally higher) and configuring it to be persistent in /etc/sysctl.d/* (see the sketch after this list).

  3. Ensure that your swap is mounted after reboots by confirming that the following line exists in your /etc/fstab configuration:

    /swapfile         none           swap sw       0 0\n
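A minimal sketch for making the swappiness setting survive reboots (the file name is arbitrary):

# Persist vm.swappiness=10 and reload the sysctl settings.
echo 'vm.swappiness=10' | sudo tee /etc/sysctl.d/99-swappiness.conf
sudo sysctl --system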
"},{"location":"raspberry-pi4/#download-k0s","title":"Download k0s","text":"

Download a k0s release. For example:

wget -O /tmp/k0s https://github.com/k0sproject/k0s/releases/download/v1.28.1+k0s.0/k0s-v1.28.1+k0s.0-arm64 # replace version number!\nsudo install /tmp/k0s /usr/local/bin/k0s\n

\u2015 or \u2015

Use the k0s download script (as one command) to download the latest stable k0s and make it executable in /usr/bin/k0s.

curl -sSLf https://get.k0s.sh | sudo sh\n

At this point you can run k0s:

ubuntu@ubuntu:~$ k0s version\nv1.28.1+k0s.0\n

To check if k0s's system requirements and external runtime dependencies are fulfilled by your current setup, you can invoke k0s sysinfo:

ubuntu@ubuntu:~$ k0s sysinfo\nMachine ID: \"d84cde1f38844d1425dc04c454c5aa95e41fb11115bbb141c016f4cd3dea4f51\" (from machine) (pass)\nTotal memory: 3.7 GiB (pass)\nDisk space available for /var/lib/k0s: 24.3 GiB (pass)\nOperating system: Linux (pass)\n  Linux kernel release: 5.15.0-1013-raspi (pass)\n  Max. file descriptors per process: current: 1024 / max: 1048576 (warning: < 65536)\n  AppArmor: unavailable (pass)\n  Executable in path: modprobe: /usr/sbin/modprobe (pass)\n  /proc file system: mounted (0x9fa0) (pass)\n  Control Groups: version 2 (pass)\n    cgroup controller \"cpu\": available (pass)\n    cgroup controller \"cpuacct\": available (via cpu in version 2) (pass)\n    cgroup controller \"cpuset\": available (pass)\n    cgroup controller \"memory\": available (pass)\n    cgroup controller \"devices\": available (assumed) (pass)\n    cgroup controller \"freezer\": available (assumed) (pass)\n    cgroup controller \"pids\": available (pass)\n    cgroup controller \"hugetlb\": available (pass)\n    cgroup controller \"blkio\": available (via io in version 2) (pass)\n  CONFIG_CGROUPS: Control Group support: built-in (pass)\n    CONFIG_CGROUP_FREEZER: Freezer cgroup subsystem: built-in (pass)\n    CONFIG_CGROUP_PIDS: PIDs cgroup subsystem: built-in (pass)\n    CONFIG_CGROUP_DEVICE: Device controller for cgroups: built-in (pass)\n    CONFIG_CPUSETS: Cpuset support: built-in (pass)\n    CONFIG_CGROUP_CPUACCT: Simple CPU accounting cgroup subsystem: built-in (pass)\n    CONFIG_MEMCG: Memory Resource Controller for Control Groups: built-in (pass)\n    CONFIG_CGROUP_HUGETLB: HugeTLB Resource Controller for Control Groups: built-in (pass)\n    CONFIG_CGROUP_SCHED: Group CPU scheduler: built-in (pass)\n      CONFIG_FAIR_GROUP_SCHED: Group scheduling for SCHED_OTHER: built-in (pass)\n        CONFIG_CFS_BANDWIDTH: CPU bandwidth provisioning for FAIR_GROUP_SCHED: built-in (pass)\n    CONFIG_BLK_CGROUP: Block IO controller: built-in (pass)\n  CONFIG_NAMESPACES: Namespaces support: built-in (pass)\n    CONFIG_UTS_NS: UTS namespace: built-in (pass)\n    CONFIG_IPC_NS: IPC namespace: built-in (pass)\n    CONFIG_PID_NS: PID namespace: built-in (pass)\n    CONFIG_NET_NS: Network namespace: built-in (pass)\n  CONFIG_NET: Networking support: built-in (pass)\n    CONFIG_INET: TCP/IP networking: built-in (pass)\n      CONFIG_IPV6: The IPv6 protocol: built-in (pass)\n    CONFIG_NETFILTER: Network packet filtering framework (Netfilter): built-in (pass)\n      CONFIG_NETFILTER_ADVANCED: Advanced netfilter configuration: built-in (pass)\n      CONFIG_NF_CONNTRACK: Netfilter connection tracking support: module (pass)\n      CONFIG_NETFILTER_XTABLES: Netfilter Xtables support: module (pass)\n        CONFIG_NETFILTER_XT_TARGET_REDIRECT: REDIRECT target support: module (pass)\n        CONFIG_NETFILTER_XT_MATCH_COMMENT: \"comment\" match support: module (pass)\n        CONFIG_NETFILTER_XT_MARK: nfmark target and match support: module (pass)\n        CONFIG_NETFILTER_XT_SET: set target and match support: module (pass)\n        CONFIG_NETFILTER_XT_TARGET_MASQUERADE: MASQUERADE target support: module (pass)\n        CONFIG_NETFILTER_XT_NAT: \"SNAT and DNAT\" targets support: module (pass)\n        CONFIG_NETFILTER_XT_MATCH_ADDRTYPE: \"addrtype\" address type match support: module (pass)\n        CONFIG_NETFILTER_XT_MATCH_CONNTRACK: \"conntrack\" connection tracking match support: module (pass)\n        CONFIG_NETFILTER_XT_MATCH_MULTIPORT: \"multiport\" Multiple port match support: module (pass)\n        
CONFIG_NETFILTER_XT_MATCH_RECENT: \"recent\" match support: module (pass)\n        CONFIG_NETFILTER_XT_MATCH_STATISTIC: \"statistic\" match support: module (pass)\n      CONFIG_NETFILTER_NETLINK: module (pass)\n      CONFIG_NF_NAT: module (pass)\n      CONFIG_IP_SET: IP set support: module (pass)\n        CONFIG_IP_SET_HASH_IP: hash:ip set support: module (pass)\n        CONFIG_IP_SET_HASH_NET: hash:net set support: module (pass)\n      CONFIG_IP_VS: IP virtual server support: module (pass)\n        CONFIG_IP_VS_NFCT: Netfilter connection tracking: built-in (pass)\n        CONFIG_IP_VS_SH: Source hashing scheduling: module (pass)\n        CONFIG_IP_VS_RR: Round-robin scheduling: module (pass)\n        CONFIG_IP_VS_WRR: Weighted round-robin scheduling: module (pass)\n      CONFIG_NF_CONNTRACK_IPV4: IPv4 connetion tracking support (required for NAT): unknown (warning)\n      CONFIG_NF_REJECT_IPV4: IPv4 packet rejection: module (pass)\n      CONFIG_NF_NAT_IPV4: IPv4 NAT: unknown (warning)\n      CONFIG_IP_NF_IPTABLES: IP tables support: module (pass)\n        CONFIG_IP_NF_FILTER: Packet filtering: module (pass)\n          CONFIG_IP_NF_TARGET_REJECT: REJECT target support: module (pass)\n        CONFIG_IP_NF_NAT: iptables NAT support: module (pass)\n        CONFIG_IP_NF_MANGLE: Packet mangling: module (pass)\n      CONFIG_NF_DEFRAG_IPV4: module (pass)\n      CONFIG_NF_CONNTRACK_IPV6: IPv6 connetion tracking support (required for NAT): unknown (warning)\n      CONFIG_NF_NAT_IPV6: IPv6 NAT: unknown (warning)\n      CONFIG_IP6_NF_IPTABLES: IP6 tables support: module (pass)\n        CONFIG_IP6_NF_FILTER: Packet filtering: module (pass)\n        CONFIG_IP6_NF_MANGLE: Packet mangling: module (pass)\n        CONFIG_IP6_NF_NAT: ip6tables NAT support: module (pass)\n      CONFIG_NF_DEFRAG_IPV6: module (pass)\n    CONFIG_BRIDGE: 802.1d Ethernet Bridging: module (pass)\n      CONFIG_LLC: module (pass)\n      CONFIG_STP: module (pass)\n  CONFIG_EXT4_FS: The Extended 4 (ext4) filesystem: built-in (pass)\n  CONFIG_PROC_FS: /proc file system support: built-in (pass)\n
"},{"location":"raspberry-pi4/#deploy-a-node","title":"Deploy a node","text":"

Each node can now serve as a control plane node, a worker node, or both.

"},{"location":"raspberry-pi4/#as-single-node","title":"As single node","text":"

This is a self-contained single node setup which runs both control plane components and worker components. If you don't plan to join any more nodes into the cluster, this is for you.

Install the k0scontroller service:

ubuntu@ubuntu:~$ sudo k0s install controller --single\nubuntu@ubuntu:~$ sudo systemctl status k0scontroller.service\n\u25cb k0scontroller.service - k0s - Zero Friction Kubernetes\n     Loaded: loaded (/etc/systemd/system/k0scontroller.service; enabled; vendor preset: enabled)\n     Active: inactive (dead)\n       Docs: https://docs.k0sproject.io\n

Start it:

ubuntu@ubuntu:~$ sudo systemctl start k0scontroller.service\nubuntu@ubuntu:~$ systemctl status k0scontroller.service\n\u25cf k0scontroller.service - k0s - Zero Friction Kubernetes\n     Loaded: loaded (/etc/systemd/system/k0scontroller.service; enabled; vendor preset: enabled)\n     Active: active (running) since Thu 2022-08-18 09:56:02 UTC; 2s ago\n       Docs: https://docs.k0sproject.io\n   Main PID: 2720 (k0s)\n      Tasks: 10\n     Memory: 24.7M\n        CPU: 4.654s\n     CGroup: /system.slice/k0scontroller.service\n             \u2514\u25002720 /usr/local/bin/k0s controller --single=true\n\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] received CSR\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] generating key: rsa-2048\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] received CSR\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] generating key: rsa-2048\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] received CSR\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] generating key: rsa-2048\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] encoded CSR\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] signed certificate with serial number 6275509116227039894094374442676315636193163621\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] encoded CSR\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] signed certificate with serial number 336800507542010809697469355930007636411790073226\n

When the cluster is up, try to have a look:

ubuntu@ubuntu:~$ sudo k0s kc get nodes -owide\nNAME     STATUS   ROLES           AGE     VERSION       INTERNAL-IP    EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME\nubuntu   Ready    control-plane   4m41s   v1.28.1+k0s   10.152.56.54   <none>        Ubuntu 22.04.1 LTS   5.15.0-1013-raspi   containerd://1.7.2\nubuntu@ubuntu:~$ sudo k0s kc get pod -owide -A\nNAMESPACE     NAME                              READY   STATUS    RESTARTS   AGE     IP             NODE     NOMINATED NODE   READINESS GATES\nkube-system   kube-proxy-kkv2l                  1/1     Running   0          4m44s   10.152.56.54   ubuntu   <none>           <none>\nkube-system   kube-router-vf2pv                 1/1     Running   0          4m44s   10.152.56.54   ubuntu   <none>           <none>\nkube-system   coredns-88b745646-wd4mp           1/1     Running   0          5m10s   10.244.0.2     ubuntu   <none>           <none>\nkube-system   metrics-server-7d7c4887f4-ssk49   1/1     Running   0          5m6s    10.244.0.3     ubuntu   <none>           <none>\n

Overall, the single k0s node uses less than 1 GiB of RAM:

ubuntu@ubuntu:~$ free -h\n               total        used        free      shared  buff/cache   available\nMem:           3.7Gi       715Mi       1.3Gi       3.0Mi       1.7Gi       2.8Gi\nSwap:             0B          0B          0B\n
"},{"location":"raspberry-pi4/#as-a-controller-node","title":"As a controller node","text":"

This will install k0s as a single non-HA controller. It won't be able to run any workloads, so you need to connect more workers to it.

Install the k0scontroller service. Note that we're not specifying any flags:

ubuntu@ubuntu:~$ sudo k0s install controller\nubuntu@ubuntu:~$ systemctl status k0scontroller.service\n\u25cb k0scontroller.service - k0s - Zero Friction Kubernetes\n     Loaded: loaded (/etc/systemd/system/k0scontroller.service; enabled; vendor preset: enabled)\n     Active: inactive (dead)\n       Docs: https://docs.k0sproject.io\n

Start it:

ubuntu@ubuntu:~$ sudo systemctl start k0scontroller.service\nubuntu@ubuntu:~$ systemctl status k0scontroller.service\n\u25cf k0scontroller.service - k0s - Zero Friction Kubernetes\n     Loaded: loaded (/etc/systemd/system/k0scontroller.service; enabled; vendor preset: enabled)\n     Active: active (running) since Thu 2022-08-18 10:31:07 UTC; 3s ago\n       Docs: https://docs.k0sproject.io\n   Main PID: 1176 (k0s)\n      Tasks: 10\n     Memory: 30.2M\n        CPU: 8.936s\n     CGroup: /system.slice/k0scontroller.service\n             \u2514\u25001176 /usr/local/bin/k0s controller\n\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] signed certificate with serial number 723202396395786987172578079268287418983457689579\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] encoded CSR\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] signed certificate with serial number 36297085497443583023060005045470362249819432477\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] encoded CSR\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] encoded CSR\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] signed certificate with serial number 728910847354665355109188021924183608444435075827\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] generate received request\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] received CSR\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] generating key: rsa-2048\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] signed certificate with serial number 718948898553094584370065610752227487244528071083\n

As soon as the controller is up, we can try to inspect the API as we did for the single node:

ubuntu@ubuntu:~$ sudo k0s kc get nodes -owide\nNo resources found\nubuntu@ubuntu:~$ sudo k0s kc get pod -owide -A\nNAMESPACE     NAME                              READY   STATUS    RESTARTS   AGE   IP       NODE     NOMINATED NODE   READINESS GATES\nkube-system   coredns-88b745646-6tpwm           0/1     Pending   0          29s   <none>   <none>   <none>           <none>\nkube-system   metrics-server-7d7c4887f4-9k5k5   0/1     Pending   0          24s   <none>   <none>   <none>           <none>\n

As you can see, there are no nodes and two pending pods: a control plane without workers. The memory consumption is lower than that of the single-node controller, but not by much:

ubuntu@ubuntu:~$ free -h\n               total        used        free      shared  buff/cache   available\nMem:           3.7Gi       678Mi       2.3Gi       3.0Mi       758Mi       2.9Gi\nSwap:             0B          0B          0B\n

This controller runs a full-fledged control plane, backed by etcd, as opposed to the lightweight kine-based one from the single node example. For the latter, k0s doesn't support joining new nodes.

More nodes can be added by creating join tokens. To add a worker node, create a token for it:

ubuntu@ubuntu:~$ sudo k0s token create --role worker\nH4sIAAAAAAAC/2yV0Y6jPBKF7/MUeYGZ30DonUTai5+Ak5DgbhuXHXwHmAnBhtAJHdKs9t1XnZmRdqW9K1cdfceyrDqzvD+L6no7X7rV/O7MSvtxG6rrbTX7Nv9dr2bz+Xx+q6736rqa18PQ31Z//eWg747vfvdfvvuL1cti4T1VZXUdzj/PZT5U3/KPob5cz8PnN50P+Wp+SNFwSJ01Ax3zcxAyEUMKKqYIA3vO0LA2TpwCC1hEQipFrxD2UogDhawQobWJY297jxHBCdbS70hIvWKTOMWGBcwhgUaMSegPhdPH+VY13GDGYNxTiwONdMSEJtTiLeVYMMALDn6dOKqXtt5r0WfQPpqK43cpWKBAecnWktxEiAvWVZEDghPCorhmXTlWp/7PTPz3jEPcVZF6p0KsFfIlNZiIiB11iFUhlJ+1jkxwn/EjU4kRnnI1zsEJkkiH4OHt2pI4a0gEINZUYEEhQinEkUb4qU0Rvn+9CQD5UKJ0dKfG1NVZ2dWCcfCkHFDKycjbYZuGIsk5DngY7Svcn3N5mdIGm1yylkU+Srcxyiy7l50ZRUTvGqtcNuK9QAvEjcihu4yJh/sipC5xy4nBssut9UrcB6nENz72JnfxKLBmxAseZftgyhHvfLIjaeK+PNYX2tmwkKQrGjPlSFAI2VRKmyZmidjnsGCefRfe6Vp4p6veBk0FCtaN/uBu7JAp9kS6nFKDCQvxVUXYsGPiFji+VU05UtFvdLt8oVK8JRE+5m6fZfbvBcGa8QhH0pzG6vxjLEOSEJvtZdRvhNSywNmCejEihiRMYp/IH34utZc6GpdwWwgbc9Hhh5Q+4ushLeXJEZ6t85YBCLxTTfwmGhyWW+HC2B+AE1DnYdK4l9pYJ/P0jhn1mrsq1MbHKYqcRO6cyuAQQG/kRlsq2aOK/HVp2FZKDVRqQg0OmNuz3MTB2jgBiXSQCGHYVmN6XnoAItDIrmnbBxDFHbdqB8ZZU5ktGMRAgQUApzuH3chQ9BCSRcrBR2riVCHxBt5ln3kYlXKxKKI6JEizV4wn3tWyMMk1N/iVtvpayvqaQ+nrKfj6gxMzOOCIBF/+cBQv4JG4AnATe0GZjUNy6gcWkkG5CJGpntKGTnzb472XfeqtekuQzqsWua+bpaw2j9d0ih02YZauh5y4/v7gqZzY2lYmVuWkahFqzF0cri1jbPu3n4d6nVp10G4fVw3OZbp8VabfaQfvtWN9zYNOdfVYmIWjz4PMzOOFmv5Nb3u39CgqXdUCth4xyxrwaQ8Oc3On9xIet3mHmewCj7kJgmP/pr3os5i0oLx+1+4yyj1mcwuTmDIko50DpndhWwNxHwcQQSuEGFljI0Z7lYJ1EhgnguJ3PukPYXr3VbJYOCdE5ECSFpBqgrDEpzFzRSfFxSUgIrJhUQZxW5jazxpCk445CfK3RMbHdcOGtL2N0O7uAuyCId8A0izZ4B2EseQb55EgwVX7+CyjmB9c1eSTVQXeLWiDj4CjUW7ZXXl9nR7pqDYKUXnZqyZ4r46x98bR/vduxtzQE0UiFZHdpEACEcFzLx/o5Z+z+bzL22o1N+g2Ky/dUD2GXznxq/6VE39C46n6anzcnqePorLV8K24XIbbcM37/6V9XK9VN3z7Q3o2zbnTq/n60v08n2b9tfpZXauurG6r+b/+PfuiPs1/Q/4P/mn8vMJwMVW3mrvL84/lj+8N8ia/uZ/Lf2izWFb57D8BAAD//zANvmsEBwAA\n

Save the join token for subsequent steps.
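If you prefer not to copy the long string around by hand, you can also write it straight to a file when creating it (file name is illustrative) and transfer that file to the worker:

# Create the worker join token and store it for later transfer, e.g. with scp.
sudo k0s token create --role worker > worker-token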

"},{"location":"raspberry-pi4/#as-a-worker-node","title":"As a worker node","text":"

To join an existing k0s cluster, create the join token file for the worker (where $TOKEN_CONTENT is one of the join tokens created in the control plane setup):

sudo sh -c 'mkdir -p /var/lib/k0s/ && umask 077 && echo \"$TOKEN_CONTENT\" > /var/lib/k0s/join-token'\n

After that, install the k0sworker service:

ubuntu@ubuntu:~$ sudo k0s install worker --token-file /var/lib/k0s/join-token\nubuntu@ubuntu:~$ systemctl status k0sworker.service\n\u25cb k0sworker.service - k0s - Zero Friction Kubernetes\n     Loaded: loaded (/etc/systemd/system/k0sworker.service; enabled; vendor preset: enabled)\n     Active: inactive (dead)\n       Docs: https://docs.k0sproject.io\n

Start the service:

ubuntu@ubuntu:~$ sudo systemctl start k0sworker.service\nubuntu@ubuntu:~$ systemctl status k0sworker.service\n\u25cf k0sworker.service - k0s - Zero Friction Kubernetes\n     Loaded: loaded (/etc/systemd/system/k0sworker.service; enabled; vendor preset: enabled)\n     Active: active (running) since Thu 2022-08-18 13:48:58 UTC; 2s ago\n       Docs: https://docs.k0sproject.io\n   Main PID: 1631 (k0s)\n      Tasks: 22\n     Memory: 181.7M\n        CPU: 4.010s\n     CGroup: /system.slice/k0sworker.service\n             \u251c\u25001631 /usr/local/bin/k0s worker --token-file=/var/lib/k0s/join-token\n             \u2514\u25001643 /var/lib/k0s/bin/containerd --root=/var/lib/k0s/containerd --state=/run/k0s/containerd --address=/run/k0s/containerd.sock --log-level=info --config=/etc/k0s/containerd.toml\n\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"Starting to supervise\" component=containerd\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"Started successfully, go nuts pid 1643\" component=containerd\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"starting OCIBundleReconciler\"\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"starting Kubelet\"\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"Starting kubelet\"\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"detected 127.0.0.53 nameserver, assuming systemd-resolved, so using resolv.conf: /run/systemd/resolve/resolv.conf\"\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"Starting to supervise\" component=kubelet\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"Started successfully, go nuts pid 1648\" component=kubelet\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"starting Status\"\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"starting Autopilot\"\n

As this is a worker node, we cannot access the Kubernetes API via the built-in k0s kc subcommand, but we can check the k0s API instead:

ubuntu@ubuntu:~$ sudo k0s status\nVersion: v1.28.1+k0s.0\nProcess ID: 1631\nRole: worker\nWorkloads: true\nSingleNode: false\n

The memory requirements are also pretty low:

ubuntu@ubuntu:~$ free -h\n               total        used        free      shared  buff/cache   available\nMem:           3.7Gi       336Mi       2.1Gi       3.0Mi       1.2Gi       3.2Gi\nSwap:             0B          0B          0B\n
"},{"location":"raspberry-pi4/#connect-to-the-cluster","title":"Connect to the cluster","text":"

On a controller node, generate a new raspi-cluster-master user with admin rights and get a kubeconfig for it:

ubuntu@ubuntu:~$ sudo k0s kc create clusterrolebinding raspi-cluster-master-admin --clusterrole=admin --user=raspi-cluster-master\nclusterrolebinding.rbac.authorization.k8s.io/raspi-cluster-master-admin created\nubuntu@ubuntu:~$ sudo k0s kubeconfig create --groups system:masters raspi-cluster-master\n\napiVersion: v1\nclusters:\n- cluster:\n    server: https://10.152.56.54:6443\n    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURBRENDQWVpZ0F3SUJBZ0lVT2RSVzdWdm83UWR5dmdFZHRUK1V3WDN2YXdvd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0dERVdNQlFHQTFVRUF4TU5hM1ZpWlhKdVpYUmxjeTFqWVRBZUZ3MHlNakE0TVRneE5EQTFNREJhRncwegpNakE0TVRVeE5EQTFNREJhTUJneEZqQVVCZ05WQkFNVERXdDFZbVZ5Ym1WMFpYTXRZMkV3Z2dFaU1BMEdDU3FHClNJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUURsdy8wRFJtcG1xRjVnVElmN1o5bElRN0RFdUp6WDJLN1MKcWNvYk5oallFanBqbnBDaXFYOSt5T1R2cGgyUlRKN2tvaGkvUGxrYm5oM2pkeVQ3NWxSMGowSkV1elRMaUdJcApoR2pqc3htek5RRWVwb210R0JwZXNGeUE3NmxTNVp6WVJtT0lFQVgwb0liWjBZazhuU3pQaXBsWDMwcTFETEhGCkVIcSsyZG9vVXRIb09EaEdmWFRJTUJsclZCV3dCV3cxbmdnN0dKb01TN2tHblpYaUw2NFBiRDg5NmtjYXo0a28KTXhhZGc1ZmZQNStBV3JIVHhKV1d2YjNCMjEyOWx3R3FiOHhMTCt1cnVISHVjNEh4em9OVUt1WUlXc2lvQWp4YgphdDh6M1QwV2RnSit2VithWWlRNFlLeEVFdFB4cEMvUHk0czU0UHF4RzVZa0hiMDczMEUxQWdNQkFBR2pRakJBCk1BNEdBMVVkRHdFQi93UUVBd0lCQmpBUEJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJTd2p4STIKRUxVNCtNZUtwT0JNQUNnZDdKU1QxVEFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBQ3k3dHFFMk5WT3E0Z0I1Ngp2clVZMFU0SWp1c0dUN0UzQ2xqSUtQODk2Mm9xdlpvU0NWb2U5YS9UQTR6ZXYrSXJwaTZ1QXFxc3RmT3JFcDJ4CmVwMWdYZHQrbG5nV0xlbXdWdEVOZ0xvSnBTM09Vc3N1ai9XcmJwSVU4M04xWVJTRzdzU21KdXhpa3pnVUhiUk8KZ01SLzIxSDFESzJFdmdQY2pHWXlGbUQzSXQzSjVNcnNiUHZTRG4rUzdWWWF0eWhIMUo4dmwxVDFpbzRWWjRTNgpJRFlaV05JOU10TUpqcGxXL01pRnlwTUhFU1E3UEhHeHpGVExoWFplS0pKSlRPYXFha1AxM3J1WFByVHVDQkl4CkFCSWQraU9qdGhSU3ZxbTFocGtHcmY4Rm9PdG1PYXZmazdDdnNJTWdUV2pqd2JJZWZIRU8zUmVBMzZWZWV3bXoKOFJHVUtBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n  name: k0s\ncontexts:\n- context:\n    cluster: k0s\n    user: raspi-cluster-master\n  name: k0s\ncurrent-context: k0s\nkind: Config\npreferences: {}\nusers:\n- name: raspi-cluster-master\n  user:\n    client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURYVENDQWtXZ0F3SUJBZ0lVV0ZZNkZ4cCtUYnhxQUxTVjM0REVMb0dEc3Q0d0RRWUpLb1pJaHZjTkFRRUwKQlFBd0dERVdNQlFHQTFVRUF4TU5hM1ZpWlhKdVpYUmxjeTFqWVRBZUZ3MHlNakE0TVRneE5ERTRNREJhRncweQpNekE0TVRneE5ERTRNREJhTURneEZ6QVZCZ05WQkFvVERuTjVjM1JsYlRwdFlYTjBaWEp6TVIwd0d3WURWUVFECkV4UnlZWE53YVMxamJIVnpkR1Z5TFcxaGMzUmxjakNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0MKQVFvQ2dnRUJBTGJNalI5eHA1dDJzank1S0dEQnQ2dWl3QU4vaEhwZkFUNXJrZTFRblc2eFlZeDYzR2JBTXYrRQpjWmEyUEdPempQeVVTZThVdWp4ZnR0L1JWSTJRVkVIRGlJZ1ZDNk1tUUFmTm1WVlpKOHBFaTM2dGJZYUVxN3dxClhxYmJBQ0F0ZGtwNTJ0Y0RLVU9sRS9SV0tUSjN4bXUvRmh0OTIrRDdtM1RrZTE0TkJ5a1hvakk1a2xVWU9ySEMKVTN3V210eXlIUFpDMFBPdWpXSE5yeS9wOXFjZzRreWNDN0NzUVZqMWoxY2JwdXRpWllvRHNHV3piS0RTbExRZApyYnUwRnRVZVpUQzVPN2NuTk5tMU1EZldubXhlekw4L2N5dkJCYnRmMjhmcERFeEhMT2dTY2ZZUlZwUllPMzdvCk5yUjljMGNaZE9oZW5YVnlQcU1WVVlSNkQxMlRrY0VDQXdFQUFhTi9NSDB3RGdZRFZSMFBBUUgvQkFRREFnV2cKTUIwR0ExVWRKUVFXTUJRR0NDc0dBUVVGQndNQkJnZ3JCZ0VGQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUIwRwpBMVVkRGdRV0JCUitqQTlGNm1jc25ob2NtMnd0dFNYY2tCaUpoakFmQmdOVkhTTUVHREFXZ0JTd2p4STJFTFU0CitNZUtwT0JNQUNnZDdKU1QxVEFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBY2RRV3N4OUpHOUIxckxVc2Y1QzgKd1BzTkhkZURYeG1idm4zbXN3aFdVMEZHU1pjWjlkMTYzeXhEWnA4QlNzNWFjNnZqcU1lWlFyRThDUXdXYTlxVAowZVJXcTlFODYzcS9VcFVNN3lPM1BnMHd4RWtQSTVuSjRkM0o3MHA3Zk4zenpzMUJzU0h6Q2hzOWR4dE5XaVp5CnNINzdhbG9NanA0cXBEVWRyVWcyT0d4RWhRdzJIaXE3ZEprQm80a3hoWmhBc3lWTDdZRng0SDY3WkIzSjY4V3QKdTdiWnRmUVJZV3ZPUE9oS0pFdmlLVXptNDJBUlZXTDdhZHVESTBBNmpxbXhkTGNxKzlNWVlaNm1CT0NWakx1WgoybDlJSVI2NkdjOUdpdC9kSFdwbTVZbmozeW8xcUU0UVg4ZmVUQTczUlU5cmFIdkNpTGdVbFRaVUNGa3JNL0NtCndBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n    client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdHN5TkgzR25tM2F5UExrb1lNRzNxNkxBQTMrRWVsOEJQbXVSN1ZDZGJyRmhqSHJjClpzQXkvNFJ4bHJZOFk3T00vSlJKN3hTNlBGKzIzOUZValpCVVFjT0lpQlVMb3laQUI4MlpWVmtueWtTTGZxMXQKaG9TcnZDcGVwdHNBSUMxMlNubmExd01wUTZVVDlGWXBNbmZHYTc4V0czM2I0UHViZE9SN1hnMEhLUmVpTWptUwpWUmc2c2NKVGZCYWEzTEljOWtMUTg2Nk5ZYzJ2TCtuMnB5RGlUSndMc0t4QldQV1BWeHVtNjJKbGlnT3daYk5zCm9OS1V0QjJ0dTdRVzFSNWxNTGs3dHljMDJiVXdOOWFlYkY3TXZ6OXpLOEVGdTEvYngra01URWNzNkJKeDloRlcKbEZnN2Z1ZzJ0SDF6UnhsMDZGNmRkWEkrb3hWUmhIb1BYWk9Sd1FJREFRQUJBb0lCQUFpYytzbFFnYVZCb29SWgo5UjBhQTUyQ3ZhbHNpTUY3V0lPb2JlZlF0SnBTb1ZZTk0vVmplUU94S2VrQURUaGxiVzg1VFlLR1o0QVF3bjBwClQrS2J1bHllNmYvL2ZkemlJSUk5bmN2M3QzaEFZcEpGZWJPczdLcWhGSFNvUFFsSEd4dkhRaGgvZmFKQ1ZQNWUKVVBLZjBpbWhoMWtrUlFnRTB2NWZCYkVZekEyVGl4bThJSGtQUkdmZWN4WmF1VHpBS2VLR0hjTFpDem8xRHhlSgp3bHpEUW9YWDdHQnY5MGxqR1pndENXcFEyRUxaZ1NwdW0rZ0crekg1WFNXZXgwMzJ4d0NhbkdDdGcyRmxHd2V2Ck9PaG8zSjNrRWVJR1MzSzFJY24rcU9hMjRGZmgvcmRsWXFSdStWeEZ4ZkZqWGxaUjdjZkF4Mnc1Z3NmWm9CRXIKUE1oMTdVRUNnWUVBejZiTDc4RWsvZU1jczF6aWdaVVpZcE5qa2FuWHlsS3NUUWM1dU1pRmNORFdObFkxdlQzVQprOHE5cHVLbnBZRVlTTGVVTS9tSWk5TVp6bmZjSmJSL0hJSG9YVjFMQVJ2blQ0djN3T0JsaDc5ajdKUjBpOW1OClYrR0Q1SlNPUmZCVmYxVlJHRXN6d3ZhOVJsS2lMZ0JVM2tKeWN2Q09jYm5aeFltSXRrbDhDbXNDZ1lFQTRWeG4KZTY2QURIYmR3T0plbEFSKytkVHh5eVYyRjY1SEZDNldPQVh2RVRucGRudnRRUUprWWhNYzM1Y2gvMldmZDBWYQpZb3lGZE9kRThKZSsvcWxuS1pBc3BHRC9yZHp2VmFteHQ4WXdrQXU5Q1diZWw2VENPYkZOQ2hjK1NUbmRqN0duCmlSUHprM1JYMnBEVi9OaW5FVFA0TEJnTHJQYkxlSVAwSzZ4bjk0TUNnWUVBeXZGMmNVendUVjRRNTgrSTVDS0gKVzhzMnpkOFRzbjVZUFRRcG1zb0hlTG55RWNyeDNKRTRXSFVXSTZ0ek01TFczQUxuU21DL3JnQlVRWER0Yk1CYQpWczh6L1VPM2tVN25JOXhrK0ZHWGlUTnBnb2VZM0RGMExZYVBNL0JvbUR3S0kxZUwyVlZ1TWthWnQ4ZjlEejV0CnM0ZDNlWlJYY3hpem1KY1JVUzdDbHg4Q2dZQk45Vmc2K2RlRCtFNm4zZWNYenlKWnJHZGtmZllISlJ1amlLWWcKaFRUNVFZNVlsWEF5Yi9CbjJQTEJDaGdSc0lia2pKSkN5eGVUcERrOS9WQnQ2ZzRzMjVvRjF5UTdjZFU5VGZHVApnRFRtYjVrYU9vSy85SmZYdTFUS0s5
WTVJSkpibGZvOXVqQWxqemFnL2o5NE16NC8vamxZajR6aWJaRmZoRTRnCkdZanhud0tCZ0U1cFIwMlVCa1hYL3IvdjRqck52enNDSjR5V3U2aWtpem00UmJKUXJVdEVNd1Y3a2JjNEs0VFIKM2s1blo1M1J4OGhjYTlMbXREcDJIRWo2MlBpL2pMR0JTN0NhOCtQcStxNjZwWWFZTDAwWnc4UGI3OVMrUmpzQQpONkNuQWg1dDFYeDhVMTIvWm9JcjBpOWZDaERuNlBqVEM0MVh5M1EwWWd6TW5jYXMyNVBiCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==\n

Using the above kubeconfig, you can now access and use the cluster:

ubuntu@ubuntu:~$ KUBECONFIG=/path/to/kubeconfig kubectl get nodes,deployments,pods -owide -A\nNAME          STATUS   ROLES    AGE    VERSION       INTERNAL-IP    EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME\nnode/ubuntu   Ready    <none>   5m1s   v1.28.1+k0s   10.152.56.54   <none>        Ubuntu 22.04.1 LTS   5.15.0-1013-raspi   containerd://1.7.2\n\nNAMESPACE     NAME                             READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS       IMAGES                                                 SELECTOR\nkube-system   deployment.apps/coredns          1/1     1            1           33m   coredns          registry.k8s.io/coredns/coredns:v1.7.0                 k8s-app=kube-dns\nkube-system   deployment.apps/metrics-server   1/1     1            1           33m   metrics-server   registry.k8s.io/metrics-server/metrics-server:v0.6.4   k8s-app=metrics-server\n\nNAMESPACE     NAME                                  READY   STATUS    RESTARTS   AGE    IP             NODE     NOMINATED NODE   READINESS GATES\nkube-system   pod/coredns-88b745646-pkk5w           1/1     Running   0          33m    10.244.0.5     ubuntu   <none>           <none>\nkube-system   pod/konnectivity-agent-h4nfj          1/1     Running   0          5m1s   10.244.0.6     ubuntu   <none>           <none>\nkube-system   pod/kube-proxy-qcgzs                  1/1     Running   0          5m1s   10.152.56.54   ubuntu   <none>           <none>\nkube-system   pod/kube-router-6lrht                 1/1     Running   0          5m1s   10.152.56.54   ubuntu   <none>           <none>\nkube-system   pod/metrics-server-7d7c4887f4-wwbkk   1/1     Running   0          33m    10.244.0.4     ubuntu   <none>           <none>\n
"},{"location":"reinstall-k0sctl/","title":"Reinstall a node","text":"

k0sctl currently does not support changing all of containerd's configuration (such as its state and root directories) on the fly.

For example, in order to move containerd's root directory to a new partition/drive, you have to provide --data-dir /new/drive in your k0sctl installFlags for each (worker) node. --data-dir is a k0s option that is then added to the service unit.

The following is an example of that:

# spec.hosts[*].installFlags\n- role: worker\ninstallFlags:\n- --profile flatcar\n- --enable-cloud-provider\n- --data-dir /new/drive\n- --kubelet-extra-args=\"--cloud-provider=external\"\n

However, the installFlags are only used when the node is installed.

"},{"location":"reinstall-k0sctl/#steps","title":"Steps","text":"

Drain the node:

kubectl drain node.hostname\n

Access your node (e.g. via ssh) to stop and reset k0s:

sudo k0s stop\nsudo k0s reset\n

Reboot the node (for good measure):

sudo systemctl reboot\n

Once the node is available again, run k0sctl apply to integrate it into your cluster and uncordon the node to allow pods to be scheduled:

k0sctl apply -c config.yaml\nkubectl uncordon node.hostname\n
"},{"location":"releases/","title":"Releases","text":"

This page describes how we release and support the k0s project. Mirantis Inc. can also provide commercial support for k0s.

"},{"location":"releases/#upstream-kubernetes-release-support-cycle","title":"Upstream Kubernetes release & support cycle","text":"

This release and support cycle is followed for ALL new minor releases. A minor release is, for example, 1.25 or 1.26, and so on. In practice, this means that a new minor release is published every four months.

After a minor release is published, the upstream community maintains it for 14 months. Maintenance in this case means that upstream Kubernetes provides bug fixes, CVE mitigations and the like for 14 months per minor release.

"},{"location":"releases/#k0s-release-and-support-model","title":"k0s release and support model","text":"

Starting with k0s 1.21, k0s follows the Kubernetes project's release and support model.

The k0s project closely follows the upstream Kubernetes release cycle. The only difference from the upstream Kubernetes release/maintenance schedule is that our initial release date is always a few weeks behind the upstream Kubernetes release date, as we build k0s from the officially released version of Kubernetes and need time to test the final version before shipping.

Since upstream Kubernetes provides support and patch releases for a minor version for roughly 14 months, k0s follows the same model: each minor release is maintained for roughly 14 months after its initial release.

The k0s project typically picks up the patches and fixes from each upstream Kubernetes patch release, together with any fixes needed in the k0s codebase itself. For example, if a bug is identified in the 1.26 series, the k0s project creates and ships a fix for it with the next upstream Kubernetes 1.26.x release. In rare cases where a critical bug is identified, we may also ship \u201cout of band\u201d patches. Such an out-of-band release is identified by the version string suffix. For example, a normal release following Kubernetes upstream would be 1.26.3+k0s.0, whereas a critical out-of-band patch would be identified as 1.26.3+k0s.1.

"},{"location":"releases/#new-features-and-enhancements","title":"New features and enhancements","text":"

The biggest new k0s features will typically only be delivered on top of the latest Kubernetes version, but smaller enhancements can be included in older release tracks as well.

"},{"location":"releases/#version-string","title":"Version string","text":"

The k0s version string consists of the Kubernetes version and the k0s version. For example:

  • v1.28.1+k0s.0

The Kubernetes version (1.28.1) is the first part, and the last part (k0s.0) reflects the k0s version, which is built on top of that Kubernetes version.

"},{"location":"remove_controller/","title":"Remove or replace a controller","text":"

You can manually remove or replace a controller from a multi-node k0s cluster (>=3 controllers) without downtime. However, you have to maintain quorum on Etcd while doing so.

"},{"location":"remove_controller/#remove-a-controller","title":"Remove a controller","text":"

If your controller is also a worker (k0s controller --enable-worker), you first have to delete the controller from Kubernetes itself. To do so, run the following commands from the controller:

# Remove the containers from the node and cordon it\nk0s kubectl drain --ignore-daemonsets --delete-emptydir-data <controller>\n# Delete the node from the cluster\nk0s kubectl delete node <controller>\n

Then you need to remove it from the Etcd cluster. For example, if you want to remove controller01 from a cluster with 3 controllers:

# First, list the Etcd members\nk0s etcd member-list\n{\"members\":{\"controller01\":\"<PEER_ADDRESS1>\", \"controller02\": \"<PEER_ADDRESS2>\", \"controller03\": \"<PEER_ADDRESS3>\"}}\n# Then, remove the controller01 using its peer address\nk0s etcd leave --peer-address \"<PEER_ADDRESS1>\"\n

The controller is now removed from the cluster. To reset k0s on the machine, run the following commands:

k0s stop\nk0s reset\nreboot\n
"},{"location":"remove_controller/#replace-a-controller","title":"Replace a controller","text":"

To replace a controller, first remove the old controller (as described above), then follow the manual installation procedure to add the new one.

"},{"location":"reset/","title":"Uninstall/Reset","text":"

k0s can be uninstalled locally with the k0s reset command and remotely with the k0sctl reset command. Both remove all k0s-related files from the host.

reset operates under the assumption that k0s is installed as a service on the host.

"},{"location":"reset/#uninstall-a-k0s-node-locally","title":"Uninstall a k0s node locally","text":"

To prevent accidental triggering, k0s reset will not run if the k0s service is running, so you must first stop the service:

  1. Stop the service:

    sudo k0s stop\n
  2. Invoke the reset command:

    $ sudo k0s reset\nINFO[2021-06-29 13:08:39] * containers steps\nINFO[2021-06-29 13:08:44] successfully removed k0s containers!\nINFO[2021-06-29 13:08:44] no config file given, using defaults\nINFO[2021-06-29 13:08:44] * remove k0s users step:\nINFO[2021-06-29 13:08:44] no config file given, using defaults\nINFO[2021-06-29 13:08:44] * uninstall service step\nINFO[2021-06-29 13:08:44] Uninstalling the k0s service\nINFO[2021-06-29 13:08:45] * remove directories step\nINFO[2021-06-29 13:08:45] * CNI leftovers cleanup step\nINFO k0s cleanup operations done. To ensure a full reset, a node reboot is recommended.\n
"},{"location":"reset/#uninstall-a-k0s-cluster-using-k0sctl","title":"Uninstall a k0s cluster using k0sctl","text":"

k0sctl can be used to connect to each node and remove all k0s-related files and processes from the hosts.

  1. Invoke k0sctl reset command:
    $ k0sctl reset --config k0sctl.yaml\nk0sctl v0.9.0 Copyright 2021, k0sctl authors.\n\n? Going to reset all of the hosts, which will destroy all configuration and data, Are you sure? Yes\nINFO ==> Running phase: Connect to hosts \nINFO [ssh] 13.53.43.63:22: connected              \nINFO [ssh] 13.53.218.149:22: connected            INFO ==> Running phase: Detect host operating systems \nINFO [ssh] 13.53.43.63:22: is running Ubuntu 20.04.2 LTS \nINFO [ssh] 13.53.218.149:22: is running Ubuntu 20.04.2 LTS INFO ==> Running phase: Prepare hosts    INFO ==> Running phase: Gather k0s facts \nINFO [ssh] 13.53.43.63:22: found existing configuration \nINFO [ssh] 13.53.43.63:22: is running k0s controller version 1.28.1+k0s.0\nINFO [ssh] 13.53.218.149:22: is running k0s worker version 1.28.1+k0s.0\nINFO [ssh] 13.53.43.63:22: checking if worker  has joined INFO ==> Running phase: Reset hosts      \nINFO [ssh] 13.53.43.63:22: stopping k0s           \nINFO [ssh] 13.53.218.149:22: stopping k0s         \nINFO [ssh] 13.53.218.149:22: running k0s reset    \nINFO [ssh] 13.53.43.63:22: running k0s reset      INFO ==> Running phase: Disconnect from hosts INFO ==> Finished in 8s                  
"},{"location":"runtime/","title":"Runtime","text":"

k0s uses containerd as the default Container Runtime Interface (CRI) and runc as the default low-level runtime. In most cases they don't require any configuration changes. However, if custom configuration is needed, this page provides some examples.

"},{"location":"runtime/#containerd-configuration","title":"containerd configuration","text":"

By default k0s manages the full containerd configuration. Users have the option of fully overriding, and thus also managing, the configuration themselves.

"},{"location":"runtime/#user-managed-containerd-configuration","title":"User managed containerd configuration","text":"

In the default k0s-generated configuration there's a \"magic\" comment telling k0s that the file is k0s managed:

# k0s_managed=true\n

If you wish to take over the configuration management, remove this line.

To make changes to the containerd configuration, you must first generate a default containerd configuration, with the default values, into /etc/k0s/containerd.toml:

containerd config default > /etc/k0s/containerd.toml\n

k0s runs containerd with the following default values:

/var/lib/k0s/bin/containerd \\\n--root=/var/lib/k0s/containerd \\\n--state=/run/k0s/containerd \\\n--address=/run/k0s/containerd.sock \\\n--config=/etc/k0s/containerd.toml\n

Next, add the following default values to the configuration file:

version = 2\nroot = \"/var/lib/k0s/containerd\"\nstate = \"/run/k0s/containerd\"\n...\n\n[grpc]\naddress = \"/run/k0s/containerd.sock\"\n
"},{"location":"runtime/#k0s-managed-dynamic-runtime-configuration","title":"k0s managed dynamic runtime configuration","text":"

From 1.27.1 onwards, k0s enables dynamic configuration of containerd CRI runtimes. This works by k0s creating a special directory, /etc/k0s/containerd.d/, where users can drop in partial containerd configuration snippets.

k0s automatically picks up these files and adds them to the containerd configuration's imports list. If k0s sees that the configuration drop-ins are CRI-related, it collects all of them into a single file and adds that as a single import. This is to overcome a hard limitation in containerd 1.x versions. Read more at containerd#8056.
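
As a minimal sketch, a CRI-related drop-in could look like the following (the file name registry.toml and the docker.io mirror endpoint are only illustrative; any valid partial containerd configuration works the same way):

cat <<EOF | sudo tee /etc/k0s/containerd.d/registry.toml\nversion = 2\n\n[plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors.\"docker.io\"]\n  endpoint = [\"https://registry-1.docker.io\"]\nEOF\n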

"},{"location":"runtime/#examples","title":"Examples","text":"

The following chapters provide some examples of how to configure different runtimes for containerd using k0s-managed drop-in configurations.

"},{"location":"runtime/#using-gvisor","title":"Using gVisor","text":"

gVisor is an application kernel, written in Go, that implements a substantial portion of the Linux system call interface. It provides an additional layer of isolation between running applications and the host operating system.

  1. Install the needed gVisor binaries into the host.

    (\nset -e\n  ARCH=$(uname -m)\nURL=https://storage.googleapis.com/gvisor/releases/release/latest/${ARCH}\nwget ${URL}/runsc ${URL}/runsc.sha512 \\\n${URL}/containerd-shim-runsc-v1 ${URL}/containerd-shim-runsc-v1.sha512\n  sha512sum -c runsc.sha512 \\\n-c containerd-shim-runsc-v1.sha512\n  rm -f *.sha512\n  chmod a+rx runsc containerd-shim-runsc-v1\n  sudo mv runsc containerd-shim-runsc-v1 /usr/local/bin\n)\n

    Refer to the gVisor install docs for more information.

  2. Prepare the config for the k0s-managed containerd to use gVisor as an additional runtime:

    cat <<EOF | sudo tee /etc/k0s/containerd.d/gvisor.toml\nversion = 2\n\n[plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.runsc]\n  runtime_type = \"io.containerd.runsc.v1\"\nEOF\n
  3. Start and join the worker into the cluster, as normal:

    k0s worker $token\n
  4. Register the gVisor runtime on the Kubernetes side to make it usable for workloads (by default, containerd uses the normal runc runtime):

    cat <<EOF | kubectl apply -f -\napiVersion: node.k8s.io/v1\nkind: RuntimeClass\nmetadata:\n  name: gvisor\nhandler: runsc\nEOF\n

    At this point, you can use gVisor runtime for your workloads:

    apiVersion: v1\nkind: Pod\nmetadata:\nname: nginx-gvisor\nspec:\nruntimeClassName: gvisor\ncontainers:\n- name: nginx\nimage: nginx\n
  5. (Optional) Verify that the created nginx pod is running under gVisor runtime:

    # kubectl exec nginx-gvisor -- dmesg | grep -i gvisor\n[    0.000000] Starting gVisor...\n
"},{"location":"runtime/#using-nvidia-container-runtime","title":"Using nvidia-container-runtime","text":"

First, install the NVIDIA runtime components:

distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \\\n&& curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - \\\n&& curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list\nsudo apt-get update && sudo apt-get install -y nvidia-container-runtime\n

Next, drop the following containerd runtime configuration snippet into /etc/k0s/containerd.d/nvidia.toml:

[plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.nvidia]\nprivileged_without_host_devices = false\nruntime_engine = \"\"\nruntime_root = \"\"\nruntime_type = \"io.containerd.runc.v1\"\n[plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.nvidia.options]\nBinaryName = \"/usr/bin/nvidia-container-runtime\"\n

Create the needed RuntimeClass:

cat <<EOF | kubectl apply -f -\napiVersion: node.k8s.io/v1\nkind: RuntimeClass\nmetadata:\n  name: nvidia\nhandler: nvidia\nEOF\n

Note: Detailed instructions on how to run nvidia-container-runtime on your node are available here.

"},{"location":"runtime/#using-custom-cri-runtime","title":"Using custom CRI runtime","text":"

Warning: You can use your own CRI runtime with k0s (for example, docker). However, k0s will not start or manage the runtime, and configuration is solely your responsibility.

Use the --cri-socket option to run a k0s worker with a custom CRI runtime. The option takes input in the form of <type>:<socket_path> (for type, use docker for a pure Docker setup and remote for anything else).
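
For example, to point a worker at an externally managed CRI-O installation, the invocation might look like the following (the socket path /var/run/crio/crio.sock is an assumption and depends on how your runtime is set up):

k0s worker --cri-socket remote:unix:///var/run/crio/crio.sock --token-file /var/lib/k0s/join-token\n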

"},{"location":"runtime/#using-dockershim","title":"Using dockershim","text":"

To run k0s with a pre-existing Dockershim setup, run the worker with k0s worker --cri-socket docker:unix:///var/run/cri-dockerd.sock <token>. A detailed explanation on dockershim and a guide for installing cri-dockerd can be found in our k0s dockershim guide.

"},{"location":"selinux/","title":"SELinux Overview","text":"

SELinux enforces mandatory access control policies that confine user programs and system services, as well as access to files and network resources. Limiting privilege to the minimum required to work reduces or eliminates the ability of these programs and daemons to cause harm if faulty or compromised.

Enabling SELinux in container runtime provides an additional security control to help further enforce isolation among deployed containers and the host.

This guide describes how to enable SELinux in the Kubernetes environment provided by k0s on CentOS and Red Hat Enterprise Linux (RHEL).

"},{"location":"selinux/#requirements","title":"Requirements","text":"
  • SELinux is enabled on host OS of the worker nodes.
  • SELinux has the container-selinux policy installed.
  • SELinux labels are correctly set for k0s installation files of the worker nodes.
  • SELinux is enabled in container runtime such as containerd on the worker nodes.
"},{"location":"selinux/#check-whether-selinux-is-enabled-on-host-os","title":"Check whether SELinux is enabled on host OS","text":"

SELinux is enabled on CentOS and RHEL by default. The command output below indicates that SELinux is enabled.

$ getenforce\nEnforcing\n
"},{"location":"selinux/#install-container-selinux","title":"Install container-selinux","text":"

It is required to have container-selinux installed. In most Fedora-based distributions, including Fedora 37, Red Hat Enterprise Linux 7, 8 and 9, CentOS 7 and 8, and Rocky Linux 9, this can be achieved by installing the container-selinux package.

In RHEL 7 and CentOS 7 this is achieved by running:

yum install -y container-selinux\n

In the rest of the mentioned distributions, run:

dnf install -y container-selinux\n
"},{"location":"selinux/#set-selinux-labels-for-k0s-installation-files","title":"Set SELinux labels for k0s installation files","text":"

Run the commands below on the host OS of the worker nodes.

DATA_DIR=\"/var/lib/k0s\"\nsudo semanage fcontext -a -t container_runtime_exec_t \"${DATA_DIR}/bin/containerd.*\"\nsudo semanage fcontext -a -t container_runtime_exec_t \"${DATA_DIR}/bin/runc\"\nsudo restorecon -R -v ${DATA_DIR}/bin\nsudo semanage fcontext -a -t container_var_lib_t \"${DATA_DIR}/containerd(/.*)?\"\nsudo semanage fcontext -a -t container_ro_file_t \"${DATA_DIR}/containerd/io.containerd.snapshotter.*/snapshots(/.*)?\"\nsudo restorecon -R -v ${DATA_DIR}/containerd\n
"},{"location":"selinux/#enable-selinux-in-containerd-of-k0s","title":"Enable SELinux in containerd of k0s","text":"

Add the lines below to /etc/k0s/containerd.toml on the worker nodes. You need to restart the k0s service on the node for the change to take effect.

[plugins.\"io.containerd.grpc.v1.cri\"]\nenable_selinux = true\n
"},{"location":"selinux/#verify-selinux-works-in-kubernetes-environment","title":"Verify SELinux works in Kubernetes environment","text":"

Following the example Assign SELinux labels to a Container, deploy a test pod using the YAML file below:

apiVersion: v1\nkind: Pod\nmetadata:\nname: test-selinux\nspec:\ncontainers:\n- image: busybox\nname: test-selinux\ncommand: [\"sleep\", \"infinity\"]\nsecurityContext:\nseLinuxOptions:\nlevel: \"s0:c123,c456\"\n

After the pod starts, ssh to the worker node on which the pod is running and check the pod process. It should display the label s0:c123,c456 that you specified in the YAML file:

$ ps -efZ | grep -F 'sleep infinity'\nsystem_u:system_r:container_t:s0:c123,c456 root 3346 3288  0 16:39 ?       00:00:00 sleep infinity\n
"},{"location":"shell-completion/","title":"Enabling Shell Completion","text":"

Generate the k0s completion script using the k0s completion <shell_name> command, for Bash, Zsh, fish, or PowerShell.

Sourcing the completion script in your shell enables k0s autocompletion.

"},{"location":"shell-completion/#bash","title":"Bash","text":"
echo 'source <(k0s completion bash)' >>~/.bashrc\n

To load completions for each session, execute once:

k0s completion bash > /etc/bash_completion.d/k0s\n
"},{"location":"shell-completion/#zsh","title":"Zsh","text":"

If shell completion is not already enabled in your Zsh environment, you will need to enable it:

echo \"autoload -U compinit; compinit\" >> ~/.zshrc\n

To load completions for each session, execute once:

k0s completion zsh > \"${fpath[1]}/_k0s\"\n

Note: You must start a new shell for the setup to take effect.

"},{"location":"shell-completion/#fish","title":"Fish","text":"
k0s completion fish | source\n

To load completions for each session, execute once:

k0s completion fish > ~/.config/fish/completions/k0s.fish\n
"},{"location":"storage/","title":"Storage","text":""},{"location":"storage/#bundled-openebs-storage","title":"Bundled OpenEBS storage","text":"

k0s comes with a bundled OpenEBS installation, which can be enabled via the configuration file.

Use the following configuration as an example:

spec:\nextensions:\nstorage:\ntype: openebs_local_storage\n

The cluster will have two storage classes available for you to use:

k0s kubectl get storageclass\n
NAME               PROVISIONER        RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE\nopenebs-device     openebs.io/local   Delete          WaitForFirstConsumer   false                  24s\nopenebs-hostpath   openebs.io/local   Delete          WaitForFirstConsumer   false                  24s\n

The openebs-hostpath storage class maps to /var/openebs/local.

The openebs-device storage class is not configured; it can be configured with the manifest deployer according to the OpenEBS documentation.

"},{"location":"storage/#example-usage","title":"Example usage","text":"

Use the following manifests as an example of a pod with a mounted volume:

apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\nname: nginx-pvc\nnamespace: default\nspec:\naccessModes:\n- ReadWriteOnce\nstorageClassName: openebs-hostpath\nresources:\nrequests:\nstorage: 5Gi\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: nginx\nnamespace: default\nlabels:\napp: nginx\nspec:\nselector:\nmatchLabels:\napp: nginx\nstrategy:\ntype: Recreate\ntemplate:\nmetadata:\nlabels:\napp: nginx\nspec:\ncontainers:\n- image: nginx\nname: nginx\nvolumeMounts:\n- name: persistent-storage\nmountPath: /var/lib/nginx\nvolumes:\n- name: persistent-storage\npersistentVolumeClaim:\nclaimName: nginx-pvc\n
k0s kubectl apply -f nginx.yaml\n
persistentvolumeclaim/nginx-pvc created\ndeployment.apps/nginx created\nbash-5.1# k0s kc get pods\nNAME                    READY   STATUS    RESTARTS   AGE\nnginx-d95bcb7db-gzsdt   1/1     Running   0          30s\n
k0s kubectl get pv\n
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM               STORAGECLASS       REASON   AGE\npvc-9a7fae2d-eb03-42c3-aaa9-1a807d5df12f   5Gi        RWO            Delete           Bound    default/nginx-pvc   openebs-hostpath            30s\n
"},{"location":"storage/#csi","title":"CSI","text":"

k0s supports a wide range of different storage options by utilizing Container Storage Interface (CSI). All Kubernetes storage solutions are supported and users can easily select the storage that fits best for their needs.

When the storage solution implements the Container Storage Interface (CSI), containers can communicate with the storage for the creation and configuration of persistent volumes. This makes it easy to dynamically provision the requested volumes. It also expands the supported storage solutions beyond the previous generation of in-tree volume plugins. More information about the CSI concept is described on the Kubernetes Blog.

"},{"location":"storage/#installing-3rd-party-storage-solutions","title":"Installing 3rd party storage solutions","text":"

Follow your storage driver's installation instructions. Note that the kubelet installed by k0s uses a slightly different path for its working directory (/var/lib/k0s/kubelet instead of /var/lib/kubelet). Consult the CSI driver's configuration documentation on how to customize this path.
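
Many CSI drivers distributed as Helm charts expose this path as a chart value. The exact key name differs per chart, so the following values snippet is only illustrative:

# hypothetical values.yaml override for a CSI driver chart\nkubeletDir: /var/lib/k0s/kubelet\n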

"},{"location":"storage/#example-storage-solutions","title":"Example storage solutions","text":"

Different Kubernetes storage solutions are explained in the official Kubernetes storage documentation. All of them can be used with k0s. Here are some popular ones:

  • Rook-Ceph (Open Source)
  • MinIO (Open Source)
  • Gluster (Open Source)
  • Longhorn (Open Source)
  • Amazon EBS
  • Google Persistent Disk
  • Azure Disk
  • Portworx

If you are looking for fault-tolerant storage with data replication, you can find a k0s tutorial for configuring Ceph storage with Rook here.

"},{"location":"system-monitoring/","title":"System components monitoring","text":"

Controller nodes are isolated by default, which means that a cluster user cannot schedule workloads onto controller nodes.

k0s provides a mechanism to expose system components for monitoring. System component metrics can give a better look into what is happening inside them. Metrics are particularly useful for building dashboards and alerts. You can read more about metrics for Kubernetes system components here.

Note: the mechanism is an opt-in feature; you can enable it on installation:

sudo k0s install controller --enable-metrics-scraper\n
"},{"location":"system-monitoring/#jobs","title":"Jobs","text":"

The list of components that are scraped by k0s:

  • kube-scheduler
  • kube-controller-manager
  • etcd
  • kine

Note: kube-apiserver metrics are not scraped since they are accessible via the kubernetes endpoint within the cluster.

"},{"location":"system-monitoring/#architecture","title":"Architecture","text":"

k0s uses a pushgateway with TTL to make it possible to detect issues with metrics delivery. The default TTL is 2 minutes.

"},{"location":"system-requirements/","title":"System requirements","text":"

This page describes the system requirements for k0s.

"},{"location":"system-requirements/#minimum-memory-and-cpu-requirements","title":"Minimum memory and CPU requirements","text":"

The minimum requirements for k0s detailed below are approximations, and thus your results may vary.

Role | Memory (RAM) | Virtual CPU (vCPU)
Controller node | 1 GB | 1 vCPU
Worker node | 0.5 GB | 1 vCPU
Controller + worker | 1 GB | 1 vCPU
"},{"location":"system-requirements/#controller-node-recommendations","title":"Controller node recommendations","text":"
# of Worker nodes | # of Pods | Recommended RAM | Recommended vCPU
up to 10 | up to 1000 | 1-2 GB | 1-2 vCPU
up to 50 | up to 5000 | 2-4 GB | 2-4 vCPU
up to 100 | up to 10000 | 4-8 GB | 2-4 vCPU
up to 500 | up to 50000 | 8-16 GB | 4-8 vCPU
up to 1000 | up to 100000 | 16-32 GB | 8-16 vCPU
up to 5000 | up to 150000 | 32-64 GB | 16-32 vCPU

k0s has the standard Kubernetes limits for the maximum number of nodes, pods, etc. For more details, see the Kubernetes considerations for large clusters.

The measured memory consumption of a k0s controller node can be found below on this page.

"},{"location":"system-requirements/#storage","title":"Storage","text":"

It's recommended to use an SSD for optimal storage performance (cluster latency and throughput are sensitive to storage).

The specific storage consumption for k0s is as follows:

Role | Storage (k0s part)
Controller node | ~0.5 GB
Worker node | ~1.3 GB
Controller + worker | ~1.7 GB

Note: The operating system and application requirements must be considered in addition to the k0s part.

"},{"location":"system-requirements/#host-operating-system","title":"Host operating system","text":"
  • Linux (see Linux specific requirements for details)
  • Windows Server 2019
"},{"location":"system-requirements/#architecture","title":"Architecture","text":"
  • x86-64
  • ARM64
  • ARMv7
"},{"location":"system-requirements/#networking","title":"Networking","text":"

For information on the required ports and protocols, refer to networking.

"},{"location":"system-requirements/#external-runtime-dependencies","title":"External runtime dependencies","text":"

k0s strives to be as independent from the OS as possible. The current and past external runtime dependencies are documented here.

To run some automated compatibility checks on your system, use k0s sysinfo.
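
For example, run it on a prospective node to print the checks and their results:

k0s sysinfo\n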

"},{"location":"system-requirements/#controller-node-measured-memory-consumption","title":"Controller node measured memory consumption","text":"

The following table shows the measured memory consumption in the cluster of one controller node.

# of Worker nodes | # of Pods (besides default) | Memory consumption
1 | 0 | 510 MB
1 | 100 | 600 MB
20 | 0 | 660 MB
20 | 2000 | 1000 MB
50 | 0 | 790 MB
50 | 5000 | 1400 MB
100 | 0 | 1000 MB
100 | 10000 | 2300 MB
200 | 0 | 1500 MB
200 | 20000 | 3300 MB

Measurement details:

  • k0s v1.22.4+k0s.2 (default configuration with etcd)
  • Ubuntu Server 20.04.3 LTS, OS part of the used memory was around 180 MB
  • Hardware: AWS t3.xlarge (4 vCPUs, 16 GB RAM)
  • Pod image: nginx:1.21.4
"},{"location":"troubleshooting/","title":"Common Pitfalls","text":"

There are a few common cases we've seen where k0s fails to run properly.

"},{"location":"troubleshooting/#coredns-in-crashloop","title":"CoreDNS in crashloop","text":"

The most common case we've encountered so far has been CoreDNS getting into a crashloop on the node(s).

With kubectl you see something like this:

$ kubectl get pod --all-namespaces\nNAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE\nkube-system   calico-kube-controllers-5f6546844f-25px6   1/1     Running   0          167m\nkube-system   calico-node-fwjx5                          1/1     Running   0          164m\nkube-system   calico-node-t4tx5                          1/1     Running   0          164m\nkube-system   calico-node-whwsg                          1/1     Running   0          164m\nkube-system   coredns-5c98d7d4d8-tfs4q                   1/1     Error     17         167m\nkube-system   konnectivity-agent-9jkfd                   1/1     Running   0          164m\nkube-system   konnectivity-agent-bvhdb                   1/1     Running   0          164m\nkube-system   konnectivity-agent-r6mzj                   1/1     Running   0          164m\nkube-system   kube-proxy-kr2r9                           1/1     Running   0          164m\nkube-system   kube-proxy-tbljr                           1/1     Running   0          164m\nkube-system   kube-proxy-xbw7p                           1/1     Running   0          164m\nkube-system   metrics-server-7d4bcb75dd-pqkrs            1/1     Running   0          167m\n

When you check the logs, it'll show something like this:

kubectl -n kube-system logs coredns-5c98d7d4d8-tfs4q\n
plugin/loop: Loop (127.0.0.1:55953 -> :1053) detected for zone \".\", see https://coredns.io/plugins/loop#troubleshooting. Query: \"HINFO 4547991504243258144.3688648895315093531.\"\n

This is most often caused by the systemd-resolved stub (or something similar) running locally, which makes CoreDNS detect a possible loop with DNS queries.

The easiest, albeit crude, workaround is to disable the systemd-resolved stub and revert the host's /etc/resolv.conf to the original.
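
On systemd-based distributions, one possible way to do this is sketched below (the drop-in file name is arbitrary; adapt the steps to your distribution):

sudo mkdir -p /etc/systemd/resolved.conf.d\ncat <<EOF | sudo tee /etc/systemd/resolved.conf.d/99-disable-stub.conf\n[Resolve]\nDNSStubListener=no\nEOF\nsudo ln -sf /run/systemd/resolve/resolv.conf /etc/resolv.conf\nsudo systemctl restart systemd-resolved\n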

Read more at CoreDNS troubleshooting docs.

"},{"location":"troubleshooting/#k0s-controller-fails-on-arm-boxes","title":"k0s controller fails on ARM boxes","text":"

In the logs you probably see etcd not starting up properly.

etcd is not fully supported on the ARM architecture, so you need to run the k0s controller, and thus also the etcd process, with the environment variable ETCD_UNSUPPORTED_ARCH=arm.

As etcd is not fully supported on ARM, it also means that the k0s control plane with etcd is not fully supported on ARM either.
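
Assuming k0s was installed as the k0scontroller systemd service, one way to set the variable is a drop-in override for that service (a sketch; adjust the service name to your setup):

sudo mkdir -p /etc/systemd/system/k0scontroller.service.d\ncat <<EOF | sudo tee /etc/systemd/system/k0scontroller.service.d/override.conf\n[Service]\nEnvironment=ETCD_UNSUPPORTED_ARCH=arm\nEOF\nsudo systemctl daemon-reload\nsudo systemctl restart k0scontroller\n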

"},{"location":"troubleshooting/#k0s-will-not-start-on-zfs-based-systems","title":"k0s will not start on ZFS-based systems","text":"

On ZFS-based systems k0s will fail to start because containerd runs by default in overlayfs mode to manage image layers. This is not compatible with ZFS and requires a custom containerd configuration. The following steps should get k0s working on ZFS-based systems:

  • check with $ ctr -a /run/k0s/containerd.sock plugins ls that the containerd ZFS snapshotter plugin is in the ok state (which should be the case if the ZFS kernel modules and ZFS userspace utilities are correctly configured):
TYPE                            ID                       PLATFORMS      STATUS    \n...\nio.containerd.snapshotter.v1    zfs                      linux/amd64    ok\n...\n
  • create a containerd config according to the documentation: $ containerd config default > /etc/k0s/containerd.toml
  • modify the line in /etc/k0s/containerd.toml:
...\n[plugins.\"io.containerd.grpc.v1.cri\".containerd]\nsnapshotter = \"overlayfs\"\n...\n

to

...\n[plugins.\"io.containerd.grpc.v1.cri\".containerd]\nsnapshotter = \"zfs\"\n...\n
  • create a ZFS dataset to be used as snapshot storage at your desired location, e.g. $ zfs create -o mountpoint=/var/lib/k0s/containerd/io.containerd.snapshotter.v1.zfs rpool/containerd
  • install k0s as usual, e.g $ k0s install controller --single -c /etc/k0s/k0s.yaml
  • containerd should be launched with ZFS support and k0s should initialize the cluster correctly
"},{"location":"troubleshooting/#pods-pending-when-using-cloud-providers","title":"Pods pending when using cloud providers","text":"

Once cloud provider support is enabled on the kubelet of worker nodes, the kubelet automatically adds the taint node.cloudprovider.kubernetes.io/uninitialized to the node. This taint prevents normal workloads from being scheduled on the node until the cloud provider controller runs a second initialization on the node and removes the taint. In other words, these nodes are not available for scheduling until the cloud provider controller is successfully running on the cluster.
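
To check whether a node still carries the taint, you can list the node taints, for example:

kubectl get nodes -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints\n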

For troubleshooting your specific cloud provider see its documentation.

"},{"location":"troubleshooting/#k0s-not-working-with-read-only-usr","title":"k0s not working with read only /usr","text":"

By default k0s does not run on nodes where /usr is read only.

This can be fixed by changing the default path for volumePluginDir in your k0s config. You will need to change two values, one for the kubelet itself and one for Calico.

Here is a snippet of an example config with the default values changed:

spec:\ncontrollerManager:\nextraArgs:\nflex-volume-plugin-dir: \"/etc/kubernetes/kubelet-plugins/volume/exec\"\nnetwork:\ncalico:\nflexVolumeDriverPath: /etc/k0s/kubelet-plugins/volume/exec/nodeagent~uds\nworkerProfiles:\n- name: coreos\nvalues:\nvolumePluginDir: /etc/k0s/kubelet-plugins/volume/exec/\n

With this config you can start your controller as usual. Any workers will need to be started with

k0s worker --profile coreos [TOKEN]\n
"},{"location":"troubleshooting/#profiling","title":"Profiling","text":"

We drop any debug-related information and symbols from the compiled binary by utilizing the -w -s linker flags.

To keep those symbols, use the DEBUG env variable:

DEBUG=true make k0s\n

Any value other than \"false\" will work.

To add custom linker flags, use the LD_FLAGS variable.

LD_FLAGS=\"--custom-flag=value\" make k0s\n
"},{"location":"troubleshooting/#im-using-custom-cri-and-missing-some-labels-in-prometheus-metrics","title":"I'm using custom CRI and missing some labels in Prometheus metrics","text":"

Due to the removal of the embedded dockershim from the Kubelet, the Kubelet's embedded cAdvisor metrics got slightly broken. If your container runtime is a custom containerd, you can add --kubelet-extra-args=\"--containerd=<path/to/containerd.sock>\" to the k0s worker startup. That configures the Kubelet's embedded cAdvisor to talk directly with containerd to gather the metrics and thus gets the expected labels in place.

Unfortunately this does not work when using Docker via the cri-dockerd shim. Currently, there is no easy solution to this problem.

In the future, the Kubelet will be refactored to get the container metrics from the CRI interface rather than from the runtime directly. This work is specified and followed up in KEP-2371, but until that work completes, the only option is to run a standalone cAdvisor. The known issues section in the official Kubernetes documentation about migrating away from dockershim explains the current shortcomings and shows how to run cAdvisor as a standalone DaemonSet.

"},{"location":"troubleshooting/#customized-configurations","title":"Customized configurations","text":"
  • All data directories reside under /var/lib/k0s, for example:
    • /var/lib/k0s/kubelet
    • /var/lib/k0s/etcd
"},{"location":"upgrade/","title":"Upgrade","text":"

Upgrading k0s is a simple process thanks to its single-binary distribution. The single k0s binary file includes all the necessary parts for the upgrade, so essentially the upgrade process consists of replacing that file and restarting the service.

This tutorial explains two different approaches for k0s upgrade:

  • Upgrade a k0s node locally
  • Upgrade a k0s cluster using k0sctl
"},{"location":"upgrade/#upgrade-a-k0s-node-locally","title":"Upgrade a k0s node locally","text":"

If your k0s cluster has been deployed with k0sctl, then k0sctl provides the easiest upgrade method. In that case jump to the next chapter. However, if you have deployed k0s without k0sctl, then follow the upgrade method explained in this chapter.

Before starting the upgrade, consider moving your applications to another node if you want to avoid downtime. This can be done by draining a worker node. Remember to uncordon the worker node afterwards to tell Kubernetes that it can resume scheduling new pods onto the node.
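
For example (replace node.hostname with the name of your worker node):

kubectl drain --ignore-daemonsets --delete-emptydir-data node.hostname\n# ...upgrade the node...\nkubectl uncordon node.hostname\n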

The upgrade process is started by stopping the currently running k0s service.

sudo k0s stop\n

Now you can replace the old k0s binary file. The easiest way is to use the download script. It will download the latest k0s binary and replace the old binary with it. You can also do this manually without the download script.

curl -sSLf https://get.k0s.sh | sudo sh\n

Then you can start the service (with the upgraded k0s) and your upgrade is done.

sudo k0s start\n
"},{"location":"upgrade/#upgrade-a-k0s-cluster-using-k0sctl","title":"Upgrade a k0s cluster using k0sctl","text":"

Upgrading a k0s cluster using k0sctl is not done through a particular command (there is no upgrade sub-command in k0sctl) but by way of the configuration file. The configuration file describes the desired state of the cluster, and when you pass that description to the k0sctl apply command, the current state is discovered and the system does whatever is necessary to bring the cluster to the desired state (for example, perform an upgrade).

"},{"location":"upgrade/#k0sctl-cluster-upgrade-process","title":"k0sctl cluster upgrade process","text":"

The following operations occur during a k0sctl upgrade:

  1. Upgrade of each controller, one at a time. There is no downtime if multiple controllers are configured.

  2. Upgrade of workers, in batches of 10%.

  3. Draining of workers, which allows the workload to move to other nodes prior to the actual upgrade of the worker node components. (To skip the drain process, use the --no-drain option.)

  4. The upgrade process continues once the upgraded nodes return to Ready state.

You can configure the desired cluster version in the k0sctl configuration by setting the value of spec.k0s.version:

spec:\nk0s:\nversion: 1.28.1+k0s.0\n

If you do not specify a version, k0sctl checks online for the latest version and defaults to it.

k0sctl apply\n
...\n...\nINFO[0001] ==> Running phase: Upgrade controllers\nINFO[0001] [ssh] 10.0.0.23:22: starting upgrade\nINFO[0001] [ssh] 10.0.0.23:22: Running with legacy service name, migrating...\nINFO[0011] [ssh] 10.0.0.23:22: waiting for the k0s service to start\nINFO[0016] ==> Running phase: Upgrade workers\nINFO[0016] Upgrading 1 workers in parallel\nINFO[0016] [ssh] 10.0.0.17:22: upgrade starting\nINFO[0027] [ssh] 10.0.0.17:22: waiting for node to become ready again\nINFO[0027] [ssh] 10.0.0.17:22: upgrade successful\nINFO[0027] ==> Running phase: Disconnect from hosts\nINFO[0027] ==> Finished in 27s\nINFO[0027] k0s cluster version 1.28.1+k0s.0 is now installed\nINFO[0027] Tip: To access the cluster you can now fetch the admin kubeconfig using:\nINFO[0027]      k0sctl kubeconfig\n
"},{"location":"user-management/","title":"User Management","text":""},{"location":"user-management/#adding-a-cluster-user","title":"Adding a Cluster User","text":"

Run the kubeconfig create command on the controller to add a user to the cluster. The command outputs a kubeconfig for the user, which is used for authentication.

k0s kubeconfig create [username]\n
"},{"location":"user-management/#enabling-access-to-cluster-resources","title":"Enabling Access to Cluster Resources","text":"

Create the user with the system:masters group to grant the user access to the cluster:

k0s kubeconfig create --groups \"system:masters\" testUser > k0s.config\n

Create a roleBinding to grant the user access to the resources:

k0s kubectl create clusterrolebinding --kubeconfig k0s.config testUser-admin-binding --clusterrole=admin --user=testUser\n
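
Assuming kubectl is available on the machine, you can then verify access with the generated kubeconfig, for example:

KUBECONFIG=k0s.config kubectl get nodes\n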
"},{"location":"worker-node-config/","title":"Configuration options for worker nodes","text":"

Although the k0s worker command does not take in any special yaml configuration, there are still methods for configuring the workers to run various components.

"},{"location":"worker-node-config/#node-labels","title":"Node labels","text":"

The k0s worker command accepts the --labels flag, with which you can make the newly joined worker node register itself in the Kubernetes API with the given set of labels.

For example, running the worker with k0s worker --token-file k0s.token --labels=\"k0sproject.io/foo=bar,k0sproject.io/other=xyz\" results in:

kubectl get node --show-labels\n
NAME      STATUS     ROLES    AGE   VERSION        LABELS\nworker0   NotReady   <none>   10s   v1.28.1+k0s  beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,k0sproject.io/foo=bar,k0sproject.io/other=xyz,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker0,kubernetes.io/os=linux\n

Controller worker nodes are assigned node.k0sproject.io/role=control-plane and node-role.kubernetes.io/control-plane=true labels:

kubectl get node --show-labels\n
NAME          STATUS     ROLES           AGE   VERSION        LABELS\ncontroller0   NotReady   control-plane   10s   v1.28.1+k0s  beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=worker0,kubernetes.io/os=linux,node.k0sproject.io/role=control-plane,node-role.kubernetes.io/control-plane=true\n

Note: Setting the labels is only effective on the first registration of the node. Changing the labels thereafter has no effect.

"},{"location":"worker-node-config/#taints","title":"Taints","text":"

The k0s worker command accepts the --taints flag, with which you can make the newly joined worker node register itself with the given set of taints.
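
For example, the following registers the worker with a hypothetical dedicated=database:NoSchedule taint (the key, value and effect are placeholders):

k0s worker --token-file k0s.token --taints=\"dedicated=database:NoSchedule\"\n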

Note: Controller nodes running with --enable-worker are automatically assigned the node-role.kubernetes.io/master:NoExecute taint. You can disable the default taints using the --no-taints parameter.

kubectl get nodes -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints\n
NAME          TAINTS\ncontroller0   [map[effect:NoSchedule key:node-role.kubernetes.io/master]]\nworker0       <none>\n
"},{"location":"worker-node-config/#kubelet-configuration","title":"Kubelet configuration","text":"

The k0s worker command accepts a generic flag to pass in any set of arguments for the kubelet process.

For example, running k0s worker --token-file=k0s.token --kubelet-extra-args=\"--node-ip=1.2.3.4 --address=0.0.0.0\" passes in the given flags to Kubelet as-is. As such, you must confirm that any flags you are passing in are properly formatted and valued as k0s will not validate those flags.

"},{"location":"worker-node-config/#worker-profiles","title":"Worker Profiles","text":"

Kubelet configuration fields can also be set via worker profiles. Worker profiles are defined in the main k0s.yaml and are used to generate ConfigMaps containing a custom kubelet.config.k8s.io/v1beta1/KubeletConfiguration object. To see examples of k0s.yaml containing worker profiles, go here. For a list of possible Kubelet configuration fields, go here.
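
As a brief sketch, a worker profile in k0s.yaml could look like the following (the profile name custom-kubelet and the maxPods value are purely illustrative):

spec:\nworkerProfiles:\n- name: custom-kubelet\nvalues:\nmaxPods: 200\n

Workers then select the profile at startup with k0s worker --profile custom-kubelet [TOKEN].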

"},{"location":"worker-node-config/#iptables-mode","title":"IPTables Mode","text":"

k0s detects the iptables backend automatically based on the existing records. On a brand-new setup, iptables-nft will be used. There is a --iptables-mode flag to specify the mode explicitly. Valid values: nft, legacy and auto (default).

k0s worker --iptables-mode=nft\n
"},{"location":"cli/","title":"Index","text":""},{"location":"cli/#k0s","title":"k0s","text":"

k0s - Zero Friction Kubernetes

"},{"location":"cli/#synopsis","title":"Synopsis","text":"

k0s - The zero friction Kubernetes - https://k0sproject.io This software is built and distributed by Mirantis, Inc., and is subject to EULA https://k0sproject.io/licenses/eula

"},{"location":"cli/#options","title":"Options","text":"
  -h, --help   help for k0s\n
"},{"location":"cli/#see-also","title":"SEE ALSO","text":"
  • k0s airgap - Manage airgap setup
  • k0s api - Run the controller API
  • k0s backup - Back-Up k0s configuration. Must be run as root (or with sudo)
  • k0s completion - Generate completion script
  • k0s config - Configuration related sub-commands
  • k0s controller - Run controller
  • k0s ctr - containerd CLI
  • k0s docs - Generate k0s command documentation
  • k0s etcd - Manage etcd cluster
  • k0s install - Install k0s on a brand-new system. Must be run as root (or with sudo)
  • k0s kubeconfig - Create a kubeconfig file for a specified user
  • k0s kubectl - kubectl controls the Kubernetes cluster manager
  • k0s reset - Uninstall k0s. Must be run as root (or with sudo)
  • k0s restore - restore k0s state from given backup archive. Use '-' as filename to read from stdin. Must be run as root (or with sudo)
  • k0s start - Start the k0s service configured on this host. Must be run as root (or with sudo)
  • k0s status - Get k0s instance status information
  • k0s stop - Stop the k0s service configured on this host. Must be run as root (or with sudo)
  • k0s sysinfo - Display system information
  • k0s token - Manage join tokens
  • k0s version - Print the k0s version
  • k0s worker - Run worker
"},{"location":"cli/k0s/","title":"K0s","text":""},{"location":"cli/k0s/#k0s","title":"k0s","text":"

k0s - Zero Friction Kubernetes

"},{"location":"cli/k0s/#synopsis","title":"Synopsis","text":"

k0s - The zero friction Kubernetes - https://k0sproject.io This software is built and distributed by Mirantis, Inc., and is subject to EULA https://k0sproject.io/licenses/eula

"},{"location":"cli/k0s/#options","title":"Options","text":"
  -h, --help   help for k0s\n
"},{"location":"cli/k0s/#see-also","title":"SEE ALSO","text":"
  • k0s airgap - Manage airgap setup
  • k0s api - Run the controller API
  • k0s backup - Back-Up k0s configuration. Must be run as root (or with sudo)
  • k0s completion - Generate completion script
  • k0s config - Configuration related sub-commands
  • k0s controller - Run controller
  • k0s ctr - containerd CLI
  • k0s docs - Generate k0s command documentation
  • k0s etcd - Manage etcd cluster
  • k0s install - Install k0s on a brand-new system. Must be run as root (or with sudo)
  • k0s kubeconfig - Create a kubeconfig file for a specified user
  • k0s kubectl - kubectl controls the Kubernetes cluster manager
  • k0s reset - Uninstall k0s. Must be run as root (or with sudo)
  • k0s restore - restore k0s state from given backup archive. Use '-' as filename to read from stdin. Must be run as root (or with sudo)
  • k0s start - Start the k0s service configured on this host. Must be run as root (or with sudo)
  • k0s status - Get k0s instance status information
  • k0s stop - Stop the k0s service configured on this host. Must be run as root (or with sudo)
  • k0s sysinfo - Display system information
  • k0s token - Manage join tokens
  • k0s version - Print the k0s version
  • k0s worker - Run worker
"},{"location":"cli/k0s_airgap/","title":"K0s airgap","text":""},{"location":"cli/k0s_airgap/#k0s-airgap","title":"k0s airgap","text":"

Manage airgap setup

"},{"location":"cli/k0s_airgap/#options","title":"Options","text":"
  -c, --config string          config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for airgap\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_airgap/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
  • k0s airgap list-images - List image names and version needed for air-gap install
"},{"location":"cli/k0s_airgap_list-images/","title":"K0s airgap list images","text":""},{"location":"cli/k0s_airgap_list-images/#k0s-airgap-list-images","title":"k0s airgap list-images","text":"

List image names and version needed for air-gap install

k0s airgap list-images [flags]\n
"},{"location":"cli/k0s_airgap_list-images/#examples","title":"Examples","text":"
k0s airgap list-images\n
"},{"location":"cli/k0s_airgap_list-images/#options","title":"Options","text":"
      --all                    include all images, even if they are not used in the current configuration\n  -c, --config string          config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for list-images\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_airgap_list-images/#see-also","title":"SEE ALSO","text":"
  • k0s airgap - Manage airgap setup
"},{"location":"cli/k0s_api/","title":"K0s api","text":""},{"location":"cli/k0s_api/#k0s-api","title":"k0s api","text":"

Run the controller API

k0s api [flags]\n
"},{"location":"cli/k0s_api/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for api\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_api/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_backup/","title":"K0s backup","text":""},{"location":"cli/k0s_backup/#k0s-backup","title":"k0s backup","text":"

Back-Up k0s configuration. Must be run as root (or with sudo)

k0s backup [flags]\n
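
For example, to write the backup archive to a directory of your choice (the path below is only a placeholder):

sudo k0s backup --save-path /var/backups/k0s\n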
"},{"location":"cli/k0s_backup/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for backup\n      --save-path string       destination directory path for backup assets, use '-' for stdout\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_backup/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_completion/","title":"K0s completion","text":""},{"location":"cli/k0s_completion/#k0s-completion","title":"k0s completion","text":"

Generate completion script

"},{"location":"cli/k0s_completion/#synopsis","title":"Synopsis","text":"

To load completions:

Bash:

$ source <(k0s completion bash)

"},{"location":"cli/k0s_completion/#to-load-completions-for-each-session-execute-once","title":"To load completions for each session, execute once:","text":"

$ k0s completion bash > /etc/bash_completion.d/k0s

Zsh:

"},{"location":"cli/k0s_completion/#if-shell-completion-is-not-already-enabled-in-your-environment-you-will-need","title":"If shell completion is not already enabled in your environment you will need","text":""},{"location":"cli/k0s_completion/#to-enable-it-you-can-execute-the-following-once","title":"to enable it. You can execute the following once:","text":"

$ echo \"autoload -U compinit; compinit\" >> ~/.zshrc

"},{"location":"cli/k0s_completion/#to-load-completions-for-each-session-execute-once_1","title":"To load completions for each session, execute once:","text":"

$ k0s completion zsh > \"${fpath[1]}/_k0s\"

"},{"location":"cli/k0s_completion/#you-will-need-to-start-a-new-shell-for-this-setup-to-take-effect","title":"You will need to start a new shell for this setup to take effect.","text":"

Fish:

$ k0s completion fish | source

"},{"location":"cli/k0s_completion/#to-load-completions-for-each-session-execute-once_2","title":"To load completions for each session, execute once:","text":"

$ k0s completion fish > ~/.config/fish/completions/k0s.fish

k0s completion <bash|zsh|fish|powershell>\n
"},{"location":"cli/k0s_completion/#options","title":"Options","text":"
  -h, --help   help for completion\n
"},{"location":"cli/k0s_completion/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_config/","title":"K0s config","text":""},{"location":"cli/k0s_config/#k0s-config","title":"k0s config","text":"

Configuration related sub-commands

"},{"location":"cli/k0s_config/#options","title":"Options","text":"
  -h, --help   help for config\n
"},{"location":"cli/k0s_config/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
  • k0s config create - Output the default k0s configuration yaml to stdout
  • k0s config edit - Launch the editor configured in your shell to edit k0s configuration
  • k0s config status - Display dynamic configuration reconciliation status
  • k0s config validate - Validate k0s configuration
"},{"location":"cli/k0s_config_create/","title":"K0s config create","text":""},{"location":"cli/k0s_config_create/#k0s-config-create","title":"k0s config create","text":"

Output the default k0s configuration yaml to stdout

k0s config create [flags]\n
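
A common use is to write the default configuration to the location the other k0s commands read by default (run this as a user that can write to /etc/k0s):

mkdir -p /etc/k0s\nk0s config create > /etc/k0s/k0s.yaml\n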
"},{"location":"cli/k0s_config_create/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for create\n      --include-images         include the default images in the output\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_config_create/#see-also","title":"SEE ALSO","text":"
  • k0s config - Configuration related sub-commands
"},{"location":"cli/k0s_config_edit/","title":"K0s config edit","text":""},{"location":"cli/k0s_config_edit/#k0s-config-edit","title":"k0s config edit","text":"

Launch the editor configured in your shell to edit k0s configuration

k0s config edit [flags]\n
"},{"location":"cli/k0s_config_edit/#options","title":"Options","text":"
      --data-dir string   Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n      --debug             Debug logging [$DEBUG]\n  -h, --help              help for edit\n
"},{"location":"cli/k0s_config_edit/#see-also","title":"SEE ALSO","text":"
  • k0s config - Configuration related sub-commands
"},{"location":"cli/k0s_config_status/","title":"K0s config status","text":""},{"location":"cli/k0s_config_status/#k0s-config-status","title":"k0s config status","text":"

Display dynamic configuration reconciliation status

k0s config status [flags]\n
"},{"location":"cli/k0s_config_status/#options","title":"Options","text":"
      --data-dir string   Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n      --debug             Debug logging [$DEBUG]\n  -h, --help              help for status\n  -o, --output string     Output format. Must be one of yaml|json\n
"},{"location":"cli/k0s_config_status/#see-also","title":"SEE ALSO","text":"
  • k0s config - Configuration related sub-commands
"},{"location":"cli/k0s_config_validate/","title":"K0s config validate","text":""},{"location":"cli/k0s_config_validate/#k0s-config-validate","title":"k0s config validate","text":"

Validate k0s configuration

"},{"location":"cli/k0s_config_validate/#synopsis","title":"Synopsis","text":"

Example: k0s config validate --config path_to_config.yaml

k0s config validate [flags]\n
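
Because --config accepts '-' for stdin, a freshly generated configuration can be validated without writing it to disk:

k0s config create | k0s config validate --config -\n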
"},{"location":"cli/k0s_config_validate/#options","title":"Options","text":"
  -c, --config string          config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for validate\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_config_validate/#see-also","title":"SEE ALSO","text":"
  • k0s config - Configuration related sub-commands
"},{"location":"cli/k0s_controller/","title":"K0s controller","text":""},{"location":"cli/k0s_controller/#k0s-controller","title":"k0s controller","text":"

Run controller

k0s controller [join-token] [flags]\n
"},{"location":"cli/k0s_controller/#examples","title":"Examples","text":"
    Command to associate master nodes:\n    CLI argument:\n    $ k0s controller [join-token]\n\n    or CLI flag:\n    $ k0s controller --token-file [path_to_file]\n    Note: Token can be passed either as a CLI argument or as a flag\n
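
For a quick single-node evaluation (a sketch; as the flag list below notes, --single implies --enable-worker), the controller can also be started without a join token:

sudo k0s controller --single\n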
"},{"location":"cli/k0s_controller/#options","title":"Options","text":"
      --api-server string                              HACK: api-server for the windows worker node\n      --cidr-range string                              HACK: cidr range for the windows worker node (default \"10.96.0.0/12\")\n      --cluster-dns string                             HACK: cluster dns for the windows worker node (default \"10.96.0.10\")\n  -c, --config string                                  config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n      --cri-socket string                              container runtime socket to use, default to internal containerd. Format: [remote|docker]:[path-to-socket]\n      --data-dir string                                Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                                          Debug logging (default: false)\n      --debugListenOn string                           Http listenOn for Debug pprof handler (default \":6060\")\n      --disable-components strings                     disable components (valid items: autopilot,control-api,coredns,csr-approver,endpoint-reconciler,helm,konnectivity-server,kube-controller-manager,kube-proxy,kube-scheduler,metrics-server,network-provider,node-role,system-rbac,worker-config)\n      --enable-cloud-provider                          Whether or not to enable cloud provider support in kubelet\n      --enable-dynamic-config                          enable cluster-wide dynamic config based on custom resource\n      --enable-k0s-cloud-provider                      enables the k0s-cloud-provider (default false)\n      --enable-metrics-scraper                         enable scraping metrics from the controller components (kube-scheduler, kube-controller-manager)\n      --enable-worker                                  enable worker (default false)\n  -h, --help                                           help for controller\n      --ignore-pre-flight-checks                       continue even if pre-flight checks fail\n      --iptables-mode string                           iptables mode (valid values: nft, legacy, auto). default: auto\n      --k0s-cloud-provider-port int                    the port that k0s-cloud-provider binds on (default 10258)\n      --k0s-cloud-provider-update-frequency duration   the frequency of k0s-cloud-provider node updates (default 2m0s)\n      --kube-controller-manager-extra-args string      extra args for kube-controller-manager\n      --kubelet-extra-args string                      extra args for kubelet\n      --labels strings                                 Node labels, list of key=value pairs\n  -l, --logging stringToString                         Logging Levels for the different components (default [etcd=info,containerd=info,konnectivity-server=1,kube-apiserver=1,kube-controller-manager=1,kube-scheduler=1,kubelet=1,kube-proxy=1])\n      --no-taints                                      disable default taints for controller node\n      --profile string                                 worker profile to use on the node (default \"default\")\n      --single                                         enable single node (implies --enable-worker, default false)\n      --status-socket string                           Full file path to the socket file. 
(default \"/var/lib/k0s/run/status.sock\")\n      --taints strings                                 Node taints, list of key=value:effect strings\n      --token-file string                              Path to the file containing join-token.\n  -v, --verbose                                        Verbose logging (default: false)\n
"},{"location":"cli/k0s_controller/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_ctr/","title":"K0s ctr","text":""},{"location":"cli/k0s_ctr/#k0s-ctr","title":"k0s ctr","text":"

containerd CLI

"},{"location":"cli/k0s_ctr/#synopsis","title":"Synopsis","text":"

ctr is an unsupported debug and administrative client for interacting with the containerd daemon. Because it is unsupported, the commands, options, and operations are not guaranteed to be backward compatible or stable from release to release of the containerd project.

k0s ctr [flags]\n
"},{"location":"cli/k0s_ctr/#options","title":"Options","text":"
  -h, --help   help for ctr\n
"},{"location":"cli/k0s_ctr/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_docs/","title":"K0s docs","text":""},{"location":"cli/k0s_docs/#k0s-docs","title":"k0s docs","text":"

Generate k0s command documentation

k0s docs <markdown|man> [flags]\n
"},{"location":"cli/k0s_docs/#options","title":"Options","text":"
  -h, --help   help for docs\n
"},{"location":"cli/k0s_docs/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_etcd/","title":"K0s etcd","text":""},{"location":"cli/k0s_etcd/#k0s-etcd","title":"k0s etcd","text":"

Manage etcd cluster

"},{"location":"cli/k0s_etcd/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for etcd\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_etcd/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
  • k0s etcd leave - Sign off a given etcd node from the etcd cluster
  • k0s etcd member-list - Returns etcd cluster members list
"},{"location":"cli/k0s_etcd_leave/","title":"K0s etcd leave","text":""},{"location":"cli/k0s_etcd_leave/#k0s-etcd-leave","title":"k0s etcd leave","text":"

Sign off a given etcd node from the etcd cluster

k0s etcd leave [flags]\n
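
For example, to sign off the member with a given peer address (the address below is only a placeholder), run on a controller node:

sudo k0s etcd leave --peer-address 192.168.0.11\n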
"},{"location":"cli/k0s_etcd_leave/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for leave\n      --peer-address string    etcd peer address\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_etcd_leave/#see-also","title":"SEE ALSO","text":"
  • k0s etcd - Manage etcd cluster
"},{"location":"cli/k0s_etcd_member-list/","title":"K0s etcd member list","text":""},{"location":"cli/k0s_etcd_member-list/#k0s-etcd-member-list","title":"k0s etcd member-list","text":"

Returns etcd cluster members list

k0s etcd member-list [flags]\n
"},{"location":"cli/k0s_etcd_member-list/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for member-list\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_etcd_member-list/#see-also","title":"SEE ALSO","text":"
  • k0s etcd - Manage etcd cluster
"},{"location":"cli/k0s_install/","title":"K0s install","text":""},{"location":"cli/k0s_install/#k0s-install","title":"k0s install","text":"

Install k0s on a brand-new system. Must be run as root (or with sudo)

"},{"location":"cli/k0s_install/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -e, --env stringArray        set environment variable\n      --force                  force init script creation\n  -h, --help                   help for install\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_install/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
  • k0s install controller - Install k0s controller on a brand-new system. Must be run as root (or with sudo)
  • k0s install worker - Install k0s worker on a brand-new system. Must be run as root (or with sudo)
"},{"location":"cli/k0s_install_controller/","title":"K0s install controller","text":""},{"location":"cli/k0s_install_controller/#k0s-install-controller","title":"k0s install controller","text":"

Install k0s controller on a brand-new system. Must be run as root (or with sudo)

k0s install controller [flags]\n
"},{"location":"cli/k0s_install_controller/#examples","title":"Examples","text":"
All default values of controller command will be passed to the service stub unless overridden.\n\nWith the controller subcommand you can setup a single node cluster by running:\n\n    k0s install controller --single\n
"},{"location":"cli/k0s_install_controller/#options","title":"Options","text":"
      --api-server string                              HACK: api-server for the windows worker node\n      --cidr-range string                              HACK: cidr range for the windows worker node (default \"10.96.0.0/12\")\n      --cluster-dns string                             HACK: cluster dns for the windows worker node (default \"10.96.0.10\")\n  -c, --config string                                  config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n      --cri-socket string                              container runtime socket to use, default to internal containerd. Format: [remote|docker]:[path-to-socket]\n      --data-dir string                                Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                                          Debug logging (default: false)\n      --debugListenOn string                           Http listenOn for Debug pprof handler (default \":6060\")\n      --disable-components strings                     disable components (valid items: autopilot,control-api,coredns,csr-approver,endpoint-reconciler,helm,konnectivity-server,kube-controller-manager,kube-proxy,kube-scheduler,metrics-server,network-provider,node-role,system-rbac,worker-config)\n      --enable-cloud-provider                          Whether or not to enable cloud provider support in kubelet\n      --enable-dynamic-config                          enable cluster-wide dynamic config based on custom resource\n      --enable-k0s-cloud-provider                      enables the k0s-cloud-provider (default false)\n      --enable-metrics-scraper                         enable scraping metrics from the controller components (kube-scheduler, kube-controller-manager)\n      --enable-worker                                  enable worker (default false)\n  -h, --help                                           help for controller\n      --iptables-mode string                           iptables mode (valid values: nft, legacy, auto). default: auto\n      --k0s-cloud-provider-port int                    the port that k0s-cloud-provider binds on (default 10258)\n      --k0s-cloud-provider-update-frequency duration   the frequency of k0s-cloud-provider node updates (default 2m0s)\n      --kube-controller-manager-extra-args string      extra args for kube-controller-manager\n      --kubelet-extra-args string                      extra args for kubelet\n      --labels strings                                 Node labels, list of key=value pairs\n  -l, --logging stringToString                         Logging Levels for the different components (default [containerd=info,konnectivity-server=1,kube-apiserver=1,kube-controller-manager=1,kube-scheduler=1,kubelet=1,kube-proxy=1,etcd=info])\n      --no-taints                                      disable default taints for controller node\n      --profile string                                 worker profile to use on the node (default \"default\")\n      --single                                         enable single node (implies --enable-worker, default false)\n      --status-socket string                           Full file path to the socket file. 
(default \"/var/lib/k0s/run/status.sock\")\n      --taints strings                                 Node taints, list of key=value:effect strings\n      --token-file string                              Path to the file containing join-token.\n  -v, --verbose                                        Verbose logging (default: false)\n
"},{"location":"cli/k0s_install_controller/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":"
  -e, --env stringArray   set environment variable\n      --force             force init script creation\n
"},{"location":"cli/k0s_install_controller/#see-also","title":"SEE ALSO","text":"
  • k0s install - Install k0s on a brand-new system. Must be run as root (or with sudo)
"},{"location":"cli/k0s_install_worker/","title":"K0s install worker","text":""},{"location":"cli/k0s_install_worker/#k0s-install-worker","title":"k0s install worker","text":"

Install k0s worker on a brand-new system. Must be run as root (or with sudo)

k0s install worker [flags]\n
"},{"location":"cli/k0s_install_worker/#examples","title":"Examples","text":"
Worker subcommand allows you to pass in all available worker parameters.\nAll default values of worker command will be passed to the service stub unless overridden.\n\nWindows flags like \"--api-server\", \"--cidr-range\" and \"--cluster-dns\" will be ignored since install command doesn't yet support Windows services\n
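
As a sketch (the token file path is a placeholder), a worker is typically installed with a join token obtained from a controller and then started as a service:

sudo k0s install worker --token-file /etc/k0s/worker-token\nsudo k0s start\n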
"},{"location":"cli/k0s_install_worker/#options","title":"Options","text":"
      --api-server string           HACK: api-server for the windows worker node\n      --cidr-range string           HACK: cidr range for the windows worker node (default \"10.96.0.0/12\")\n      --cluster-dns string          HACK: cluster dns for the windows worker node (default \"10.96.0.10\")\n      --cri-socket string           container runtime socket to use, default to internal containerd. Format: [remote|docker]:[path-to-socket]\n      --data-dir string             Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                       Debug logging (default: false)\n      --debugListenOn string        Http listenOn for Debug pprof handler (default \":6060\")\n      --enable-cloud-provider       Whether or not to enable cloud provider support in kubelet\n  -h, --help                        help for worker\n      --iptables-mode string        iptables mode (valid values: nft, legacy, auto). default: auto\n      --kubelet-extra-args string   extra args for kubelet\n      --labels strings              Node labels, list of key=value pairs\n  -l, --logging stringToString      Logging Levels for the different components (default [kubelet=1,kube-proxy=1,etcd=info,containerd=info,konnectivity-server=1,kube-apiserver=1,kube-controller-manager=1,kube-scheduler=1])\n      --profile string              worker profile to use on the node (default \"default\")\n      --status-socket string        Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n      --taints strings              Node taints, list of key=value:effect strings\n      --token-file string           Path to the file containing token.\n  -v, --verbose                     Verbose logging (default: false)\n
"},{"location":"cli/k0s_install_worker/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":"
  -e, --env stringArray   set environment variable\n      --force             force init script creation\n
"},{"location":"cli/k0s_install_worker/#see-also","title":"SEE ALSO","text":"
  • k0s install - Install k0s on a brand-new system. Must be run as root (or with sudo)
"},{"location":"cli/k0s_kubeconfig/","title":"K0s kubeconfig","text":""},{"location":"cli/k0s_kubeconfig/#k0s-kubeconfig","title":"k0s kubeconfig","text":"

Create a kubeconfig file for a specified user

k0s kubeconfig [command] [flags]\n
"},{"location":"cli/k0s_kubeconfig/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for kubeconfig\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_kubeconfig/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
  • k0s kubeconfig admin - Display Admin's Kubeconfig file
  • k0s kubeconfig create - Create a kubeconfig for a user
"},{"location":"cli/k0s_kubeconfig_admin/","title":"K0s kubeconfig admin","text":""},{"location":"cli/k0s_kubeconfig_admin/#k0s-kubeconfig-admin","title":"k0s kubeconfig admin","text":"

Display Admin's Kubeconfig file

"},{"location":"cli/k0s_kubeconfig_admin/#synopsis","title":"Synopsis","text":"

Print kubeconfig for the Admin user to stdout

k0s kubeconfig admin [flags]\n
"},{"location":"cli/k0s_kubeconfig_admin/#examples","title":"Examples","text":"
    $ k0s kubeconfig admin > ~/.kube/config\n    $ export KUBECONFIG=~/.kube/config\n    $ kubectl get nodes\n
"},{"location":"cli/k0s_kubeconfig_admin/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for admin\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_kubeconfig_admin/#see-also","title":"SEE ALSO","text":"
  • k0s kubeconfig - Create a kubeconfig file for a specified user
"},{"location":"cli/k0s_kubeconfig_create/","title":"K0s kubeconfig create","text":""},{"location":"cli/k0s_kubeconfig_create/#k0s-kubeconfig-create","title":"k0s kubeconfig create","text":"

Create a kubeconfig for a user

"},{"location":"cli/k0s_kubeconfig_create/#synopsis","title":"Synopsis","text":"

Create a kubeconfig with a signed certificate and public key for a given user (and optionally user groups). Note: a certificate, once signed, cannot be revoked for a particular user

k0s kubeconfig create username [flags]\n
"},{"location":"cli/k0s_kubeconfig_create/#examples","title":"Examples","text":"
    Command to create a kubeconfig for a user:\n    CLI argument:\n    $ k0s kubeconfig create username\n\n    optionally add groups:\n    $ k0s kubeconfig create username --groups [groups]\n
"},{"location":"cli/k0s_kubeconfig_create/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n      --groups string          Specify groups\n  -h, --help                   help for create\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_kubeconfig_create/#see-also","title":"SEE ALSO","text":"
  • k0s kubeconfig - Create a kubeconfig file for a specified user
"},{"location":"cli/k0s_kubectl/","title":"K0s kubectl","text":""},{"location":"cli/k0s_kubectl/#k0s-kubectl","title":"k0s kubectl","text":"

kubectl controls the Kubernetes cluster manager

"},{"location":"cli/k0s_kubectl/#synopsis","title":"Synopsis","text":"

kubectl controls the Kubernetes cluster manager.

Find more information at: https://kubernetes.io/docs/reference/kubectl/

k0s kubectl [flags]\n
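
For example, on a controller node where the admin kubeconfig is available, the embedded kubectl can be used directly:

sudo k0s kubectl get nodes -o wide\n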
"},{"location":"cli/k0s_kubectl/#options","title":"Options","text":"
      --as string                      Username to impersonate for the operation. User could be a regular user or a service account in a namespace.\n      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.\n      --as-uid string                  UID to impersonate for the operation.\n      --cache-dir string               Default cache directory (default \"/home/runner/.kube/cache\")\n      --certificate-authority string   Path to a cert file for the certificate authority\n      --client-certificate string      Path to a client certificate file for TLS\n      --client-key string              Path to a client key file for TLS\n      --cluster string                 The name of the kubeconfig cluster to use\n      --context string                 The name of the kubeconfig context to use\n      --data-dir string                Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n      --debug                          Debug logging [$DEBUG]\n      --disable-compression            If true, opt-out of response compression for all requests to the server\n  -h, --help                           help for kubectl\n      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.\n      --log-flush-frequency duration   Maximum number of seconds between log flushes (default 5s)\n      --match-server-version           Require server version to match client version\n  -n, --namespace string               If present, the namespace scope for this CLI request\n      --password string                Password for basic authentication to the API server\n      --profile string                 Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default \"none\")\n      --profile-output string          Name of the file to write the profile to (default \"profile.pprof\")\n      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default \"0\")\n  -s, --server string                  The address and port of the Kubernetes API server\n      --tls-server-name string         Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used\n      --token string                   Bearer token for authentication to the API server\n      --user string                    The name of the kubeconfig user to use\n      --username string                Username for basic authentication to the API server\n  -v, --v Level                        number for the log level verbosity\n      --vmodule moduleSpec             comma-separated list of pattern=N settings for file-filtered logging (only works for the default text log format)\n      --warnings-as-errors             Treat warnings received from the server as errors and exit with a non-zero exit code\n
"},{"location":"cli/k0s_kubectl/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_reset/","title":"K0s reset","text":""},{"location":"cli/k0s_reset/#k0s-reset","title":"k0s reset","text":"

Uninstall k0s. Must be run as root (or with sudo)

k0s reset [flags]\n
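
As a sketch, a typical teardown stops the k0s service first and then resets the host; rebooting afterwards helps clear any remaining network state:

sudo k0s stop\nsudo k0s reset\n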
"},{"location":"cli/k0s_reset/#options","title":"Options","text":"
  -c, --config string          config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n      --cri-socket string      container runtime socket to use, default to internal containerd. Format: [remote|docker]:[path-to-socket]\n      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for reset\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_reset/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_restore/","title":"K0s restore","text":""},{"location":"cli/k0s_restore/#k0s-restore","title":"k0s restore","text":"

restore k0s state from given backup archive. Use '-' as filename to read from stdin. Must be run as root (or with sudo)

k0s restore filename [flags]\n
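
For example (the archive paths below are placeholders), a backup can be restored from a file or streamed via stdin:

sudo k0s restore /var/backups/k0s/k0s_backup.tar.gz\n\n# or, using '-' to read the archive from stdin\ncat /var/backups/k0s/k0s_backup.tar.gz | sudo k0s restore -\n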
"},{"location":"cli/k0s_restore/#options","title":"Options","text":"
      --config-out string      Specify desired name and full path for the restored k0s.yaml file (default: /home/runner/work/k0s/k0s/k0s_<archive timestamp>.yaml\n      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for restore\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_restore/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_start/","title":"K0s start","text":""},{"location":"cli/k0s_start/#k0s-start","title":"k0s start","text":"

Start the k0s service configured on this host. Must be run as root (or with sudo)

k0s start [flags]\n
"},{"location":"cli/k0s_start/#options","title":"Options","text":"
  -h, --help   help for start\n
"},{"location":"cli/k0s_start/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_status/","title":"K0s status","text":""},{"location":"cli/k0s_status/#k0s-status","title":"k0s status","text":"

Get k0s instance status information

k0s status [flags]\n
"},{"location":"cli/k0s_status/#examples","title":"Examples","text":"
The command will return information about system init, PID, k0s role, kubeconfig and similar.\n
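
For example, to get the same information in machine-readable form:

sudo k0s status -o json\n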
"},{"location":"cli/k0s_status/#options","title":"Options","text":"
  -h, --help                   help for status\n  -o, --out string             sets type of output to json or yaml\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n
"},{"location":"cli/k0s_status/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
  • k0s status components - Get k0s instance component status information
"},{"location":"cli/k0s_status_components/","title":"K0s status components","text":""},{"location":"cli/k0s_status_components/#k0s-status-components","title":"k0s status components","text":"

Get k0s instance component status information

k0s status components [flags]\n
"},{"location":"cli/k0s_status_components/#examples","title":"Examples","text":"
The command will return information about k0s components.\n
"},{"location":"cli/k0s_status_components/#options","title":"Options","text":"
  -h, --help            help for components\n      --max-count int   how many latest probes to show (default 1)\n
"},{"location":"cli/k0s_status_components/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":"
  -o, --out string             sets type of output to json or yaml\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n
"},{"location":"cli/k0s_status_components/#see-also","title":"SEE ALSO","text":"
  • k0s status - Get k0s instance status information
"},{"location":"cli/k0s_stop/","title":"K0s stop","text":""},{"location":"cli/k0s_stop/#k0s-stop","title":"k0s stop","text":"

Stop the k0s service configured on this host. Must be run as root (or with sudo)

k0s stop [flags]\n
"},{"location":"cli/k0s_stop/#options","title":"Options","text":"
  -h, --help   help for stop\n
"},{"location":"cli/k0s_stop/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_sysinfo/","title":"K0s sysinfo","text":""},{"location":"cli/k0s_sysinfo/#k0s-sysinfo","title":"k0s sysinfo","text":"

Display system information

"},{"location":"cli/k0s_sysinfo/#synopsis","title":"Synopsis","text":"

Runs k0s's pre-flight checks and issues the results to stdout.

k0s sysinfo [flags]\n
"},{"location":"cli/k0s_sysinfo/#options","title":"Options","text":"
      --controller        Include controller-specific sysinfo (default true)\n      --data-dir string   Data Directory for k0s (default \"/var/lib/k0s\")\n  -h, --help              help for sysinfo\n      --worker            Include worker-specific sysinfo (default true)\n
"},{"location":"cli/k0s_sysinfo/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_token/","title":"K0s token","text":""},{"location":"cli/k0s_token/#k0s-token","title":"k0s token","text":"

Manage join tokens

"},{"location":"cli/k0s_token/#options","title":"Options","text":"
  -h, --help   help for token\n
"},{"location":"cli/k0s_token/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
  • k0s token create - Create join token
  • k0s token invalidate - Invalidates existing join token
  • k0s token list - List join tokens
  • k0s token pre-shared - Generates a token and secret and stores them as files
"},{"location":"cli/k0s_token_create/","title":"K0s token create","text":""},{"location":"cli/k0s_token_create/#k0s-token-create","title":"k0s token create","text":"

Create join token

k0s token create [flags]\n
"},{"location":"cli/k0s_token_create/#examples","title":"Examples","text":"
k0s token create --role worker --expiry 100h //sets expiration time to 100 hours\nk0s token create --role worker --expiry 10m  //sets expiration time to 10 minutes\n
"},{"location":"cli/k0s_token_create/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n      --expiry string          Expiration time of the token. Format 1.5h, 2h45m or 300ms. (default \"0s\")\n  -h, --help                   help for create\n      --role string            Either worker or controller (default \"worker\")\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n      --wait                   wait forever (default false)\n
"},{"location":"cli/k0s_token_create/#see-also","title":"SEE ALSO","text":"
  • k0s token - Manage join tokens
"},{"location":"cli/k0s_token_invalidate/","title":"K0s token invalidate","text":""},{"location":"cli/k0s_token_invalidate/#k0s-token-invalidate","title":"k0s token invalidate","text":"

Invalidates existing join token

k0s token invalidate [flags]\n
"},{"location":"cli/k0s_token_invalidate/#examples","title":"Examples","text":"
k0s token invalidate xyz123\n
"},{"location":"cli/k0s_token_invalidate/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for invalidate\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_token_invalidate/#see-also","title":"SEE ALSO","text":"
  • k0s token - Manage join tokens
"},{"location":"cli/k0s_token_list/","title":"K0s token list","text":""},{"location":"cli/k0s_token_list/#k0s-token-list","title":"k0s token list","text":"

List join tokens

k0s token list [flags]\n
"},{"location":"cli/k0s_token_list/#examples","title":"Examples","text":"
k0s token list --role worker // list worker tokens\n
"},{"location":"cli/k0s_token_list/#options","title":"Options","text":"
      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for list\n      --role string            Either worker, controller or empty for all roles\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_token_list/#see-also","title":"SEE ALSO","text":"
  • k0s token - Manage join tokens
"},{"location":"cli/k0s_token_pre-shared/","title":"K0s token pre shared","text":""},{"location":"cli/k0s_token_pre-shared/#k0s-token-pre-shared","title":"k0s token pre-shared","text":"

Generates a token and secret and stores them as files

k0s token pre-shared [flags]\n
"},{"location":"cli/k0s_token_pre-shared/#examples","title":"Examples","text":"
k0s token pre-shared --role worker --cert <path>/<to>/ca.crt --url https://<controller-ip>:<port>/\n
"},{"location":"cli/k0s_token_pre-shared/#options","title":"Options","text":"
      --cert string            path to the CA certificate file\n      --data-dir string        Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                  Debug logging (default: false)\n      --debugListenOn string   Http listenOn for Debug pprof handler (default \":6060\")\n  -h, --help                   help for pre-shared\n      --out string             path to the output directory. Default: current dir (default \".\")\n      --role string            token role. valid values: worker, controller. Default: worker (default \"worker\")\n      --status-socket string   Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n      --url string             url of the api server to join\n      --valid duration         how long token is valid, in Go duration format\n  -v, --verbose                Verbose logging (default: false)\n
"},{"location":"cli/k0s_token_pre-shared/#see-also","title":"SEE ALSO","text":"
  • k0s token - Manage join tokens
"},{"location":"cli/k0s_version/","title":"K0s version","text":""},{"location":"cli/k0s_version/#k0s-version","title":"k0s version","text":"

Print the k0s version

k0s version [flags]\n
"},{"location":"cli/k0s_version/#options","title":"Options","text":"
  -a, --all    use to print all k0s version info\n  -h, --help   help for version\n  -j, --json   use to print all k0s version info in json\n
"},{"location":"cli/k0s_version/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s_worker/","title":"K0s worker","text":""},{"location":"cli/k0s_worker/#k0s-worker","title":"k0s worker","text":"

Run worker

k0s worker [join-token] [flags]\n
"},{"location":"cli/k0s_worker/#examples","title":"Examples","text":"
    Command to add worker node to the master node:\n    CLI argument:\n    $ k0s worker [token]\n\n    or CLI flag:\n    $ k0s worker --token-file [path_to_file]\n    Note: Token can be passed either as a CLI argument or as a flag\n
"},{"location":"cli/k0s_worker/#options","title":"Options","text":"
      --api-server string           HACK: api-server for the windows worker node\n      --cidr-range string           HACK: cidr range for the windows worker node (default \"10.96.0.0/12\")\n      --cluster-dns string          HACK: cluster dns for the windows worker node (default \"10.96.0.10\")\n      --cri-socket string           container runtime socket to use, default to internal containerd. Format: [remote|docker]:[path-to-socket]\n      --data-dir string             Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n  -d, --debug                       Debug logging (default: false)\n      --debugListenOn string        Http listenOn for Debug pprof handler (default \":6060\")\n      --enable-cloud-provider       Whether or not to enable cloud provider support in kubelet\n  -h, --help                        help for worker\n      --ignore-pre-flight-checks    continue even if pre-flight checks fail\n      --iptables-mode string        iptables mode (valid values: nft, legacy, auto). default: auto\n      --kubelet-extra-args string   extra args for kubelet\n      --labels strings              Node labels, list of key=value pairs\n  -l, --logging stringToString      Logging Levels for the different components (default [kube-scheduler=1,kubelet=1,kube-proxy=1,etcd=info,containerd=info,konnectivity-server=1,kube-apiserver=1,kube-controller-manager=1])\n      --profile string              worker profile to use on the node (default \"default\")\n      --status-socket string        Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n      --taints strings              Node taints, list of key=value:effect strings\n      --token-file string           Path to the file containing token.\n  -v, --verbose                     Verbose logging (default: false)\n
"},{"location":"cli/k0s_worker/#see-also","title":"SEE ALSO","text":"
  • k0s - k0s - Zero Friction Kubernetes
"},{"location":"contributors/CODE_OF_CONDUCT/","title":"k0s Community Code of Conduct","text":"

k0s follows the CNCF Code of Conduct.

"},{"location":"contributors/github_workflow/","title":"GitHub Workflow","text":"

This guide assumes you have already cloned the upstream repo to your system via git clone, or via go get github.com/k0sproject/k0s.

"},{"location":"contributors/github_workflow/#fork-the-project","title":"Fork The Project","text":"
  1. Go to http://github.com/k0sproject/k0s
  2. On the top, right-hand side, click on \"fork\" and select your username for the fork destination.
"},{"location":"contributors/github_workflow/#adding-the-forked-remote","title":"Adding the Forked Remote","text":"
export GITHUB_USER={ your github username }\n
cd $WORKDIR/k0s\ngit remote add $GITHUB_USER git@github.com:${GITHUB_USER}/k0s.git\n\n# Prevent push to Upstream\ngit remote set-url --push origin no_push\n\n# Set your fork remote as a default push target\ngit push --set-upstream $GITHUB_USER main\n

Your remotes should look something like this:

git remote -v\n
origin  https://github.com/k0sproject/k0s (fetch)\norigin  no_push (push)\nmy_fork git@github.com:{ github_username }/k0s.git (fetch)\nmy_fork git@github.com:{ github_username }/k0s.git (push)\n
"},{"location":"contributors/github_workflow/#create-rebase-your-feature-branch","title":"Create & Rebase Your Feature Branch","text":"

Create a feature branch and switch to it:

git checkout -b my_feature_branch\n

Rebase your branch:

git fetch origin && \\\ngit rebase origin/main\n
Current branch my_feature_branch is up to date.\n

Please don't use git pull instead of the above fetch / rebase. git pull does a merge, which leaves merge commits. These make the commit history messy and violate the principle that commits ought to be individually understandable and useful.

"},{"location":"contributors/github_workflow/#commit-push","title":"Commit & Push","text":"

Commit and sign your changes:

git commit --signoff\n

The commit message should have a short, capitalized title without a trailing period as its first line. After the title, add a blank line and then a longer description that explains why the change was made, unless it is obvious.

Use imperative mood in the commit message.

For example:

Summarize changes in around 50 characters or less\n\nMore detailed explanatory text, if necessary. Wrap it to about 72\ncharacters or so. In some contexts, the first line is treated as the\nsubject of the commit and the rest of the text as the body. The\nblank line separating the summary from the body is critical (unless\nyou omit the body entirely); various tools like `log`, `shortlog`\nand `rebase` can get confused if you run the two together.\n\nExplain the problem that this commit is solving. Focus on why you\nare making this change as opposed to how (the code explains that).\nAre there side effects or other unintuitive consequences of this\nchange? Here's the place to explain them.\n\nFurther paragraphs come after blank lines.\n\n - Bullet points are okay, too\n\n - Typically a hyphen or asterisk is used for the bullet, preceded\n   by a single space, with blank lines in between.\n\nIf you use an issue tracker, put references to them at the bottom,\nlike this:\n\nFixes: https://github.com/k0sproject/k0s/issues/373\nSee also: #456, #789\n\nSigned-off-by: Name Lastname <user@example.com>\n

You can go back and edit/build/test some more, then commit --amend in a few cycles.
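
For example, to fold newly staged changes into the previous commit while keeping the sign-off:

git add -u\ngit commit --amend --signoff\n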

When ready, push your changes to your fork's repository:

git push --set-upstream my_fork my_feature_branch\n
"},{"location":"contributors/github_workflow/#open-a-pull-request","title":"Open a Pull Request","text":"

See GitHub's docs on how to create a pull request from a fork.

"},{"location":"contributors/github_workflow/#get-a-code-review","title":"Get a code review","text":"

Once your pull request has been opened, it will be assigned to one or more reviewers and will go through a series of smoke tests.

Commits made in response to review comments should be added to the same branch on your fork.

Very small PRs are easy to review. Very large PRs are very difficult to review.

"},{"location":"contributors/github_workflow/#squashing-commits","title":"Squashing Commits","text":"

Commits on your branch should represent meaningful milestones or units of work. Small commits that contain typo fixes, rebases, review feedback, etc. should be squashed.

To do that, it's best to perform an interactive rebase:

"},{"location":"contributors/github_workflow/#example","title":"Example","text":"

Rebase your feature branch against upstream main branch:

git rebase -i origin/main\n

If your PR has 3 commits, output would be similar to this:

pick f7f3f6d Changed some code\npick 310154e fixed some typos\npick a5f4a0d made some review changes\n\n# Rebase 710f0f8..a5f4a0d onto 710f0f8\n#\n# Commands:\n# p, pick <commit> = use commit\n# r, reword <commit> = use commit, but edit the commit message\n# e, edit <commit> = use commit, but stop for amending\n# s, squash <commit> = use commit, but meld into previous commit\n# f, fixup <commit> = like \"squash\", but discard this commit's log message\n# x, exec <command> = run command (the rest of the line) using shell\n# b, break = stop here (continue rebase later with 'git rebase --continue')\n# d, drop <commit> = remove commit\n# l, label <label> = label current HEAD with a name\n# t, reset <label> = reset HEAD to a label\n# m, merge [-C <commit> | -c <commit>] <label> [# <oneline>]\n# .       create a merge commit using the original merge commit's\n# .       message (or the oneline, if no original merge commit was\n# .       specified). Use -c <commit> to reword the commit message.\n#\n# These lines can be re-ordered; they are executed from top to bottom.\n#\n# However, if you remove everything, the rebase will be aborted.\n#\n# Note that empty commits are commented out\n

Use a command line text editor to change the word pick to f or fixup for the commits you want to squash, then save your changes and continue the rebase.

Per the output above, you can see that:

fixup <commit> = like \"squash\", but discard this commit's log message\n

This means that, when rebased, the commit \"fixed some typos\" will be squashed into its parent commit and its log message discarded.
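
For the three commits above, the edited todo list would look like this before saving:

pick f7f3f6d Changed some code\nfixup 310154e fixed some typos\nfixup a5f4a0d made some review changes\n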

"},{"location":"contributors/github_workflow/#push-your-final-changes","title":"Push Your Final Changes","text":"

Once done, you can push the final commits to your branch:

git push --force\n

You can run multiple iterations of rebase/push -f, if needed.

"},{"location":"contributors/overview/","title":"Contributing to k0s","text":"

Thank you for taking the time to make a contribution to k0s. The following document is a set of guidelines and instructions for contributing to k0s.

When contributing to this repository, please consider first discussing the change you wish to make by opening an issue.

"},{"location":"contributors/overview/#code-of-conduct","title":"Code of Conduct","text":"

Our code of conduct can be found in the link below. Please follow it in all your interactions with the project.

  • Code Of Conduct
"},{"location":"contributors/overview/#github-workflow","title":"GitHub Workflow","text":"

We use GitHub flow, so all code changes are tracked via Pull Requests. A detailed guide on the recommended workflow can be found below:

  • GitHub Workflow
"},{"location":"contributors/overview/#code-testing","title":"Code Testing","text":"

All submitted PRs go through a set of tests and reviews. You can run most of these tests before a PR is submitted. In fact, we recommend it, because it will save on many possible review iterations and automated tests. The testing guidelines can be found here:

  • Contributor's Guide to Testing
"},{"location":"contributors/overview/#license","title":"License","text":"

By contributing, you agree that your contributions will be licensed as follows:

  • All content residing under the \"docs/\" directory of this repository is licensed under \"Creative Commons Attribution Share Alike 4.0 International\" (CC-BY-SA-4.0). See docs/LICENCE for details.
  • Content outside of the above-mentioned directories and restrictions is available under the \"Apache License 2.0\".
"},{"location":"contributors/overview/#community","title":"Community","text":"

Some of you might have noticed that we have an official community blog hosted on Medium. If you are not yet following us, we'd like to invite you to do so now! Make sure to follow us on Twitter as well \ud83d\ude0a

We have also decided to participate in the Lens Forums. As part of our ongoing collaboration with the Lens IDE team, who are not only close friends of the k0s crew but also widely embraced by the Kubernetes user community, it was only natural for us to join forces on their platform. By becoming a part of the Lens Forums, you can easily connect with us through the dedicated k0s categories. Stay in the loop with the latest news, engage in technical discussions, and contribute your expertise and feedback!

"},{"location":"contributors/testing/","title":"Testing Your Code","text":"

k0s uses GitHub Actions to run automated tests on any PR before merging. However, a PR will not be reviewed before all tests are green, so to save time and prevent your PR from going stale, it is best to test it before submitting the PR.

"},{"location":"contributors/testing/#run-local-verifications","title":"Run Local Verifications","text":"

Please run the following style and formatting commands and fix/check-in any changes:

  1. Linting

    We use golangci-lint for style verification. In the repository's root directory, simply run:

    make lint\n

    There's no need to install golangci-lint manually. The build system will take care of that.

  2. Go fmt

    go fmt ./...\n
  3. Checking the documentation

    Verify any changes to the documentation by following the instructions here.

  4. Pre-submit Flight Checks

    In the repository root directory, make sure that the following checks pass (a combined run is sketched after this list):

    • make build && git diff --exit-code runs successfully. Verifies that the build is working and that the generated source code matches the one that's checked into source control.
    • make check-unit runs successfully. Verifies that all the unit tests pass.
    • make check-basic runs successfully. Verifies basic cluster functionality using one controller and two workers.
    • make check-hacontrolplane runs successfully. Verifies that joining of controllers works.

    Please note that this last test is prone to \"flakiness\", so it might fail on occasion. If it fails constantly, take a deeper look at your code to find the source of the problem.

    If you find that all tests passed, you may open a pull request upstream.
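
    The checks listed above can also be chained into a single run; a minimal shell sketch, assuming a POSIX shell and the repository root as the working directory:

      set -e
      make build && git diff --exit-code
      make check-unit
      make check-basic
      make check-hacontrolplane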

"},{"location":"contributors/testing/#opening-a-pull-request","title":"Opening A Pull Request","text":""},{"location":"contributors/testing/#draft-mode","title":"Draft Mode","text":"

You may open a pull request in draft mode. All automated tests will still run against the PR, but the PR will not be assigned for review. Once a PR is ready for review, transition it from Draft mode, and code owners will be notified.

"},{"location":"contributors/testing/#conformance-testing","title":"Conformance Testing","text":"

Once a PR has been reviewed and all other tests have passed, a code owner will run a full end-to-end conformance test against the PR. This is usually the last step before merging.

"},{"location":"contributors/testing/#pre-requisites-for-pr-merge","title":"Pre-Requisites for PR Merge","text":"

In order for a PR to be merged, the following conditions must be met:

  1. The PR has passed all the automated tests (style, build & conformance tests).
  2. PR commits have been signed with the --signoff option.
  3. PR was reviewed and approved by a code owner.
  4. PR is rebased against upstream's main branch.
"},{"location":"contributors/testing/#cleanup-the-local-workspace","title":"Cleanup the local workspace","text":"

In order to clean up the local workspace, run make clean. It will clean up all of the intermediate files and directories created during the k0s build. Note that you can't just use git clean -X or even rm -rf, since the Go modules cache sets all of its subdirectories to read-only. If you get in trouble while trying to delete your local workspace, try chmod -R u+w /path/to/workspace && rm -rf /path/to/workspace.

"},{"location":"examples/ambassador-ingress/","title":"Installing Ambassador API Gateway","text":"

You can configure k0s with the Ambassador API Gateway and a MetalLB service loadbalancer. To do this you leverage Helm's extensible bootstrapping functionality to add the correct extensions to the k0s.yaml file during cluster configuration.

Note: Currently Ambassador API Gateway does not support Kubernetes v1.22 or above. See here for details.

"},{"location":"examples/ambassador-ingress/#use-docker-for-non-native-k0s-platforms","title":"Use Docker for non-native k0s platforms","text":"

With Docker you can run k0s on platforms that the distribution does not natively support (refer to Run k0s in Docker). Skip this section if you are on a platform that k0s natively supports.

As you need to create a custom configuration file to install Ambassador Gateway, you will first need to map that file into the k0s container and expose the ports Ambassador needs for outside access.

  1. Run k0s under Docker:

    docker run -d --name k0s --hostname k0s --privileged -v /var/lib/k0s -p 6443:6443 docker.io/k0sproject/k0s:latest\n
  2. Export the default k0s configuration file:

    docker exec k0s k0s config create > k0s.yaml\n
  3. Export the cluster config, so you can access it using kubectl:

    docker exec k0s cat /var/lib/k0s/pki/admin.conf > k0s-cluster.conf\nexport KUBECONFIG=\"$KUBECONFIG:$PWD/k0s-cluster.conf\"\n
"},{"location":"examples/ambassador-ingress/#configure-k0syaml-for-ambassador-gateway","title":"Configure k0s.yaml for Ambassador Gateway","text":"
  1. Open the k0s.yaml file and append the following extensions at the end:

    extensions:\nhelm:\nrepositories:\n- name: datawire\nurl: https://www.getambassador.io\n- name: bitnami\nurl: https://charts.bitnami.com/bitnami\ncharts:\n- name: ambassador\nchartname: datawire/ambassador\nversion: \"6.5.13\"\nnamespace: ambassador\nvalues: |2\nservice:\nexternalIPs:\n- 172.17.0.2\n- name: metallb\nchartname: bitnami/metallb\nversion: \"1.0.1\"\nnamespace: default\nvalues: |2\nconfigInline:\naddress-pools:\n- name: generic-cluster-pool\nprotocol: layer2\naddresses:\n- 172.17.0.2\n

    Note: It may be necessary to replace the 172.17.0.2 IP with your local IP address.

    This action adds both Ambassador and MetalLB (required for LoadBalancers) with the corresponding repositories and (minimal) configurations. Be aware that the provided example illustrates the use of your local network and that you will want to provide a range of IPs for MetalLB that are addressable on your LAN to access these services from anywhere on your network.

  2. Stop/remove your k0s container:

    docker stop k0s\ndocker rm k0s\n
  3. Restart your k0s container, this time with additional ports and the above config file mapped into it:

    docker run --name k0s --hostname k0s --privileged -v /var/lib/k0s -v \"$PWD\"/k0s.yaml:/k0s.yaml -p 6443:6443 -p 80:80 -p 443:443 -p 8080:8080 docker.io/k0sproject/k0s:latest\n

    After some time, you will be able to list the Ambassador Services:

    kubectl get services -n ambassador\n

    Output:

    NAME                          TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE\nambassador-1611224811         LoadBalancer   10.99.84.151    172.17.0.2    80:30327/TCP,443:30355/TCP   2m11s\nambassador-1611224811-admin   ClusterIP      10.96.79.130    <none>        8877/TCP                     2m11s\nambassador-1611224811-redis   ClusterIP      10.110.33.229   <none>        6379/TCP                     2m11s\n
  4. Install the Ambassador edgectl tool and run the login command:

    edgectl login --namespace=ambassador localhost\n

    Your browser will open and take you to the Ambassador Console.

"},{"location":"examples/ambassador-ingress/#deploy-map-a-service","title":"Deploy / Map a Service","text":"
  1. Create a YAML file for the service (for example purposes, create a Swagger Petstore service using a petstore.yaml file):

    ---\napiVersion: v1\nkind: Service\nmetadata:\nname: petstore\nnamespace: ambassador\nspec:\nports:\n- name: http\nport: 80\ntargetPort: 8080\nselector:\napp: petstore\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: petstore\nnamespace: ambassador\nspec:\nreplicas: 1\nselector:\nmatchLabels:\napp: petstore\nstrategy:\ntype: RollingUpdate\ntemplate:\nmetadata:\nlabels:\napp: petstore\nspec:\ncontainers:\n- name: petstore-backend\nimage: docker.io/swaggerapi/petstore3:unstable\nports:\n- name: http\ncontainerPort: 8080\n---\napiVersion: getambassador.io/v2\nkind:  Mapping\nmetadata:\nname: petstore\nnamespace: ambassador\nspec:\nprefix: /petstore/\nservice: petstore\n
  2. Apply the YAML file:

    kubectl apply -f petstore.yaml\n

    Output:

    service/petstore created\ndeployment.apps/petstore created\nmapping.getambassador.io/petstore created\n
  3. Validate that the service is running.

    In the terminal using curl:

    curl -k 'https://localhost/petstore/api/v3/pet/findByStatus?status=available'\n

    Output:

    [{\"id\":1,\"category\":{\"id\":2,\"name\":\"Cats\"},\"name\":\"Cat 1\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag1\"},{\"id\":2,\"name\":\"tag2\"}],\"status\":\"available\"},{\"id\":2,\"category\":{\"id\":2,\"name\":\"Cats\"},\"name\":\"Cat 2\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag2\"},{\"id\":2,\"name\":\"tag3\"}],\"status\":\"available\"},{\"id\":4,\"category\":{\"id\":1,\"name\":\"Dogs\"},\"name\":\"Dog 1\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag1\"},{\"id\":2,\"name\":\"tag2\"}],\"status\":\"available\"},{\"id\":7,\"category\":{\"id\":4,\"name\":\"Lions\"},\"name\":\"Lion 1\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag1\"},{\"id\":2,\"name\":\"tag2\"}],\"status\":\"available\"},{\"id\":8,\"category\":{\"id\":4,\"name\":\"Lions\"},\"name\":\"Lion 2\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag2\"},{\"id\":2,\"name\":\"tag3\"}],\"status\":\"available\"},{\"id\":9,\"category\":{\"id\":4,\"name\":\"Lions\"},\"name\":\"Lion 3\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag3\"},{\"id\":2,\"name\":\"tag4\"}],\"status\":\"available\"},{\"id\":10,\"category\":{\"id\":3,\"name\":\"Rabbits\"},\"name\":\"Rabbit 1\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag3\"},{\"id\":2,\"name\":\"tag4\"}],\"status\":\"available\"}]\n

    Or by way of your browser:

    Open https://localhost/petstore/ in your browser and change the URL in the field at the top of the page to https://localhost/petstore/api/v3/openapi.json (as it is mapped to the /petstore prefix) and click Explore.

  4. Navigate to the Mappings area in the Ambassador Console to view the corresponding PetStore mapping as configured.

"},{"location":"examples/ansible-playbook/","title":"Creating a cluster with an Ansible Playbook","text":"

Ansible is a popular infrastructure-as-code tool that you can use to automate tasks in order to achieve a desired state in a system. With Ansible (and the k0s-ansible playbook) you can quickly install a multi-node Kubernetes cluster.

Note: Before using Ansible to create a cluster, you should have a general understanding of Ansible (refer to the official Ansible User Guide).

"},{"location":"examples/ansible-playbook/#prerequisites","title":"Prerequisites","text":"

You will require the following tools to install k0s on local virtual machines:

Tool | Detail
multipass | A lightweight VM manager that uses KVM on Linux, Hyper-V on Windows, and hypervisor.framework on macOS. Installation information
ansible | An infrastructure-as-code tool. Installation Guide
kubectl | Command line tool for running commands against Kubernetes clusters. Kubernetes Install Tools
"},{"location":"examples/ansible-playbook/#create-the-cluster","title":"Create the cluster","text":"
  1. Download k0s-ansible

    Clone the k0s-ansible repository on your local machine:

    git clone https://github.com/movd/k0s-ansible.git\ncd k0s-ansible\n
  2. Create virtual machines

    Note: Though multipass is the VM manager used here, there is no hard dependency on it; any other VM provider will work as well.

    Create a number of virtual machines. For the automation to work, each instance must have passwordless SSH access. To achieve this, provision each instance with a cloud-init manifest that imports your current user's public SSH key into a user named k0s (refer to the bash script below; a single-instance sketch follows after the output).

    This creates 7 virtual machines:

    ./tools/multipass_create_instances.sh 7\n
    Create cloud-init to import ssh key...\n[1/7] Creating instance k0s-1 with multipass...\nLaunched: k0s-1\n[2/7] Creating instance k0s-2 with multipass...\nLaunched: k0s-2\n[3/7] Creating instance k0s-3 with multipass...\nLaunched: k0s-3\n[4/7] Creating instance k0s-4 with multipass...\nLaunched: k0s-4\n[5/7] Creating instance k0s-5 with multipass...\nLaunched: k0s-5\n[6/7] Creating instance k0s-6 with multipass...\nLaunched: k0s-6\n[7/7] Creating instance k0s-7 with multipass...\nLaunched: k0s-7\nName State IPv4 Image\nk0s-1 Running 192.168.64.32 Ubuntu 20.04 LTS\nk0s-2 Running 192.168.64.33 Ubuntu 20.04 LTS\nk0s-3 Running 192.168.64.56 Ubuntu 20.04 LTS\nk0s-4 Running 192.168.64.57 Ubuntu 20.04 LTS\nk0s-5 Running 192.168.64.58 Ubuntu 20.04 LTS\nk0s-6 Running 192.168.64.60 Ubuntu 20.04 LTS\nk0s-7 Running 192.168.64.61 Ubuntu 20.04 LTS\n
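
    Under the hood, the helper script essentially calls multipass launch once per instance with the generated cloud-init file. A single instance could be created manually along these lines (a hedged sketch; the cloud-init.yaml file name and the Ubuntu 20.04 image are assumptions):

      multipass launch --name k0s-1 --cloud-init cloud-init.yaml 20.04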
  3. Create Ansible inventory

    1. Copy the sample to create the inventory directory:

      ```shell\n  cp -rfp inventory/sample inventory/multipass\n  ```\n

    2. Create the inventory.

      Assign the virtual machines to the different host groups, as required by the playbook logic.\n\n  | Host group            | Detail                                    |\n  |:----------------------|:------------------------------------------|\n  | `initial_controller`  | Must contain a single node that creates the worker and controller tokens needed by the other nodes|\n  | `controller`          | Can contain nodes that, together with the host from `initial_controller`, form a highly available isolated control plane |\n  | `worker`              | Must contain at least one node, to allow for the deployment of Kubernetes objects |\n

    3. Fill in inventory/multipass/inventory.yml. This can be done by direct entry using the metadata provided by multipass list, or you can use the Python script multipass_generate_inventory.py:

      ```shell\n  ./tools/multipass_generate_inventory.py\n  ```\n\n  ```shell\n  Designate first three instances as control plane\n  Created Ansible Inventory at: /Users/dev/k0s-ansible/tools/inventory.yml\n  $ cp tools/inventory.yml inventory/multipass/inventory.yml\n  ```\n\n  Your `inventory/multipass/inventory.yml` should resemble the example below:\n\n  ```yaml\n  ---\n  all:\n    children:\n      initial_controller:\n        hosts:\n          k0s-1:\n      controller:\n        hosts:\n          k0s-2:\n          k0s-3:\n      worker:\n        hosts:\n          k0s-4:\n          k0s-5:\n          k0s-6:\n          k0s-7:\n    hosts:\n      k0s-1:\n        ansible_host: 192.168.64.32\n      k0s-2:\n        ansible_host: 192.168.64.33\n      k0s-3:\n        ansible_host: 192.168.64.56\n      k0s-4:\n        ansible_host: 192.168.64.57\n      k0s-5:\n        ansible_host: 192.168.64.58\n      k0s-6:\n        ansible_host: 192.168.64.60\n      k0s-7:\n        ansible_host: 192.168.64.61\n    vars:\n      ansible_user: k0s\n  ```\n
  4. Test the virtual machine connections

    Run the following command to test the connection to your hosts:

    ansible all -i inventory/multipass/inventory.yml -m ping\n
    k0s-4 | SUCCESS => {\n\"ansible_facts\": {\n\"discovered_interpreter_python\": \"/usr/bin/python3\"\n},\n    \"changed\": false,\n    \"ping\": \"pong\"\n}\n...\n

    If the test result indicates success, you can proceed.

  5. Provision the cluster with Ansible

    When you apply the playbook, k0s will be downloaded and set up on all nodes, tokens will be exchanged, and a kubeconfig will be dumped to your local deployment environment.

    ansible-playbook site.yml -i inventory/multipass/inventory.yml\n
    TASK [k0s/initial_controller : print kubeconfig command] *******************************************************\nTuesday 22 December 2020  17:43:20 +0100 (0:00:00.257)       0:00:41.287 ******\nok: [k0s-1] => {\n\"msg\": \"To use Cluster: export KUBECONFIG=/Users/dev/k0s-ansible/inventory/multipass/artifacts/k0s-kubeconfig.yml\"\n}\n...\nPLAY RECAP *****************************************************************************************************\nk0s-1                      : ok=21   changed=11   unreachable=0    failed=0    skipped=1    rescued=0    ignored=0\nk0s-2                      : ok=10   changed=5    unreachable=0    failed=0    skipped=1    rescued=0    ignored=0\nk0s-3                      : ok=10   changed=5    unreachable=0    failed=0    skipped=1    rescued=0    ignored=0\nk0s-4                      : ok=9    changed=5    unreachable=0    failed=0    skipped=1    rescued=0    ignored=0\nk0s-5                      : ok=9    changed=5    unreachable=0    failed=0    skipped=1    rescued=0    ignored=0\nk0s-6                      : ok=9    changed=5    unreachable=0    failed=0    skipped=1    rescued=0    ignored=0\nk0s-7                      : ok=9    changed=5    unreachable=0    failed=0    skipped=1    rescued=0    ignored=0\n\nTuesday 22 December 2020  17:43:36 +0100 (0:00:01.204)       0:00:57.478 ******\n===============================================================================\nprereq : Install apt packages -------------------------------------------------------------------------- 22.70s\nk0s/controller : Wait for k8s apiserver ----------------------------------------------------------------- 4.30s\nk0s/initial_controller : Create worker join token ------------------------------------------------------- 3.38s\nk0s/initial_controller : Wait for k8s apiserver --------------------------------------------------------- 3.36s\ndownload : Download k0s binary k0s-v0.9.0-rc1-amd64 ----------------------------------------------------- 3.11s\nGathering Facts ----------------------------------------------------------------------------------------- 2.85s\nGathering Facts ----------------------------------------------------------------------------------------- 1.95s\nprereq : Create k0s Directories ------------------------------------------------------------------------- 1.53s\nk0s/worker : Enable and check k0s service --------------------------------------------------------------- 1.20s\nprereq : Write the k0s config file ---------------------------------------------------------------------- 1.09s\nk0s/initial_controller : Enable and check k0s service --------------------------------------------------- 0.94s\nk0s/controller : Enable and check k0s service ----------------------------------------------------------- 0.73s\nGathering Facts ----------------------------------------------------------------------------------------- 0.71s\nGathering Facts ----------------------------------------------------------------------------------------- 0.66s\nGathering Facts ----------------------------------------------------------------------------------------- 0.64s\nk0s/worker : Write the k0s token file on worker --------------------------------------------------------- 0.64s\nk0s/worker : Copy k0s service file ---------------------------------------------------------------------- 0.53s\nk0s/controller : Write the k0s token file on controller ------------------------------------------------- 0.41s\nk0s/controller : Copy k0s service file 
------------------------------------------------------------------ 0.40s\nk0s/initial_controller : Copy k0s service file ---------------------------------------------------------- 0.36s\n
"},{"location":"examples/ansible-playbook/#use-the-cluster-with-kubectl","title":"Use the cluster with kubectl","text":"

A kubeconfig was copied to your local machine while the playbook was running which you can use to gain access to your new Kubernetes cluster:

export KUBECONFIG=/Users/dev/k0s-ansible/inventory/multipass/artifacts/k0s-kubeconfig.yml\nkubectl cluster-info\n
Kubernetes control plane is running at https://192.168.64.32:6443\nCoreDNS is running at https://192.168.64.32:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy\nMetrics-server is running at https://192.168.64.32:6443/api/v1/namespaces/kube-system/services/https:metrics-server:/proxy\n\n$ kubectl get nodes -o wide\nNAME    STATUS     ROLES    AGE   VERSION        INTERNAL-IP     EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION     CONTAINER-RUNTIME\nk0s-4   Ready      <none>   21s   v1.20.1-k0s1   192.168.64.57   <none>        Ubuntu 20.04.1 LTS   5.4.0-54-generic   containerd://1.4.3\nk0s-5   Ready      <none>   21s   v1.20.1-k0s1   192.168.64.58   <none>        Ubuntu 20.04.1 LTS   5.4.0-54-generic   containerd://1.4.3\nk0s-6   NotReady   <none>   21s   v1.20.1-k0s1   192.168.64.60   <none>        Ubuntu 20.04.1 LTS   5.4.0-54-generic   containerd://1.4.3\nk0s-7   NotReady   <none>   21s   v1.20.1-k0s1   192.168.64.61   <none>        Ubuntu 20.04.1 LTS   5.4.0-54-generic   containerd://1.4.3\n

Note: The first three control plane nodes will not be displayed, as the control plane is fully isolated. To check on the distributed etcd cluster, you can use ssh to securely log in to a controller node, or you can run the following ad-hoc command:

ansible k0s-1 -a \"k0s etcd member-list -c /etc/k0s/k0s.yaml\" -i inventory/multipass/inventory.yml | tail -1 | jq\n
{\n\"level\": \"info\",\n\"members\": {\n\"k0s-1\": \"https://192.168.64.32:2380\",\n\"k0s-2\": \"https://192.168.64.33:2380\",\n\"k0s-3\": \"https://192.168.64.56:2380\"\n},\n\"msg\": \"done\",\n\"time\": \"2020-12-23T00:21:22+01:00\"\n}\n

Once all worker nodes are in the Ready state, you can use the cluster. You can test the cluster state by creating a simple nginx deployment.

kubectl create deployment nginx --image=gcr.io/google-containers/nginx --replicas=5\n
deployment.apps/nginx created\n
kubectl expose deployment nginx --target-port=80 --port=8100\n
service/nginx exposed\n
kubectl run hello-k0s --image=quay.io/prometheus/busybox --rm -it --restart=Never --command -- wget -qO- nginx:8100\n
<!DOCTYPE html>\n<html>\n<head>\n<title>Welcome to nginx on Debian!</title>\n...\npod \"hello-k0s\" deleted\n

Note: k0s-ansible is developed by k0s users. Please send your feedback, bug reports, and pull requests to github.com/movd/k0s-ansible.

"},{"location":"examples/gitops-flux/","title":"Using GitOps with Flux","text":"

This tutorial describes the benefits of using GitOps with k0s and provides an example of deploying an application with Flux v2.

GitOps is a practice where you leverage Git as the single source of truth. It offers a declarative way to do Kubernetes cluster management and application delivery. The desired state, described with Kubernetes manifests and Helm packages, is pulled from a Git repository and automatically deployed to the cluster. This also makes it quick to re-deploy and recover applications whenever needed.

"},{"location":"examples/gitops-flux/#why-gitops-with-k0s","title":"Why GitOps with k0s","text":"

k0s doesn't come with a lot of different extensions and add-ons that some users might find useful (and some not). Instead, k0s comes with 100% upstream Kubernetes and is compatible with all Kubernetes extensions. This makes it easy for k0s users to freely select the extensions that their applications and infrastructure need, without conflicting with any predefined options. GitOps is a perfect practice for deploying these extensions automatically alongside applications by defining and configuring them directly in Git. This also helps with cluster security, as the cluster doesn't need to be accessed directly when application changes are needed. However, it puts more stress on Git access control, because changes in Git are propagated automatically to the cluster.

"},{"location":"examples/gitops-flux/#install-k0s","title":"Install k0s","text":"

Let's start by installing k0s. Any k0s deployment option will do, but to keep things simple, this Quick Start Guide gets you started with a single node k0s cluster.

Run these three commands to download k0s, install and start it:

curl -sSLf https://get.k0s.sh | sudo sh\nsudo k0s install controller --single\nsudo k0s start\n
"},{"location":"examples/gitops-flux/#set-kubeconfig","title":"Set kubeconfig","text":"

Next, you need to set the KUBECONFIG variable, which is needed by Flux CLI later on.

sudo k0s kubeconfig admin > kubeconfig\nexport KUBECONFIG=$PWD/kubeconfig\n
"},{"location":"examples/gitops-flux/#install-flux","title":"Install Flux","text":"

To proceed with Flux, install the Flux CLI, which is used for configuring Flux in your Kubernetes cluster. For macOS and Linux, this can be done either with brew or with a bash script. Use one of them:

brew install fluxcd/tap/flux\n

or

curl -s https://fluxcd.io/install.sh | sudo bash\n

For more details of the Flux installation, check the Flux documentation.

"},{"location":"examples/gitops-flux/#configure-flux-for-a-github-repository","title":"Configure Flux for a GitHub repository","text":"

Export your GitHub personal access token (see the instructions on how to get one) and username:

export GITHUB_TOKEN=<your-token>\nexport GITHUB_USER=<your-username>\n

Come up with a GitHub repo name (e.g. flux-demo), which will be used by Flux to store (and sync) the config files.

export GITHUB_REPO_NAME=<select-repo-name-to-be-created>\n

Bootstrap flux to your cluster. The GitHub repo will be created automatically by Flux:

flux bootstrap github \\\n--owner=$GITHUB_USER \\\n--repository=$GITHUB_REPO_NAME \\\n--branch=main \\\n--path=./clusters/my-cluster \\\n--personal\n

Now you are all set with Flux and can proceed to deploy your first application.

"},{"location":"examples/gitops-flux/#deploy-example-application","title":"Deploy example application","text":"

Next, we'll deploy a simple web application and expose it using a NodePort service. In the previous step, we configured Flux to track the path /clusters/my-cluster/ in your repository. Now clone the repo to your local machine:

git clone git@github.com:$GITHUB_USER/$GITHUB_REPO_NAME.git\ncd $GITHUB_REPO_NAME/clusters/my-cluster/\n

Create the following YAML file (simple-web-server-with-nodeport.yaml) in the same directory:

apiVersion: v1\nkind: Namespace\nmetadata:\nname: web\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: web-server\nnamespace: web\nspec:\nselector:\nmatchLabels:\napp: web\ntemplate:\nmetadata:\nlabels:\napp: web\nspec:\ncontainers:\n- name: httpd\nimage: httpd:2.4.53-alpine\nports:\n- containerPort: 80\n---\napiVersion: v1\nkind: Service\nmetadata:\nname: web-server-service\nnamespace: web\nspec:\ntype: NodePort\nselector:\napp: web\nports:\n- port: 80\ntargetPort: 80\nnodePort: 30003\n

Then push the new file to the repository:

git add .\ngit commit -m \"Add web server manifest\"\ngit push\n

Check that Flux detects your changes and the web server gets applied (by default this should happen within 1 min):

flux get kustomizations\n

If the deployment went successfully, you should see the newly added objects:

sudo k0s kubectl get all -n web\n

You can try to access the web application using

curl localhost:30003\n

or by using a web browser http://localhost:30003.

Voilà! You have now installed the example application using the GitOps method with Flux. As a next step, you can try to modify the web app YAML file or add another application directly into the Git repo, and see how Flux automatically picks up the changes without you accessing the cluster with kubectl.

"},{"location":"examples/gitops-flux/#uninstall-flux","title":"Uninstall Flux","text":"

If you want to uninstall Flux from the cluster, run:

flux uninstall --namespace=flux-system\n

Your applications, which were installed by Flux, will remain in the cluster, but you will no longer have the Flux processes syncing the desired state from Git.

"},{"location":"examples/metallb-loadbalancer/","title":"Installing MetalLB Load Balancer","text":"

This tutorial covers the installation of MetalLB load balancer on k0s. k0s doesn't come with an in-built load balancer, but it's easy to deploy MetalLB as shown in this document.

"},{"location":"examples/metallb-loadbalancer/#about-load-balancers","title":"About Load Balancers","text":"

Load balancers can be used for exposing applications to the external network. A load balancer provides a single IP address to route incoming requests to your app. In order to successfully create Kubernetes services of type LoadBalancer, you need to have a load balancer implementation available for Kubernetes.

A load balancer can be implemented by a cloud provider as an external service (with additional cost). It can also be implemented internally in the Kubernetes cluster (as a pure software solution) with MetalLB.
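
To make this concrete, requesting a load balancer from Kubernetes is just a matter of setting the service type to LoadBalancer; a minimal sketch (the deployment name web and the httpd image are illustrative assumptions):

kubectl create deployment web --image=httpd:2.4.53-alpine
kubectl expose deployment web --port=80 --target-port=80 --type=LoadBalancer
# EXTERNAL-IP stays <pending> until a load balancer implementation is available
kubectl get service web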

"},{"location":"examples/metallb-loadbalancer/#metallb","title":"MetalLB","text":"

MetalLB implements the Kubernetes service of type LoadBalancer. When a LoadBalancer service is requested, MetalLB allocates an IP address from the configured range and makes the network aware that the IP “lives” in the cluster.

One of the benefits of MetalLB is that you avoid all cloud provider dependencies. That's why MetalLB is typically used for bare-metal deployments.

See the MetalLB requirements in MetalLB's official documentation. By default, k0s runs with Kube-Router CNI, which is compatible with MetalLB as long as you don't use MetalLB's BGP mode. If you are not using Kube-Router and you are using kube-proxy in IPVS mode, you need to enable strict ARP mode in kube-proxy (see MetalLB preparations):

apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s\nspec:\nnetwork:\nkubeProxy:\nmode: ipvs\nipvs:\nstrictARP: true\n

Port 7946 (TCP & UDP) must be allowed between the nodes. In addition, before installing MetalLB, make sure there is no other software listening on port 7946 on the nodes, such as the Docker daemon (a quick check is sketched below).
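
A minimal way to check for listeners on that port, assuming the iproute2 ss tool is available on the node; no output means nothing is currently listening on 7946:

sudo ss -tulpn | grep -w 7946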

"},{"location":"examples/metallb-loadbalancer/#install-metallb","title":"Install MetalLB","text":"
  1. Install MetalLB using the official Helm chart and k0s Helm extension manager:

    apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\n  name: k0s\nspec:\n  extensions:\n    helm:\n      repositories:\n      - name: metallb\n        url: https://metallb.github.io/metallb\n      charts:\n      - name: metallb\n        chartname: metallb/metallb\n        namespace: metallb\n

    Other installation methods are available in the MetalLB's official documentation.

  2. Create ConfigMap for MetalLB

    Next you need to create a ConfigMap, which includes an IP address range for the load balancer. The pool of IPs must be dedicated to MetalLB's use. You can't reuse, for example, the Kubernetes node IPs or IPs controlled by other services. You can, however, use private IP addresses, for example 192.168.1.180-192.168.1.199, but then you need to take care of the routing from the external network if you need external access. In this example, we don't need it.

    Create a YAML file accordingly, and deploy it: kubectl apply -f metallb-l2-pool.yaml

    ---\napiVersion: metallb.io/v1beta1\nkind: IPAddressPool\nmetadata:\nname: first-pool\nnamespace: metallb-system\nspec:\naddresses:\n- <ip-address-range-start>-<ip-address-range-stop>\n---\napiVersion: metallb.io/v1beta1\nkind: L2Advertisement\nmetadata:\nname: example\nnamespace: metallb-system\n
  3. Deploy an example application (web server) with a load balancer

    apiVersion: v1\nkind: Namespace\nmetadata:\nname: web\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: web-server\nnamespace: web\nspec:\nselector:\nmatchLabels:\napp: web\ntemplate:\nmetadata:\nlabels:\napp: web\nspec:\ncontainers:\n- name: httpd\nimage: httpd:2.4.53-alpine\nports:\n- containerPort: 80\n---\napiVersion: v1\nkind: Service\nmetadata:\nname: web-server-service\nnamespace: web\nspec:\nselector:\napp: web\nports:\n- protocol: TCP\nport: 80\ntargetPort: 80\ntype: LoadBalancer\n
  4. Check your LoadBalancer

    Run the following command to see your LoadBalancer with the external-ip and port.

    kubectl get service -n web\n
  5. Access your example application

    If you used private IP addresses for MetalLB in the ConfigMap (in step 2), you should run the following command from the local network. Use the IP address from the previous step.

    curl <EXTERNAL-IP>\n

    If you are successful, you should see <html><body><h1>It works!</h1></body></html>.

"},{"location":"examples/metallb-loadbalancer/#additional-information","title":"Additional information","text":"

For more information about MetalLB installation, take a look at the official MetalLB documentation.

"},{"location":"examples/metallb-loadbalancer/#alternative-examples","title":"Alternative examples","text":"

Get load balancer using cloud provider.

"},{"location":"examples/nginx-ingress/","title":"Installing NGINX Ingress Controller","text":"

This tutorial covers the installation of NGINX Ingress controller, which is an open source project made by the Kubernetes community. k0s doesn't come with an in-built Ingress controller, but it's easy to deploy NGINX Ingress as shown in this document. Other Ingress solutions can be used as well (see the links at the end of the page).

"},{"location":"examples/nginx-ingress/#nodeport-vs-loadbalancer-vs-ingress-controller","title":"NodePort vs LoadBalancer vs Ingress controller","text":"

Kubernetes offers multiple options for exposing services to external networks. The main options are NodePort, LoadBalancer and Ingress controller.

NodePort, as the name says, means that a port on a node is configured to route incoming requests to a certain service. The port range is limited to 30000-32767, so you cannot expose commonly used ports like 80 or 443 with NodePort.

LoadBalancer is a service, which is typically implemented by the cloud provider as an external service (with additional cost). Load balancers can also be installed internally in the Kubernetes cluster with MetalLB, which is typically used for bare-metal deployments. Load balancer provides a single IP address to access your services, which can run on multiple nodes.

An Ingress controller helps to consolidate the routing rules of multiple applications into one entity. The Ingress controller is exposed to the external network with the help of a NodePort or LoadBalancer service, or the host network. You can also use the Ingress controller to terminate TLS for your domain in one place, instead of terminating TLS for each application separately.
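
As an illustration of such a routing rule, an Ingress for a single host and path could also be created imperatively (a sketch only; the class name nginx and the host, service name and port match the NodePort example later on this page):

kubectl create ingress web-server-ingress --class=nginx \
  --rule='web.example.com/*=web-server-service:5000'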

"},{"location":"examples/nginx-ingress/#nginx-ingress-controller","title":"NGINX Ingress Controller","text":"

NGINX Ingress Controller is a very popular Ingress for Kubernetes. In many cloud environments, it can be exposed to an external network by using the load balancer offered by the cloud provider. However, cloud load balancers are not necessary. A load balancer can also be implemented with MetalLB, which can be deployed in the same Kubernetes cluster. Another option to expose the Ingress controller to an external network is to use NodePort. The third option is to use the host network. All of these alternatives are described in more detail below, with separate examples.

"},{"location":"examples/nginx-ingress/#install-nginx-using-nodeport","title":"Install NGINX using NodePort","text":"

Installing NGINX using NodePort is the simplest example for an Ingress controller, as it avoids the load balancer dependency. NodePort is used for exposing NGINX Ingress to the external network.

  1. Install NGINX Ingress Controller (using the official manifests by the ingress-nginx project)

    kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.3/deploy/static/provider/baremetal/deploy.yaml\n
  2. Check that the Ingress controller pods have started

    kubectl get pods -n ingress-nginx\n
  3. Check that you can see the NodePort service

    kubectl get services -n ingress-nginx\n
  4. From version v1.0.0 of the Ingress-NGINX Controller, an IngressClass object is required.

    In the default installation, an ingressclass object named nginx has already been created.

    $ kubectl -n ingress-nginx get ingressclasses\nNAME    CONTROLLER             PARAMETERS   AGE\nnginx   k8s.io/ingress-nginx   <none>       162m\n

    If this is the only instance of the Ingress-NGINX controller, you should add the annotation ingressclass.kubernetes.io/is-default-class to your ingress class:

    kubectl -n ingress-nginx annotate ingressclasses nginx ingressclass.kubernetes.io/is-default-class=\"true\"\n
  5. Try connecting the Ingress controller using the NodePort from the previous step (in the range of 30000-32767)

    curl <worker-external-ip>:<node-port>\n

    If you don't yet have any backend service configured, you should see \"404 Not Found\" from nginx. This is ok for now. If you see a response from nginx, the Ingress Controller is running and you can reach it.

  6. Deploy a small test application (httpd web server) to verify your Ingress controller.

    Create the following YAML file and name it \"simple-web-server-with-ingress.yaml\":

    apiVersion: v1\nkind: Namespace\nmetadata:\nname: web\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: web-server\nnamespace: web\nspec:\nselector:\nmatchLabels:\napp: web\ntemplate:\nmetadata:\nlabels:\napp: web\nspec:\ncontainers:\n- name: httpd\nimage: httpd:2.4.53-alpine\nports:\n- containerPort: 80\n---\napiVersion: v1\nkind: Service\nmetadata:\nname: web-server-service\nnamespace: web\nspec:\nselector:\napp: web\nports:\n- protocol: TCP\nport: 5000\ntargetPort: 80\n---\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\nname: web-server-ingress\nnamespace: web\nspec:\ningressClassName: nginx\nrules:\n- host: web.example.com\nhttp:\npaths:\n- path: /\npathType: Prefix\nbackend:\nservice:\nname: web-server-service\nport:\nnumber: 5000\n

    Deploy the app:

    kubectl apply -f simple-web-server-with-ingress.yaml\n
  7. Verify that you can access your application using the NodePort from step 3.

    curl <worker-external-ip>:<node-port> -H 'Host: web.example.com'\n

    If you are successful, you should see <html><body><h1>It works!</h1></body></html>.

"},{"location":"examples/nginx-ingress/#install-nginx-using-loadbalancer","title":"Install NGINX using LoadBalancer","text":"

In this example you'll install NGINX Ingress controller using LoadBalancer on k0s.

  1. Install LoadBalancer

    There are two alternatives to install LoadBalancer on k0s. Follow the links in order to install LoadBalancer.

    - MetalLB as a pure SW solution running internally in the k0s cluster - Cloud provider's load balancer running outside of the k0s cluster

  2. Verify LoadBalancer

    In order to proceed you need to have a load balancer available for the Kubernetes cluster. To verify that it's available, deploy a simple load balancer service.

    apiVersion: v1\nkind: Service\nmetadata:\nname: example-load-balancer\nspec:\nselector:\napp: web\nports:\n- protocol: TCP\nport: 80\ntargetPort: 80\ntype: LoadBalancer\n
    kubectl apply -f example-load-balancer.yaml\n

    Then run the following command to see your LoadBalancer with an external IP address.

    kubectl get service example-load-balancer\n

    If the LoadBalancer is not available, you won't get an IP address for EXTERNAL-IP. Instead, it's <pending>. In this case you should go back to the previous step and check your load balancer availability.

    If you are successful, you'll see a real IP address and you can proceed further.

    You can delete the example-load-balancer:

    kubectl delete -f example-load-balancer.yaml\n
  3. Install NGINX Ingress Controller by following the steps in the previous chapter (step 1 to step 4).

  4. Edit the NGINX Ingress Controller to use LoadBalancer instead of NodePort

    kubectl edit service ingress-nginx-controller -n ingress-nginx\n

    Find the spec.type field and change it from \"NodePort\" to \"LoadBalancer\".
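
    If you prefer a non-interactive alternative to editing the service, the same change can be applied as a patch (a sketch; it assumes the default service name from the upstream manifests, as in step 4 above):

      kubectl -n ingress-nginx patch service ingress-nginx-controller \
        -p '{"spec":{"type":"LoadBalancer"}}'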

  5. Check that you can see the ingress-nginx-controller with type LoadBalancer.

    kubectl get services -n ingress-nginx\n
  6. Try connecting to the Ingress controller

    If you used private IP addresses for MetalLB in step 2, you should run the following command from the local network. Use the IP address from the previous step, column EXTERNAL-IP.

    curl <EXTERNAL-IP>\n

    If you don't yet have any backend service configured, you should see \"404 Not Found\" from nginx. This is ok for now. If you see a response from nginx, the Ingress Controller is running and you can reach it using LoadBalancer.

  7. Deploy a small test application (httpd web server) to verify your Ingress.

    Create the YAML file \"simple-web-server-with-ingress.yaml\" as described in the previous chapter (step 6) and deploy it.

    kubectl apply -f simple-web-server-with-ingress.yaml\n
  8. Verify that you can access your application through the LoadBalancer and Ingress controller.

    curl <worker-external-ip> -H 'Host: web.example.com'\n

    If you are successful, you should see <html><body><h1>It works!</h1></body></html>.

"},{"location":"examples/nginx-ingress/#install-nginx-using-host-network","title":"Install NGINX using host network","text":"

The host network option exposes Ingress directly using the worker nodes' IP addresses. It also allows you to use ports 80 and 443. This option doesn't use any Service objects (ClusterIP, NodePort, LoadBalancer) and it has the limitation that only one Ingress controller Pod may be scheduled on each cluster node.

  1. Download the official NGINX Ingress Controller manifests:

    wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.3/deploy/static/provider/baremetal/deploy.yaml\n
  2. Edit deploy.yaml. Find the Deployment ingress-nginx-controller and enable the host network option by adding the hostNetwork line:

    spec:\n  template:\n    spec:\n      hostNetwork: true\n

    You can also remove the Service ingress-nginx-controller completely, because it won't be needed.

  3. Install Ingress

    kubectl apply -f deploy.yaml\n
  4. Try to connect to the Ingress controller, deploy a test application and verify the access. These steps are similar to the previous install methods.

"},{"location":"examples/nginx-ingress/#additional-information","title":"Additional information","text":"

For more information about NGINX Ingress Controller installation, take a look at the official ingress-nginx installation guide and bare-metal considerations.

"},{"location":"examples/nginx-ingress/#alternative-examples-for-ingress-controllers-on-k0s","title":"Alternative examples for Ingress Controllers on k0s","text":"

Traefik Ingress

"},{"location":"examples/rook-ceph/","title":"Installing Ceph Storage with Rook","text":"

In this tutorial you'll create Ceph storage for k0s. Ceph is a highly scalable, distributed storage solution. It offers object, block, and file storage, and it's designed to run on any common hardware. Ceph replicates data across multiple volumes, which makes it fault-tolerant. Another clear advantage of Ceph in Kubernetes is dynamic provisioning: applications just need to request storage (with a persistent volume claim) and Ceph will automatically provision it, without a persistent volume having to be created manually each time.

Unfortunately, the Ceph deployment as such can be considered a bit complex. To make the deployment easier, we'll use Rook operator. Rook is a CNCF project and it's dedicated to storage orchestration. Rook supports several storage solutions, but in this tutorial we will use it to manage Ceph.

This tutorial uses three worker nodes and one controller. It's possible to use fewer nodes, but using three worker nodes makes it a good example for deploying a highly available storage cluster. We use external storage partitions, which are attached to the worker nodes to be used by Ceph.

After the Ceph deployment we'll deploy a sample application (MongoDB) to use the storage in practice.

"},{"location":"examples/rook-ceph/#prerequisites","title":"Prerequisites","text":"
  • Linux OS
  • GitHub access
  • AWS account
  • Terraform
"},{"location":"examples/rook-ceph/#deployment-steps","title":"Deployment steps","text":""},{"location":"examples/rook-ceph/#1-preparations","title":"1. Preparations","text":"

In this example we'll use Terraform to create four Ubuntu VMs on AWS. Using Terraform makes the VM deployment fast and repeatable. You can avoid manually setting up everything in the AWS GUI. Moreover, when you have finished with the tutorial, it's very easy to tear down the VMs with Terraform (with one command). However, you can set up the nodes in many different ways and it doesn't make a difference in the following steps.

We will use k0sctl to create the k0s cluster. The k0sctl repo also includes a ready-made Terraform configuration to create the VMs on AWS, which is what we'll use. Let's start by cloning the k0sctl repo.

git clone git@github.com:k0sproject/k0sctl.git\n

Take a look at the Terraform files

cd k0sctl/examples/aws-tf\nls -l\n

Open variables.tf and set the number of controller and worker nodes like this:

variable \"cluster_name\" {\ntype    = string\ndefault = \"k0sctl\"\n}\n\nvariable \"controller_count\" {\ntype    = number\ndefault = 1\n}\n\nvariable \"worker_count\" {\ntype    = number\ndefault = 3\n}\n\nvariable \"cluster_flavor\" {\ntype    = string\ndefault = \"t3.small\"\n}\n

Open main.tf to check or modify k0s version near the end of the file.

You can also configure a different name to your cluster and change the default VM type. t3.small (2 vCPUs, 2 GB RAM) runs just fine for this tutorial.

"},{"location":"examples/rook-ceph/#2-create-the-vms","title":"2. Create the VMs","text":"

For AWS, you need an account. Terraform will use the following environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN. You can easily copy-paste them from the AWS portal. For more information, see the AWS documentation.
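
A minimal sketch of setting them in your shell; the placeholder values are yours to fill in:

export AWS_ACCESS_KEY_ID=<your-access-key-id>
export AWS_SECRET_ACCESS_KEY=<your-secret-access-key>
export AWS_SESSION_TOKEN=<your-session-token>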

When the environment variables are set, you can proceed with Terraform and deploy the VMs.

terraform init\nterraform apply\n

If you decide to create the VMs manually using the AWS GUI, you need to disable source/destination checking. This always needs to be disabled for multi-node Kubernetes clusters in order to get node-to-node communication working, due to Network Address Translation. For Terraform, this is already taken care of in the default configuration.

"},{"location":"examples/rook-ceph/#3-create-and-attach-the-volumes","title":"3. Create and attach the volumes","text":"

Ceph requires one of the following storage options for storing the data:

  • Raw devices (no partitions or formatted filesystems)
  • Raw partitions (no formatted filesystem)
  • PVs available from a storage class in block mode

We will be using raw partitions (AWS EBS volumes), which can be easily attached to the worker node VMs. They are automatically detected by Ceph with its default configuration.

Deploy AWS EBS volumes, one for each worker node. You can manually create three EBS volumes (for example, 10 GB each) using the AWS GUI and attach them to your worker nodes (a CLI sketch follows below). Don't format the volumes; Ceph handles that automatically.
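
If you prefer the command line over the AWS GUI, a hedged sketch using the AWS CLI could look like this; the availability zone, volume ID, instance ID and device name are placeholders you need to adapt:

# Create a 10 GB volume in the same availability zone as the worker node
aws ec2 create-volume --size 10 --volume-type gp2 --availability-zone <az>
# Attach it to the worker instance as an extra raw device
aws ec2 attach-volume --volume-id <volume-id> --instance-id <instance-id> --device /dev/sdf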

After you have attached the EBS volumes to the worker nodes, log in to one of the workers and check the available block devices:

lsblk -f\n
NAME        FSTYPE   LABEL           UUID                                 FSAVAIL FSUSE% MOUNTPOINT\nloop0       squashfs                                                            0   100% /snap/amazon-ssm-agent/3552\nloop1       squashfs                                                            0   100% /snap/core18/1997\nloop2       squashfs                                                            0   100% /snap/snapd/11588\nloop3       squashfs                                                            0   100% /snap/lxd/19647\nnvme0n1\n\u2514\u2500nvme0n1p1 ext4     cloudimg-rootfs e8070c31-bfee-4314-a151-d1332dc23486    5.1G    33% /\nnvme1n1\n

The last line (nvme1n1) in this example printout corresponds to the attached EBS volume. Note that it doesn't have any filesystem (FSTYPE is empty). This meets the Ceph storage requirements and you are good to proceed.

"},{"location":"examples/rook-ceph/#4-install-k0s-using-k0sctl","title":"4. Install k0s using k0sctl","text":"

You can use Terraform to automatically output a config file for k0sctl with the IP addresses and access details.

terraform output -raw k0s_cluster > k0sctl.yaml\n

After that, deploying k0s is very easy with the ready-made configuration.

k0sctl apply --config k0sctl.yaml\n

It might take around 2-3 minutes for k0sctl to connect to each node, install k0s and connect the nodes together to form a cluster.

"},{"location":"examples/rook-ceph/#5-access-k0s-cluster","title":"5. Access k0s cluster","text":"

To access your new cluster remotely, you can use k0sctl to fetch kubeconfig and use that with kubectl or Lens.

k0sctl kubeconfig --config k0sctl.yaml > kubeconfig\nexport KUBECONFIG=$PWD/kubeconfig\nkubectl get nodes\n

The other option is to log in to your controller node and use the k0s in-built kubectl to access the cluster. Then you don't need to worry about the kubeconfig (k0s takes care of that automatically).

ssh -i aws.pem <username>@<ip-address>\nsudo k0s kubectl get nodes\n
"},{"location":"examples/rook-ceph/#6-deploy-rook","title":"6. Deploy Rook","text":"

To get started with Rook, let's first clone the Rook GitHub repo:

git clone --single-branch --branch release-1.7 https://github.com/rook/rook.git\ncd rook/cluster/examples/kubernetes/ceph\n

We will mostly use the default Rook configuration. However, the k0s kubelet directory must be configured in operator.yaml like this:

ROOK_CSI_KUBELET_DIR_PATH: \"/var/lib/k0s/kubelet\"\n

To create the resources needed by Rook's Ceph operator, run:

kubectl apply -f crds.yaml -f common.yaml -f operator.yaml\n

Now you should see the operator running. You can check the pods with:

kubectl get pods -n rook-ceph\n
"},{"location":"examples/rook-ceph/#7-deploy-ceph-cluster","title":"7. Deploy Ceph Cluster","text":"

Then you can proceed to create a Ceph cluster. Ceph will use the three EBS volumes attached to the worker nodes:

kubectl apply -f cluster.yaml\n

It takes a few minutes to prepare the volumes and create the cluster. Once this is completed, you should see the following output:

kubectl get pods -n rook-ceph\n
NAME                                                         READY   STATUS      RESTARTS   AGE\ncsi-cephfsplugin-nhxc8                                       3/3     Running     0          2m48s\ncsi-cephfsplugin-provisioner-db45f85f5-ldhjp                 6/6     Running     0          2m48s\ncsi-cephfsplugin-provisioner-db45f85f5-sxfm8                 6/6     Running     0          2m48s\ncsi-cephfsplugin-tj2bh                                       3/3     Running     0          2m48s\ncsi-cephfsplugin-z2rrl                                       3/3     Running     0          2m48s\ncsi-rbdplugin-5q7gq                                          3/3     Running     0          2m49s\ncsi-rbdplugin-8sfpd                                          3/3     Running     0          2m49s\ncsi-rbdplugin-f2xdz                                          3/3     Running     0          2m49s\ncsi-rbdplugin-provisioner-d85cbdb48-g6vck                    6/6     Running     0          2m49s\ncsi-rbdplugin-provisioner-d85cbdb48-zpmvr                    6/6     Running     0          2m49s\nrook-ceph-crashcollector-ip-172-31-0-76-64cb4c7775-m55x2     1/1     Running     0          45s\nrook-ceph-crashcollector-ip-172-31-13-183-654b46588d-djqsd   1/1     Running     0          2m57s\nrook-ceph-crashcollector-ip-172-31-15-5-67b68698f-gcjb7      1/1     Running     0          2m46s\nrook-ceph-mgr-a-5ffc65c874-8pxgv                             1/1     Running     0          58s\nrook-ceph-mon-a-ffcd85c5f-z89tb                              1/1     Running     0          2m59s\nrook-ceph-mon-b-fc8f59464-lgczk                              1/1     Running     0          2m46s\nrook-ceph-mon-c-69bd87b558-kl4nl                             1/1     Running     0          91s\nrook-ceph-operator-54cf7487d4-pl66p                          1/1     Running     0          4m57s\nrook-ceph-osd-0-dd4fd8f6-g6s9m                               1/1     Running     0          48s\nrook-ceph-osd-1-7c478c49c4-gkqml                             1/1     Running     0          47s\nrook-ceph-osd-2-5b887995fd-26492                             1/1     Running     0          46s\nrook-ceph-osd-prepare-ip-172-31-0-76-6b5fw                   0/1     Completed   0          28s\nrook-ceph-osd-prepare-ip-172-31-13-183-cnkf9                 0/1     Completed   0          25s\nrook-ceph-osd-prepare-ip-172-31-15-5-qc6pt                   0/1     Completed   0          23s\n
"},{"location":"examples/rook-ceph/#8-configure-ceph-block-storage","title":"8. Configure Ceph block storage","text":"

Before Ceph can provide storage to your cluster, you need to create a ReplicaPool and a StorageClass. In this example, we use the default configuration to create the block storage.

kubectl apply -f ./csi/rbd/storageclass.yaml\n
"},{"location":"examples/rook-ceph/#9-request-storage","title":"9. Request storage","text":"

Create a new manifest file mongo-pvc.yaml with the following content:

apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\nname: mongo-pvc\nspec:\nstorageClassName: rook-ceph-block\naccessModes:\n- ReadWriteOnce\nresources:\nrequests:\nstorage: 2Gi\n

This will create a Persistent Volume Claim (PVC) requesting 2 GB of block storage from Ceph. Provisioning is done dynamically. You can choose the requested size freely, as long as it fits within the available storage.

kubectl apply -f mongo-pvc.yaml\n

You can now check the status of your PVC:

kubectl get pvc\n

When the PVC gets the requested volume reserved (bound), it should look like this:

kubectl get pvc\n
NAME        STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS      AGE\nmongo-pvc   Bound    pvc-08337736-65dd-49d2-938c-8197a8871739   2Gi        RWO            rook-ceph-block   6s\n
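
If you also want to see the PersistentVolume that Ceph provisioned behind the claim, you can list the cluster's persistent volumes:

kubectl get pv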
"},{"location":"examples/rook-ceph/#10-deploy-an-example-application","title":"10. Deploy an example application","text":"

Let's deploy a Mongo database to verify the Ceph storage. Create a new file mongo.yaml with the following content:

apiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: mongo\nspec:\nselector:\nmatchLabels:\napp: mongo\ntemplate:\nmetadata:\nlabels:\napp: mongo\nspec:\ncontainers:\n- image: mongo:4.0\nname: mongo\nports:\n- containerPort: 27017\nname: mongo\nvolumeMounts:\n- name: mongo-persistent-storage\nmountPath: /data/db\nvolumes:\n- name: mongo-persistent-storage\npersistentVolumeClaim:\nclaimName: mongo-pvc\n

Deploy the database:

kubectl apply -f mongo.yaml\n
"},{"location":"examples/rook-ceph/#11-access-the-application","title":"11. Access the application","text":"

Open the MongoDB shell using the mongo pod:

kubectl get pods\n
NAME                    READY   STATUS    RESTARTS   AGE\nmongo-b87cbd5cc-4wx8t   1/1     Running   0          76s\n
kubectl exec -it mongo-b87cbd5cc-4wx8t -- mongo\n

Create a DB and insert some data:

> use testDB\nswitched to db testDB\n> db.testDB.insertOne( {name: \"abc\", number: 123  })\n{\n  \"acknowledged\" : true,\n  \"insertedId\" : ObjectId(\"60815690a709d344f83b651d\")\n}\n> db.testDB.insertOne( {name: \"bcd\", number: 234  })\n{\n  \"acknowledged\" : true,\n  \"insertedId\" : ObjectId(\"6081569da709d344f83b651e\")\n}\n

Read the data:

> db.getCollection(\"testDB\").find()\n{ \"_id\" : ObjectId(\"60815690a709d344f83b651d\"), \"name\" : \"abc\", \"number\" : 123 }\n{ \"_id\" : ObjectId(\"6081569da709d344f83b651e\"), \"name\" : \"bcd\", \"number\" : 234 }\n>\n

You can also try to restart the mongo pod or restart the worker nodes to verify that the storage is persistent.

"},{"location":"examples/rook-ceph/#12-clean-up","title":"12. Clean-up","text":"

You can use Terraform to take down the VMs:

terraform destroy\n

Remember to delete the EBS volumes separately; because they were created outside of Terraform, terraform destroy won't remove them (a CLI sketch follows below).
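
A hedged sketch for removing a volume with the AWS CLI; detach it first if it is still attached, and replace the placeholder volume ID:

aws ec2 detach-volume --volume-id <volume-id>
aws ec2 delete-volume --volume-id <volume-id>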

"},{"location":"examples/rook-ceph/#conclusions","title":"Conclusions","text":"

You have now created replicated Ceph storage for k0s. All your data is stored on multiple disks at the same time, so you have a fault-tolerant solution. You have also enabled dynamic provisioning: your applications can request the available storage without persistent volumes having to be created manually each time.

This was just one example of deploying distributed storage to a k0s cluster using an operator. You can easily use different Kubernetes storage solutions with k0s.

"},{"location":"examples/traefik-ingress/","title":"Installing Traefik Ingress Controller","text":"

You can configure k0s with the Traefik ingress controller, a MetalLB service loadbalancer, and deploy the Traefik Dashboard using a service sample. To do this you leverage Helm's extensible bootstrapping functionality to add the correct extensions to the k0s.yaml file during cluster configuration.

"},{"location":"examples/traefik-ingress/#1-configure-k0syaml","title":"1. Configure k0s.yaml","text":"

Configure k0s to install Traefik and MetalLB during cluster bootstrapping by adding their Helm charts as extensions in the k0s configuration file (k0s.yaml).

Note:

A good practice is to have a small range of IP addresses that are addressable on your network, preferably outside the assignment pool your DHCP server allocates (though any valid IP range should work locally on your machine). Providing an addressable range allows you to access your load balancer and Ingress services from anywhere on your local network.

extensions:\n  helm:\n    repositories:\n    - name: traefik\n      url: https://traefik.github.io/charts\n    - name: bitnami\n      url: https://charts.bitnami.com/bitnami\n    charts:\n    - name: traefik\n      chartname: traefik/traefik\n      version: \"20.5.3\"\n      namespace: default\n    - name: metallb\n      chartname: bitnami/metallb\n      version: \"2.5.4\"\n      namespace: default\n      values: |\n        configInline:\n          address-pools:\n          - name: generic-cluster-pool\n            protocol: layer2\n            addresses:\n            - 192.168.0.5-192.168.0.10\n
"},{"location":"examples/traefik-ingress/#2-retrieve-the-load-balancer-ip","title":"2. Retrieve the Load Balancer IP","text":"

After you start your cluster, run kubectl get all to confirm the deployment of Traefik and MetalLB. The command should return a response with the metallb and traefik resources, along with a service load balancer that has an assigned EXTERNAL-IP.

kubectl get all\n

Output:

NAME                                                 READY   STATUS    RESTARTS   AGE\npod/metallb-1607085578-controller-864c9757f6-bpx6r   1/1     Running   0          81s\npod/metallb-1607085578-speaker-245c2                 1/1     Running   0          60s\npod/traefik-1607085579-77bbc57699-b2f2t              1/1     Running   0          81s\n\nNAME                         TYPE           CLUSTER-IP       EXTERNAL-IP      PORT(S)                      AGE\nservice/kubernetes           ClusterIP      10.96.0.1        <none>           443/TCP                      96s\nservice/traefik-1607085579   LoadBalancer   10.105.119.102   192.168.0.5      80:32153/TCP,443:30791/TCP   84s\n\nNAME                                        DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE\ndaemonset.apps/metallb-1607085578-speaker   1         1         1       1            1           kubernetes.io/os=linux   87s\n\nNAME                                            READY   UP-TO-DATE   AVAILABLE   AGE\ndeployment.apps/metallb-1607085578-controller   1/1     1            1           87s\ndeployment.apps/traefik-1607085579              1/1     1            1           84s\n\nNAME                                                       DESIRED   CURRENT   READY   AGE\nreplicaset.apps/metallb-1607085578-controller-864c9757f6   1         1         1       81s\nreplicaset.apps/traefik-1607085579-77bbc57699              1         1         1       81s\n

Take note of the EXTERNAL-IP given to the service/traefik-n load balancer. In this example, 192.168.0.5 has been assigned and can be used to access services via the Ingress proxy:

NAME                         TYPE           CLUSTER-IP       EXTERNAL-IP      PORT(S)                      AGE\nservice/traefik-1607085579   LoadBalancer   10.105.119.102   192.168.0.5      80:32153/TCP,443:30791/TCP   84s\n
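
If you prefer to read the address programmatically, a small sketch using kubectl's jsonpath output works as well; the service name below is from this example output and will differ in your deployment:

kubectl get service traefik-1607085579 -o jsonpath='{.status.loadBalancer.ingress[0].ip}'\n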

Receiving a 404 response here is normal, as you've not configured any Ingress resources to respond yet:

curl http://192.168.0.5\n
404 page not found\n
"},{"location":"examples/traefik-ingress/#3-deploy-and-access-the-traefik-dashboard","title":"3. Deploy and access the Traefik Dashboard","text":"

With an available and addressable load balancer present on your cluster, you can now quickly deploy the Traefik dashboard and access it from anywhere on your LAN (assuming that MetalLB is configured with an addressable range).

  1. Create the Traefik Dashboard IngressRoute in a YAML file:

    apiVersion: traefik.containo.us/v1alpha1\nkind: IngressRoute\nmetadata:\n  name: dashboard\nspec:\n  entryPoints:\n  - web\n  routes:\n  - match: PathPrefix(`/dashboard`) || PathPrefix(`/api`)\n    kind: Rule\n    services:\n    - name: api@internal\n      kind: TraefikService\n
  2. Deploy the resource:

    kubectl apply -f traefik-dashboard.yaml\n

    Output:

    ingressroute.traefik.containo.us/dashboard created\n

    At this point you should be able to access the dashboard using the EXTERNAL-IP that you noted above by visiting http://192.168.0.5/dashboard/ in your browser (a quick command-line check is sketched after this list).

  3. Create a simple whoami Deployment, Service, and Ingress manifest:

    apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: whoami-deployment\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: whoami\n  template:\n    metadata:\n      labels:\n        app: whoami\n    spec:\n      containers:\n      - name: whoami-container\n        image: containous/whoami\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: whoami-service\nspec:\n  ports:\n  - name: http\n    targetPort: 80\n    port: 80\n  selector:\n    app: whoami\n---\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: whoami-ingress\nspec:\n  rules:\n  - http:\n      paths:\n      - path: /whoami\n        pathType: Exact\n        backend:\n          service:\n            name: whoami-service\n            port:\n              number: 80\n
  4. Apply the manifests:

    kubectl apply -f whoami.yaml\n

    Output:

    deployment.apps/whoami-deployment created\nservice/whoami-service created\ningress.networking.k8s.io/whoami-ingress created\n
  5. Test the ingress and service:

    curl http://192.168.0.5/whoami\n

    Output:

    Hostname: whoami-deployment-85bfbd48f-7l77c\nIP: 127.0.0.1\nIP: ::1\nIP: 10.244.214.198\nIP: fe80::b049:f8ff:fe77:3e64\nRemoteAddr: 10.244.214.196:34858\nGET /whoami HTTP/1.1\nHost: 192.168.0.5\nUser-Agent: curl/7.68.0\nAccept: */*\nAccept-Encoding: gzip\nX-Forwarded-For: 192.168.0.82\nX-Forwarded-Host: 192.168.0.5\nX-Forwarded-Port: 80\nX-Forwarded-Proto: http\nX-Forwarded-Server: traefik-1607085579-77bbc57699-b2f2t\nX-Real-Ip: 192.168.0.82\n
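
As referenced in step 2, a quick command-line check of the dashboard (a sketch, assuming the same EXTERNAL-IP as above) is to request its response headers; an HTTP 200 indicates the IngressRoute is serving it:

curl -I http://192.168.0.5/dashboard/\n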
"},{"location":"examples/traefik-ingress/#further-details","title":"Further details","text":"

With the Traefik Ingress Controller it is possible to use third-party tools, such as ngrok, to go further and expose your load balancer to the world. In doing so, you can enable dynamic certificate provisioning through Let's Encrypt, using either cert-manager or Traefik's own built-in ACME provider.

"},{"location":"examples/oidc/oidc-cluster-configuration/","title":"OpenID Connect integration","text":"

Developers use kubectl to access Kubernetes clusters. By default, kubectl uses a certificate to authenticate to the Kubernetes API. This means that when multiple developers need to access a cluster, the certificate needs to be shared. Sharing the credentials to access a Kubernetes cluster is a significant security problem: the certificate is easy to compromise, and the consequences can be catastrophic.

In this tutorial, we walk through how to set up your Kubernetes cluster to add Single Sign-On support for kubectl using OpenID Connect (OIDC).

"},{"location":"examples/oidc/oidc-cluster-configuration/#openid-connect-based-authentication","title":"OpenID Connect based authentication","text":"

OpenID Connect can be enabled by modifying the k0s configuration (using extraArgs).

"},{"location":"examples/oidc/oidc-cluster-configuration/#configuring-k0s-overview","title":"Configuring k0s: overview","text":"

The kube-apiserver accepts a number of arguments that allow us to manage OIDC-based authentication:

  • --oidc-issuer-url (required) — URL of the provider which allows the API server to discover public signing keys. Only URLs which use the https:// scheme are accepted. This is typically the provider's discovery URL without a path, for example \"https://accounts.google.com\" or \"https://login.salesforce.com\". This URL should point to the level below .well-known/openid-configuration: if the discovery URL is https://accounts.google.com/.well-known/openid-configuration, the value should be https://accounts.google.com. Example: https://accounts.google.com
  • --oidc-client-id (required) — A client ID that all tokens must be issued for. Example: kubernetes
  • --oidc-username-claim (optional) — JWT claim to use as the user name. By default sub, which is expected to be a unique identifier of the end user. Admins can choose other claims, such as email or name, depending on their provider. However, claims other than email will be prefixed with the issuer URL to prevent naming clashes with other plugins. Example: sub
  • --oidc-username-prefix (optional) — Prefix prepended to username claims to prevent clashes with existing names (such as system: users). For example, the value oidc: will create usernames like oidc:jane.doe. If this flag isn't provided and --oidc-username-claim is a value other than email, the prefix defaults to ( Issuer URL )# where ( Issuer URL ) is the value of --oidc-issuer-url. The value - can be used to disable all prefixing. Example: oidc:
  • --oidc-groups-claim (optional) — JWT claim to use as the user's group. If the claim is present it must be an array of strings. Example: groups
  • --oidc-groups-prefix (optional) — Prefix prepended to group claims to prevent clashes with existing names (such as system: groups). For example, the value oidc: will create group names like oidc:engineering and oidc:infra. Example: oidc:
  • --oidc-required-claim (optional) — A key=value pair that describes a required claim in the ID Token. If set, the claim is verified to be present in the ID Token with a matching value. Repeat this flag to specify multiple claims. Example: claim=value
  • --oidc-ca-file (optional) — The path to the certificate for the CA that signed your identity provider's web certificate. Defaults to the host's root CAs. Example: /etc/kubernetes/ssl/kc-ca.pem

To set up a bare-minimum example, we need to use:

  • oidc-issuer-url
  • oidc-client-id
  • oidc-username-claim
"},{"location":"examples/oidc/oidc-cluster-configuration/#configuring-k0s-prerequisites","title":"Configuring k0s: prerequisites","text":"

You will require:

  • issuer-url
  • client-id
  • username-claim

Please refer to the provider configuration guide or your selected OIDC provider's own documentation (we don't cover all of them in the k0s docs).

"},{"location":"examples/oidc/oidc-cluster-configuration/#configuration-example","title":"Configuration example","text":"
apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nspec:\n  api:\n    extraArgs:\n      oidc-issuer-url: <issuer-url>\n      oidc-client-id: <client-id>\n      oidc-username-claim: email # we use the email token claim field as the username\n

Use this configuration as a starting point. Continue with the configuration guide to finish the k0s cluster installation.
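
As a minimal sketch of those next steps (the path is an example, and the exact flags can vary slightly between k0s versions), you might validate the file and then install the controller with it:

# validate the configuration file\nk0s config validate --config /etc/k0s/k0s.yaml\n# install and start the controller using this configuration\nsudo k0s install controller -c /etc/k0s/k0s.yaml\nsudo k0s start\n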

"},{"location":"examples/oidc/oidc-cluster-configuration/#openid-connect-based-authorisation","title":"OpenID Connect based authorisation","text":"

There are two alternative options for implementing authorization:

"},{"location":"examples/oidc/oidc-cluster-configuration/#provider-based-role-mapping","title":"Provider based role mapping","text":"

Please refer to the provider configuration guide. Generally speaking, the oidc-groups-claim argument lets you specify which token claim is used as a list of RBAC groups for a given user. You still need to somehow sync that data between your OIDC provider and the kube-apiserver RBAC system.
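
For illustration only (the group name assumes oidc-groups-claim is set to groups and oidc-groups-prefix to oidc:, and the binding name is made up), such a mapping could be as simple as binding an OIDC group to an existing cluster role:

kubectl create clusterrolebinding oidc-engineering-view --clusterrole=view --group='oidc:engineering'\n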

"},{"location":"examples/oidc/oidc-cluster-configuration/#manual-roles-management","title":"Manual roles management","text":"

To use manual role management, you will need to create a Role and a RoleBinding for each new user within the k0s cluster. The Role can be shared by all users. Role example:

---\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  namespace: default\n  name: dev-role\nrules:\n- apiGroups: [\"*\"]\n  resources: [\"*\"]\n  verbs: [\"*\"]\n

RoleBinding example:

kind: RoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: dev-role-binding\nsubjects:\n- kind: User\n  name: <provider side user id>\nroleRef:\n  kind: Role\n  name: dev-role\n  apiGroup: rbac.authorization.k8s.io\n

The provided Role example is deliberately all-inclusive and should be tightened to match your actual requirements.
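
Once the Role and RoleBinding are applied, a quick way to sanity-check the mapping (a sketch; replace the username with the actual provider-side user id) is kubectl's impersonation support:

kubectl auth can-i get pods --namespace default --as <provider side user id>\n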

"},{"location":"examples/oidc/oidc-cluster-configuration/#kubeconfig-management","title":"kubeconfig management","text":"

NB: it is not safe to give the full contents of /var/lib/k0s/pki/admin.conf to the end user. Instead, create a user-specific kubeconfig with limited permissions.

The authorization side of kubeconfig management is described in the provider-specific guides. Use /var/lib/k0s/pki/admin.conf as a template for the cluster-specific kubeconfig.
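
A minimal sketch of building such a kubeconfig with kubectl itself (the file name, cluster name, API server address, and user entry name are all assumptions; the OIDC user credentials themselves are added by the provider-specific steps):

kubectl config --kubeconfig=developer.conf set-cluster k0s \\\n--server=https://<api-server-address>:6443 \\\n--certificate-authority=/var/lib/k0s/pki/ca.crt \\\n--embed-certs=true\nkubectl config --kubeconfig=developer.conf set-context default --cluster=k0s --user=oidc\nkubectl config --kubeconfig=developer.conf use-context default\n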

"},{"location":"examples/oidc/oidc-cluster-configuration/#references","title":"References","text":"

  • OAuth2 spec
  • Kubernetes authorization system (RBAC)
  • Kubernetes authentication system

"},{"location":"examples/oidc/oidc-provider-configuration/","title":"Providers","text":"

We use Google Cloud as the provider for the sake of this example. Check your vendor's documentation if you use some other vendor.

"},{"location":"examples/oidc/oidc-provider-configuration/#notes-on-stand-alone-providers","title":"Notes on stand-alone providers","text":"

If you are using a stand-alone OIDC provider, you might need to specify the oidc-ca-file argument for the kube-apiserver.

"},{"location":"examples/oidc/oidc-provider-configuration/#google-cloud","title":"Google Cloud","text":"

We use the k8s-oidc-helper tool to create a proper kubeconfig user record.

The issuer URL for Google Cloud is https://accounts.google.com.
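
You can fetch the issuer's discovery document directly, which is also a handy way to confirm the value expected by --oidc-issuer-url:

curl https://accounts.google.com/.well-known/openid-configuration\n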

"},{"location":"examples/oidc/oidc-provider-configuration/#creating-an-application","title":"Creating an application","text":"
  • Go to the Google Cloud Dashboard
  • Create a new project in your organization
  • Go to the \"Credentials\" page
  • Create \"OAuth consent screen\"
"},{"location":"examples/oidc/oidc-provider-configuration/#creating-a-user-credentials","title":"Creating a user credentials","text":"
  • Go to the Google Cloud Dashboard
  • Go to the \"Credentials\" page
  • Create new credentials. Select \"OAuth client ID\" as a type.
  • Select \"Desktop\" app as an application type.
  • Save client ID and client secret
"},{"location":"examples/oidc/oidc-provider-configuration/#creating-kubeconfig-user-record","title":"Creating kubeconfig user record","text":"

Run the following command and follow the instructions:

k8s-oidc-helper --client-id=<CLIENT_ID> \\\n--client-secret=<CLIENT_SECRET> \\\n--write=true\n
"},{"location":"examples/oidc/oidc-provider-configuration/#using-kubelogin","title":"Using kubelogin","text":"

For other OIDC providers it is possible to use kubelogin plugin. Please refer to the setup guide for details.

"},{"location":"examples/oidc/oidc-provider-configuration/#google-cloud-example-using-kubelogin","title":"Google Cloud example using kubelogin","text":"
kubectl oidc-login setup \\\n--oidc-issuer-url=https://accounts.google.com \\\n--oidc-client-id=<CLIENT_ID> \\\n--oidc-client-secret=<CLIENT_SECRET>\n\nkubectl config set-credentials oidc \\\n--exec-api-version=client.authentication.k8s.io/v1beta1 \\\n--exec-command=kubectl \\\n--exec-arg=oidc-login \\\n--exec-arg=get-token \\\n--exec-arg=--oidc-issuer-url=https://accounts.google.com \\\n--exec-arg=--oidc-client-id=<CLIENT_ID> \\\n--exec-arg=--oidc-client-secret=<CLIENT_SECRET>\n

You can then switch the current context to use the oidc user:

kubectl config set-context --current --user=oidc
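
Any subsequent kubectl call (for example the one below, assuming the RBAC bindings described in the cluster configuration guide are in place) should now trigger the kubelogin browser flow on first use:

kubectl get pods\n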

"},{"location":"internal/publishing_docs_using_mkdocs/","title":"Publishing Docs","text":"

We use mkdocs and mike to publish docs to docs.k0sproject.io. This guide provides a simple how-to for configuring and deploying newly added docs to our website.

"},{"location":"internal/publishing_docs_using_mkdocs/#requirements","title":"Requirements","text":"

Install mike: https://github.com/jimporter/mike#installation

"},{"location":"internal/publishing_docs_using_mkdocs/#adding-a-new-link-to-the-navigation","title":"Adding A New link to the Navigation","text":"
  • All docs must live under the docs directory (i.e., changes to the main README.md are not reflected on the website).
  • Add a new link under nav in the main mkdocs.yml file:
nav:\n  - Overview: README.md\n  - Creating A Cluster:\n    - Quick Start Guide: create-cluster.md\n    - Run in Docker: k0s-in-docker.md\n    - Single node set-up: k0s-single-node.md\n  - Configuration Reference:\n    - Architecture: architecture.md\n    - Networking: networking.md\n    - Configuration Options: configuration.md\n    - Using Cloud Providers: cloud-providers.md\n    - Running k0s with Traefik: examples/traefik-ingress.md\n  - Running k0s as a service: install.md\n  - k0s CLI Help Pages: cli/k0s.md\n  - Deploying Manifests: manifests.md\n  - FAQ: FAQ.md\n  - Troubleshooting: troubleshooting.md\n  - Contributing:\n    - Overview: contributors/overview.md\n    - Workflow: contributors/github_workflow.md\n    - Testing: contributors/testing.md\n
  • Once your changes are pushed to main, the \"Publish Docs\" job will start running: https://github.com/k0sproject/k0s/actions?query=workflow%3A%22Publish+docs+via+GitHub+Pages%22
  • You should see the deployment outcome in the gh-pages deployment page: https://github.com/k0sproject/k0s/deployments/activity_log?environment=github-pages
"},{"location":"internal/publishing_docs_using_mkdocs/#testing-docs-locally","title":"Testing docs locally","text":"

We've got a dockerized setup for easily testing docs locally. Simply run make docs-serve-dev. The docs will be available on http://localhost:8000.

Note: If something is already running locally on port 8000, you can choose another port like so: make docs-serve-dev DOCS_DEV_PORT=9999. The docs will then be available on http://localhost:9999.
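
For reference, a typical local workflow using these targets might look like this (port 9999 is just an example):

make docs-serve-dev\n# or, if port 8000 is already taken:\nmake docs-serve-dev DOCS_DEV_PORT=9999\n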

"},{"location":"internal/upgrading-calico/","title":"Upgrading Calico","text":"

k0s bundles Kubernetes manifests for Calico. The manifests are retrieved from the official Calico repo.

As fetching and modifying the entire multi-thousand-line file is error-prone, follow these steps to upgrade Calico to the latest version (a command sketch follows the list):

  1. run ./hack/get-calico.sh <version>
  2. check the git diff to see if it looks sensible
  3. re-apply our manual adjustments (documented below)
  4. run make bindata-manifests
  5. compile, pray, and test
  6. commit and create a PR
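
As a rough sketch of the first steps above (the version is a placeholder; use the actual Calico release you are upgrading to):

./hack/get-calico.sh v3.26.1\ngit diff ':!static/manifests/calico/CustomResourceDefinition'\n# re-apply the manual adjustments documented below, then:\nmake bindata-manifests\n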
"},{"location":"internal/upgrading-calico/#manual-adjustments","title":"Manual Adjustments","text":"

Note: All manual adjustments should be fairly obvious from the git diff. This section provides a sanity checklist to go through and make sure we still have those changes applied. The code blocks in this section are our modifications, not the Calico originals.

To see the diff without CRDs, you can do something like:

git diff ':!static/manifests/calico/CustomResourceDefinition'\n

That'll make it easier to spot any needed changes.

static/manifests/calico/DaemonSet/calico-node.yaml:

  • variable-based support for both vxlan and ipip (search for ipip to find):
{{- if eq .Mode \"ipip\" }}\n# Enable IPIP\n- name: CALICO_IPV4POOL_IPIP\n  value: {{ .Overlay }}\n# Enable or Disable VXLAN on the default IP pool.\n- name: CALICO_IPV4POOL_VXLAN\n  value: \"Never\"\n{{- else if eq .Mode \"vxlan\" }}\n# Disable IPIP\n- name: CALICO_IPV4POOL_IPIP\n  value: \"Never\"\n# Enable VXLAN on the default IP pool.\n- name: CALICO_IPV4POOL_VXLAN\n  value: {{ .Overlay }}\n- name: FELIX_VXLANPORT\n  value: \"{{ .VxlanPort }}\"\n- name: FELIX_VXLANVNI\n  value: \"{{ .VxlanVNI }}\"\n{{- end }}\n
  • iptables auto detect:
# Auto detect the iptables backend\n- name: FELIX_IPTABLESBACKEND\n  value: \"auto\"\n
  • variable-based WireGuard support:
{{- if .EnableWireguard }}\n- name: FELIX_WIREGUARDENABLED\n  value: \"true\"\n{{- end }}\n
  • variable-based cluster CIDR:
- name: CALICO_IPV4POOL_CIDR\n  value: \"{{ .ClusterCIDR }}\"\n
  • custom backend and MTU
# calico-config.yaml\ncalico_backend: \"{{ .Mode }}\"\nveth_mtu: \"{{ .MTU }}\"\n
  • remove bgp from CLUSTER_TYPE
- name: CLUSTER_TYPE\n  value: \"k8s\"\n
  • disable BIRD checks on liveness and readiness, as we don't support BGP: remove -bird-live from the liveness probe and -bird-ready from the readiness probe
"},{"location":"internal/upgrading-calico/#container-image-names","title":"Container image names","text":"

Instead of hardcoded image names and versions, placeholders are used to support configuration-level settings. The following placeholders are used:

  • CalicoCNIImage for calico/cni
  • CalicoNodeImage for calico/node
  • CalicoKubeControllersImage for calico/kube-controllers

Also, all containers in the manifests were modified to have an imagePullPolicy field:

imagePullPolicy: {{ .PullPolicy }}\n

Example:

# calico-node.yaml\nimage: {{ .CalicoCNIImage }}\n
"}]} \ No newline at end of file diff --git a/head/selinux/index.html b/head/selinux/index.html index ef87e08dc785..05b31572772f 100644 --- a/head/selinux/index.html +++ b/head/selinux/index.html @@ -20,8 +20,9 @@ + - + @@ -29,15 +30,18 @@ - + - + + + + @@ -74,7 +78,7 @@ - + @@ -124,6 +128,7 @@
@@ -143,29 +148,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/sitemap.xml b/head/sitemap.xml index be691f6ef00f..738c3dbbfcaf 100644 --- a/head/sitemap.xml +++ b/head/sitemap.xml @@ -2,522 +2,522 @@ https://docs.k0sproject.io/head/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/CODE_OF_CONDUCT/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/FAQ/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/airgap-install/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/architecture/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/autopilot-multicommand/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/autopilot/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/backup/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cis_benchmark/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cloud-providers/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/commercial-support/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/configuration-validation/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/configuration/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/conformance-testing/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/containerd_config/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/custom-ca/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/custom-cri-runtime/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/dockershim/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/dual-stack/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/dynamic-configuration/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/environment-variables/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/experimental-windows/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/extensions/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/external-runtime-deps/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/helm-charts/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/high-availability/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/install/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/k0s-in-docker/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/k0s-multi-node/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/k0s-single-node/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/k0sctl-install/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/manifests/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/networking/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/nllb/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/podsecurity/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/raspberry-pi4/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/reinstall-k0sctl/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/releases/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/remove_controller/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/reset/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/runtime/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/selinux/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/shell-completion/ - 2023-09-15 + 2023-09-18 
daily https://docs.k0sproject.io/head/storage/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/system-monitoring/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/system-requirements/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/troubleshooting/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/upgrade/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/user-management/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/worker-node-config/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_airgap/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_airgap_list-images/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_api/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_backup/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_completion/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_config/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_config_create/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_config_edit/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_config_status/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_config_validate/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_controller/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_ctr/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_docs/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_etcd/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_etcd_leave/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_etcd_member-list/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_install/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_install_controller/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_install_worker/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_kubeconfig/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_kubeconfig_admin/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_kubeconfig_create/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_kubectl/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_reset/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_restore/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_start/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_status/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_status_components/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_stop/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_sysinfo/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_token/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_token_create/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_token_invalidate/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_token_list/ - 2023-09-15 + 2023-09-18 daily 
https://docs.k0sproject.io/head/cli/k0s_token_pre-shared/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_version/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/cli/k0s_worker/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/contributors/CODE_OF_CONDUCT/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/contributors/github_workflow/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/contributors/overview/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/contributors/testing/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/examples/ambassador-ingress/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/examples/ansible-playbook/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/examples/gitops-flux/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/examples/metallb-loadbalancer/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/examples/nginx-ingress/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/examples/rook-ceph/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/examples/traefik-ingress/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/examples/oidc/oidc-cluster-configuration/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/examples/oidc/oidc-provider-configuration/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/internal/publishing_docs_using_mkdocs/ - 2023-09-15 + 2023-09-18 daily https://docs.k0sproject.io/head/internal/upgrading-calico/ - 2023-09-15 + 2023-09-18 daily \ No newline at end of file diff --git a/head/sitemap.xml.gz b/head/sitemap.xml.gz index 8c49fce1c59d..038b99561f49 100644 Binary files a/head/sitemap.xml.gz and b/head/sitemap.xml.gz differ diff --git a/head/storage/index.html b/head/storage/index.html index d548d1f2c712..2dd5c276d86f 100644 --- a/head/storage/index.html +++ b/head/storage/index.html @@ -20,8 +20,9 @@ + - + @@ -29,15 +30,18 @@ - + - + + + + @@ -74,7 +78,7 @@ - + @@ -124,6 +128,7 @@
@@ -143,29 +148,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/system-monitoring/index.html b/head/system-monitoring/index.html index 118155190900..371aaff039bc 100644 --- a/head/system-monitoring/index.html +++ b/head/system-monitoring/index.html @@ -20,8 +20,9 @@ + - + @@ -29,15 +30,18 @@ - + - + + + + @@ -74,7 +78,7 @@ - + @@ -124,6 +128,7 @@
@@ -143,29 +148,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/upgrade/index.html b/head/upgrade/index.html index 77c7eb7f58b7..93c9c269a6cb 100644 --- a/head/upgrade/index.html +++ b/head/upgrade/index.html @@ -20,8 +20,9 @@ + - + @@ -29,15 +30,18 @@ - + - + + + + @@ -74,7 +78,7 @@ - + @@ -124,6 +128,7 @@
@@ -143,29 +148,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - + diff --git a/head/user-management/index.html b/head/user-management/index.html index 47e67a11a558..7696b43db7fe 100644 --- a/head/user-management/index.html +++ b/head/user-management/index.html @@ -20,8 +20,9 @@ + - + @@ -29,15 +30,18 @@ - + - + + + + @@ -74,7 +78,7 @@ - + @@ -124,6 +128,7 @@
@@ -143,29 +148,36 @@
- - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - - - - - - - - - - - - - - - + +
+ + + + + + + + + + + + + + + + + +
+ - + - +