diff --git a/404.html b/404.html index 94219e1fbb2..abd18edd052 100644 --- a/404.html +++ b/404.html @@ -7,7 +7,7 @@ Page Not Found | Apache Linkis - + @@ -15,7 +15,7 @@

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- + \ No newline at end of file diff --git a/assets/js/844135d6.e77f8473.js b/assets/js/844135d6.92d7ee77.js similarity index 99% rename from assets/js/844135d6.e77f8473.js rename to assets/js/844135d6.92d7ee77.js index 4f4da3f91b2..eda501d084a 100644 --- a/assets/js/844135d6.e77f8473.js +++ b/assets/js/844135d6.92d7ee77.js @@ -1 +1 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[5909],{3905:function(e,t,i){i.d(t,{Zo:function(){return p},kt:function(){return h}});var a=i(67294);function n(e,t,i){return t in e?Object.defineProperty(e,t,{value:i,enumerable:!0,configurable:!0,writable:!0}):e[t]=i,e}function r(e,t){var i=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),i.push.apply(i,a)}return i}function o(e){for(var t=1;t=0||(n[i]=e[i]);return n}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,i)&&(n[i]=e[i])}return n}var s=a.createContext({}),u=function(e){var t=a.useContext(s),i=t;return e&&(i="function"==typeof e?e(t):o(o({},t),e)),i},p=function(e){var t=u(e.components);return a.createElement(s.Provider,{value:t},e.children)},c={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},m=a.forwardRef((function(e,t){var i=e.components,n=e.mdxType,r=e.originalType,s=e.parentName,p=l(e,["components","mdxType","originalType","parentName"]),m=u(i),h=n,k=m["".concat(s,".").concat(h)]||m[h]||c[h]||r;return i?a.createElement(k,o(o({ref:t},p),{},{components:i})):a.createElement(k,o({ref:t},p))}));function h(e,t){var i=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var r=i.length,o=new Array(r);o[0]=m;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l.mdxType="string"==typeof e?e:n,o[1]=l;for(var u=2;u=0||(n[i]=e[i]);return n}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,i)&&(n[i]=e[i])}return n}var s=a.createContext({}),u=function(e){var t=a.useContext(s),i=t;return e&&(i="function"==typeof e?e(t):o(o({},t),e)),i},p=function(e){var t=u(e.components);return a.createElement(s.Provider,{value:t},e.children)},c={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},m=a.forwardRef((function(e,t){var i=e.components,n=e.mdxType,r=e.originalType,s=e.parentName,p=l(e,["components","mdxType","originalType","parentName"]),m=u(i),h=n,k=m["".concat(s,".").concat(h)]||m[h]||c[h]||r;return i?a.createElement(k,o(o({ref:t},p),{},{components:i})):a.createElement(k,o({ref:t},p))}));function h(e,t){var i=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var r=i.length,o=new Array(r);o[0]=m;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l.mdxType="string"==typeof e?e:n,o[1]=l;for(var u=2;u=d)&&Object.keys(n.O).every((function(e){return n.O[e](c[r])}))?c.splice(r--,1):(t=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[c,f,d]},n.n=function(e){var a=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(a,{a:a}),a},c=Object.getPrototypeOf?function(e){return Object.getPrototypeOf(e)}:function(e){return e.__proto__},n.t=function(e,f){if(1&f&&(e=this(e)),8&f)return e;if("object"==typeof e&&e){if(4&f&&e.__esModule)return e;if(16&f&&"function"==typeof e.then)return e}var d=Object.create(null);n.r(d);var 
b={};a=a||[null,c({}),c([]),c(c)];for(var t=2&f&&e;"object"==typeof t&&!~a.indexOf(t);t=c(t))Object.getOwnPropertyNames(t).forEach((function(a){b[a]=function(){return e[a]}}));return b.default=function(){return e},n.d(d,b),d},n.d=function(e,a){for(var c in a)n.o(a,c)&&!n.o(e,c)&&Object.defineProperty(e,c,{enumerable:!0,get:a[c]})},n.f={},n.e=function(e){return Promise.all(Object.keys(n.f).reduce((function(a,c){return n.f[c](e,a),a}),[]))},n.u=function(e){return"assets/js/"+({328:"4cc9882f",359:"2124c49e",639:"1ef83aab",1012:"b359ab7d",1226:"32c11423",1362:"6d204534",1402:"45047182",2170:"5526e2c8",2275:"f2e7bc47",2647:"d0685248",2698:"a534d5a4",2699:"6b4f6f6d",2794:"c976da7c",2814:"68f4675e",3009:"8e27a41e",3358:"ba75a7e1",3587:"45350984",3671:"6248a31d",4065:"217deffc",4280:"781d1b70",4380:"02f6a4b8",4443:"49ee9fc2",4452:"b047bf19",4484:"a854c309",4591:"77ddc047",4736:"7e21a02f",4984:"07edeecc",5140:"c3c3ee8a",5142:"d77f29dd",5233:"f2678917",5705:"166c3354",5909:"844135d6",6288:"e59b4707",6339:"e4bc1c20",6652:"78060cbc",6959:"322e6455",7015:"33f34b53",7029:"9b73e49d",7058:"d89de855",7342:"16b1aeb6",7479:"4c92610f",7586:"3e78e8ed",7766:"af1aaf24",8301:"a67041c8",8398:"6131eab8",8605:"5be510ab",8785:"127364d6",8890:"798fb933",9235:"e9ffd44c",9409:"15a0842e",9617:"cf38eb0d",9646:"4fdf3839",9782:"55dfda34",9828:"787028e7",9991:"d3b38238",10001:"8eb4e46b",10201:"57cd18ee",10218:"7cc7c4b1",10341:"a34d501f",10388:"b14f3fa2",10499:"e428c6d2",10527:"710884a6",10828:"5917547a",10967:"216ac574",11477:"b2f554cd",11539:"77745b3d",11657:"1a566584",11713:"a7023ddc",12072:"eee10519",12431:"5b37fdc8",12561:"2670bca3",12979:"2ee9677b",13050:"5f82aa37",13107:"cd7c5b9a",13134:"eba5f9c4",13143:"04bdf1ac",13244:"0f7894ab",13479:"8a12cfa4",13703:"2ea0638b",13751:"3720c009",13755:"54f9b777",13832:"07aed5a5",13948:"f32700a0",14065:"876124f9",14272:"50aee6de",14516:"abaaa1fe",14860:"e37a6402",14910:"51c20031",15287:"333c80e1",15450:"466720ab",15665:"908165ba",16274:"8625a1ce",16511:"83d17af4",16594:"7cc92f5c",16742:"4c05f83b",16884:"c2352a99",17167:"a184b6b2",17187:"b9f50d96",17234:"bc244d90",17284:"2cf4430d",17353:"3df00f5b",17542:"66d63bfc",17625:"9a3ec700",17951:"2d364229",18139:"43dc7314",18471:"6423b631",18674:"48d82b2e",18730:"d80dfec6",18855:"9968f92c",18907:"4f9fd1aa",19001:"41664cec",19096:"856315e7",19267:"b0f3eaa6",19468:"53baf039",19881:"70b31b37",20026:"21a12340",20490:"f16124a0",20522:"b7f5bbd5",20709:"6c7c2e71",20943:"9b480441",21065:"966e982b",21405:"948b0dab",21602:"68a93d86",22215:"c2471b2c",22636:"b96a8a04",23057:"3ab15d88",23851:"c9177f39",23946:"af6f9f26",24079:"87cf41e2",24153:"280df7e5",24438:"74337923",24825:"d28aee8d",24938:"ba1b8836",24958:"c38140d4",25048:"bf275373",25077:"dff35117",25146:"cac1e9bc",25246:"4eb6e5ee",25356:"94b02a9f",25757:"8f152d3b",26074:"2b9753f8",26107:"c5c3ab65",26234:"9154a6bd",26802:"52690743",26866:"d4051e29",27307:"bef57165",27377:"6f6118a9",27598:"8837ae6a",27624:"678743b7",27657:"970236dc",27693:"8a0722c3",27872:"5771c448",27918:"17896441",27991:"dc1e40d7",28267:"7a3788d1",28441:"bd518af2",28863:"6e916c0f",29025:"91b65c41",29231:"0c159898",29514:"1be78505",29893:"9c983a1e",29898:"db672e8f",30010:"778574bb",30021:"805f29de",30765:"2afb85b6",30870:"b571d381",31034:"5e40d2f9",31105:"dee797b6",31210:"eee5032f",31460:"f464b99a",31495:"fb75c206",31503:"e4594a63",31508:"41f5a6d2",31532:"1f5d6a30",31680:"fa2f5847",31684:"09d8c3a4",32089:"4470087f",32118:"fb1218a9",32185:"51fa421a",32227:"53424860",32236:"d1513e70",32367:"c00b49ad",32523:"05b3e639",32751:"a7c1a0ec",32887
:"eb1549e9",33367:"1d9261ac",33492:"1bca3249",33560:"818823b9",33763:"fff7b6e8",33893:"8422caaa",34224:"37d4f123",34353:"6580ced9",34610:"eea5f367",34643:"65df3d35",34817:"f6773039",35032:"046172dc",35328:"ad76bf80",35420:"51d0de41",35456:"8e7d50a2",35514:"5ab197a5",35600:"2c3c2ea6",35693:"68d19d38",35707:"2e1d0e00",35971:"eae3663a",36093:"69bdd21e",36129:"ace962cc",36298:"3b500f01",36565:"f77a6ffd",36882:"76f084ae",37039:"2520d203",37259:"bf8a911c",37410:"20a79681",38399:"4d8c07c4",38659:"190c673d",38758:"311f287b",38814:"a7a0ecb6",38890:"a40db232",38933:"7aecf381",39289:"8137d071",39633:"a546ef4e",39792:"f7c1c183",39976:"0861ade5",40116:"cf5d68e3",40206:"cfdaf306",40335:"bc34ddf5",40414:"e35b48a1",40512:"5845ef18",40561:"ead3ade5",40705:"b54b617c",40968:"cd50e9d9",40992:"aabbbc7e",41099:"a63939e6",41115:"0c77509b",41772:"1137ff4c",42140:"966b40b3",42150:"d13c5bfb",42479:"d039dc3f",42553:"531ae155",42632:"1e6a2ef9",43257:"2f1aac5b",43397:"ed17fbb9",43611:"291bb016",43956:"5534efc2",43976:"17ca8484",44008:"4ea65622",44043:"85bf98de",44265:"0fba09c7",44482:"18dd72b8",44544:"1866e095",44801:"8c3e10eb",44849:"9874d022",45037:"23992941",45141:"9969e5f7",45290:"6bb68e89",45327:"26e75e35",45389:"99fb9804",45589:"80900647",45602:"76bc5640",45661:"ffa367d4",45843:"efcf4ea7",45907:"57023425",46103:"ccc49370",46402:"9bfad1fd",46750:"82c182bc",46871:"1e131061",46939:"a3ba5b60",47025:"29707690",47176:"4fc9a01a",47224:"48988e0b",47371:"73d417a5",47429:"aff75f73",47760:"d3830ad4",47871:"541d169a",47953:"b5168e69",48360:"08bd5166",48493:"6d268c49",48510:"0a85ff3c",48610:"6875c492",48932:"248e03f5",48934:"afbc56b2",48951:"1a083444",48983:"2497064c",49073:"d32b6b2b",49352:"c1b1e234",49526:"fcd50b8b",49836:"afad409e",49933:"2e786fdc",50683:"ba7181fd",50765:"42e87eeb",50801:"631037e5",50947:"e5e4671e",50996:"9db1f0be",51334:"95aa0c9c",51358:"dfd0736f",51669:"b8401e80",51969:"b5a5e0cb",52057:"ed9f1119",52066:"5ca5940e",52341:"bcf26692",52535:"814f3328",52791:"24188f33",52825:"5e082069",52989:"6a2e0576",53233:"a56c6b7a",53249:"cddd8399",53325:"1af30dc4",53414:"71662ff9",53544:"05f3e170",53550:"6e8a7928",53608:"9e4087bc",53674:"91a4177b",53981:"dbdb3f24",54086:"93ca4beb",54118:"04b1c040",54185:"c6dac06e",54202:"fd400683",54351:"b0c58f26",54827:"d02ee2fb",55212:"4e5616f0",55604:"9b55b2aa",55739:"7ffe8452",55844:"3c939a9c",55901:"437a7bc9",56054:"5f35a0de",56436:"345c38fd",56556:"cc321d97",56592:"55c09602",56637:"f430c6df",56681:"c7bda2e7",56733:"a5bc72c7",57186:"d98b6f22",57242:"56b8ac01",57520:"eebdc9c6",57626:"c0917cb8",57791:"a76d6c80",58283:"7c24e110",58284:"73f2c183",58326:"e15bcb33",58375:"38c0935c",59273:"4449d5f1",59289:"b49531e7",59310:"3cd7ddbe",59591:"3a53518b",59656:"1f29c771",59732:"f14812ff",59801:"e950a7f9",60002:"9bbf01c8",60045:"48718d21",60109:"353f3947",60195:"7d7dfbbe",60591:"f99625e8",60851:"6e38ab13",61285:"8661c2de",61428:"a39a9928",61551:"3195a7b0",61689:"dc79b1e9",61854:"adb71217",62039:"2e0f5cec",62111:"8c4b8e49",62121:"9a0fbc46",62226:"68da338b",62316:"bfea878c",62394:"9ca7809d",62829:"cfea7194",63020:"e0d4d0dd",63058:"3411059c",63107:"021a310b",63454:"dee130d7",63818:"8cec74d9",64013:"01a85c17",64063:"29b7f3be",64107:"cc1d4c18",64243:"88d31c16",64416:"dd194dbe",64422:"014a5837",64519:"8518a6d3",64599:"563ab102",64873:"e32089ea",64884:"2576ff29",64985:"3096f953",65078:"d182fb80",65112:"4ffbe17f",65444:"e39b4679",65467:"7beec960",65477:"554493c6",65574:"528e29f1",65628:"3b7a3f3e",65655:"bebc3ac6",65842:"399d48da",65932:"37daacb8",66170:"933c02a1",66226:"5fa70989",66277:"1984d11b",66562:"
e73f859e",66627:"c16232bc",66778:"a1463431",66954:"d9cecb84",67153:"aefd1ce5",67285:"107b70ed",67301:"28b7232a",67442:"dac925f7",67492:"c34dd313",67493:"0e9a9e55",67525:"eb3832f3",67541:"7fdbf36e",67664:"a57f4178",67910:"4509e610",67982:"8a9e1376",68045:"a0fe705b",68097:"3ddf8900",68217:"ac5779b8",68398:"e9457a88",68539:"2f338473",68616:"4b35450a",68626:"41f3d1d4",68869:"5f5f4d9b",68952:"e323c1ba",69125:"70ee9ef1",69437:"5b5bbdd7",69468:"5c36283e",69618:"b3406135",69748:"6c4b5682",70033:"a2b6e306",70137:"9355e337",70144:"f67fe035",70525:"ba97a692",70625:"96e2cccf",70710:"25eabba4",70714:"5f92cd96",70831:"0aa128fb",70956:"8dd37400",71247:"5f7a42fa",71843:"dbca4a19",72129:"8532ad45",72498:"7ab3d102",72975:"408f120a",73028:"11295d65",73255:"fe07bdbe",73460:"089f961f",73464:"03021317",73566:"3dddbf8e",73657:"4f604ceb",74075:"fae86d7e",74095:"09b52532",74121:"55960ee5",74270:"3ec232d9",74436:"c875b05b",74549:"6c79c040",74692:"d93bf326",74783:"83bcd91d",74987:"769b7ddc",75024:"467cdcc7",75070:"3716aceb",75189:"d6b55977",75492:"240cbf48",75626:"af584b81",76306:"9f566abb",76374:"a831a863",77142:"f5c46a41",77258:"e98ff5dc",77269:"9508783d",77308:"dcbbe415",77408:"9f7b1adc",77518:"9e1c8ba3",77634:"32d2836b",77736:"3760967f",77999:"5657b1a5",78029:"77816f9e",78039:"9ac88ecc",78060:"360f41e6",78250:"b6f8819f",78273:"204b800a",78504:"c7719545",78785:"8d6cbe01",78793:"074f5eeb",79208:"02163d1c",79311:"d4bf935c",79807:"c4115680",79915:"d78d712a",80053:"935f2afb",80077:"e58ee7f4",80203:"ae644a35",80308:"441a7f95",80484:"5d613655",80576:"c2a9f04f",80732:"84e90c5f",80957:"1b338be2",81005:"fac6f2d4",81377:"c64e21de",81489:"27ca247f",81714:"d0342500",81842:"0ce26544",81926:"8f020eac",82060:"f5df6522",82241:"ead137ee",82545:"3d828cc6",82815:"9377a004",83175:"aa1e90ab",83390:"34759613",83440:"b1bf7260",83552:"e726b67f",83782:"261d0ea0",83792:"438501e2",83869:"28bf1441",83890:"28dfc6fb",84041:"ee6959ee",84128:"a09c2993",84217:"1d3c0678",84621:"34ac2676",84633:"e3bd683e",84849:"2c31ff43",84866:"22c54347",85050:"86f22513",85115:"44604fa9",85136:"8be741dd",85232:"d5927b70",85435:"0ad283e5",85449:"3fc514d2",85481:"c3ce6b05",85493:"cbc19f4b",85591:"3733e62b",85637:"96991cca",85760:"5e8c8a07",86561:"42c92bcd",86599:"117f37cd",86646:"33b6fdcc",86894:"2213fc24",87054:"9dd8a0d2",87280:"593ac3b1",87388:"0260d845",87456:"e29698a7",87464:"8aa67d88",87682:"b91032df",87709:"96c3c139",87754:"c0670030",87819:"802ad713",88187:"2c34c550",88433:"be8f9bda",88532:"755af260",88838:"9c38ddd2",89459:"4b002b59",89480:"79afda13",89670:"e4102989",89738:"c67d2a5e",89749:"8a8aa245",89778:"23b9c839",89801:"ce22cbd0",90064:"0b979966",90072:"c7ffffeb",90125:"fb16f602",90239:"25b6cbf3",90250:"36ef0f87",90371:"5aff8b89",90533:"b2b675dd",90728:"8eef043b",90814:"cff6a186",91360:"394d5a7a",91375:"02daaa8d",91610:"9d3e2903",91679:"ac7622a2",91799:"caaa2886",91951:"78c9ae28",91983:"64ba6d0f",92074:"8903e609",92200:"9ad029fd",92273:"b2171041",92290:"ff2037b4",92469:"18c1b595",92518:"94e63a1a",92519:"a94c1f1c",92706:"eb60262c",92715:"f0aa3789",92872:"0b1ac180",93042:"6cf48756",93089:"a6aa9e1f",93119:"027c2617",93171:"890e518c",93264:"2cdd8fc8",93316:"bc1274a5",93430:"dac27efb",93570:"87153e45",93586:"35ca84ad",93810:"be9aa551",93954:"7d75cf68",94073:"05a474a1",94507:"555c312a",94532:"4bcdbd8b",94551:"311a1527",94629:"caa9028b",94915:"83688337",94976:"c2340238",95159:"553f28ff",95597:"b0207dc0",95869:"7d88342b",95980:"7043a272",96170:"72790c29",96241:"0bec58d7",96476:"4ff8b690",96477:"e88d5fb1",96542:"cada9e63",96647:"4af5dc2e",96698:"e63e6ab4",96701:"72
b06b07",96736:"659d5cde",97616:"306a8c6c",97915:"90e4ca75",97948:"8ddb8ae8",98049:"ef6c6ab7",98129:"38e24728",98311:"72df85c1",98701:"f25316fd",98981:"bab44dbb",99615:"d3701aa3",99784:"30778cf0",99874:"07a5f688",99923:"74e0d570",99924:"df203c0f",99928:"43220a19"}[e]||e)+"."+{328:"2474f8e0",359:"f7b4ac85",639:"638b6a46",1012:"70f1d962",1226:"ed34399c",1362:"ab7e1ec5",1402:"5bc6edcc",2170:"ebeebe3c",2275:"6d1db94e",2647:"442da986",2698:"0730c0af",2699:"65721931",2794:"c1a68b30",2814:"a92cdaeb",3009:"21284465",3358:"df766fe2",3587:"e046259c",3671:"14e36a4d",3829:"2a47bdd2",4065:"79e088d3",4280:"2100fb18",4380:"358cf740",4443:"f54a719f",4452:"7f0d1de7",4484:"36e9e778",4591:"06d9fd0b",4736:"95203970",4984:"8df42d59",5140:"9541a33c",5142:"691faa50",5233:"ea86fea6",5705:"79ff171c",5909:"e77f8473",6288:"1e0bf6ea",6339:"71687c00",6652:"07533954",6959:"00ccd71f",7015:"05fff3ee",7029:"8812a8be",7058:"1bffe547",7342:"ce97d43f",7479:"f5f691b4",7586:"052eb7c6",7766:"728fc0bc",8301:"59a8c6bf",8398:"e309a3c2",8605:"bfa0c905",8785:"fa277bf8",8890:"1932c262",9235:"e318bcd5",9409:"17740e18",9617:"56b2b95d",9646:"f273a2d5",9782:"03300db1",9828:"02da4631",9991:"5aeac15e",10001:"5c87cb43",10201:"f27835d2",10218:"d01b2999",10341:"9bb4f8b8",10388:"eeb9709a",10499:"5a924c44",10527:"9843479b",10828:"a4553d0d",10967:"bdc81e1e",10972:"bfd6544a",11477:"33c87ff5",11539:"92360ac1",11657:"a6240e17",11713:"cd9cef4e",12072:"2b42fa6d",12431:"df0fd6b1",12561:"acba7467",12979:"eddb07fb",13050:"1ce768ca",13107:"82ea8621",13134:"76625832",13143:"c388d623",13244:"71f6240e",13479:"45266115",13703:"e6202db6",13751:"1f2d2496",13755:"1a67012a",13832:"742c41ef",13948:"183ab949",14065:"fc08bea3",14272:"45e9070e",14516:"70bdbe7e",14860:"e0f98972",14910:"4d7f4085",15287:"5889feb5",15450:"39242097",15665:"a614478f",16274:"45d17c34",16511:"2a3bafe5",16594:"73515851",16742:"59b8b60f",16884:"3cab5ebc",17167:"3ff11a94",17187:"ecd6e604",17234:"2fc0afee",17284:"d2fdc847",17353:"3d294842",17542:"fb94b0ca",17625:"125374d0",17951:"1e958367",18139:"695ac6ad",18471:"595921ec",18674:"1855f473",18730:"3d5c8ba9",18855:"cd86d191",18907:"ae33969e",19001:"7281a5cc",19096:"739e7244",19267:"f7eb1c5e",19468:"1b41c252",19881:"a9c9b079",20026:"76eee45c",20490:"4f98e0f2",20522:"07b86566",20709:"002bb2a0",20943:"425707be",21065:"a41e193b",21405:"8cad1850",21602:"61750831",22215:"a6a37ed8",22636:"4c65f9a0",23057:"1ad7eda9",23851:"aaf0b50e",23946:"ad78fa4c",24079:"3a7c022b",24153:"a85450b6",24438:"f1ca4a2b",24608:"9c4d2d11",24825:"817ca35c",24938:"fdbcbb23",24958:"bde97521",25048:"87f69e44",25077:"39f84e06",25146:"0997c567",25246:"b5a11b9d",25356:"e744b233",25757:"47e735a8",26074:"cc3aa371",26107:"b5fdfa9c",26234:"ce958100",26802:"c15ebab9",26866:"b52b946a",27307:"a5de26aa",27377:"10548e4b",27598:"8b29878d",27624:"5389827e",27657:"b399705d",27693:"b34f260b",27872:"694cc66b",27918:"d0a8d8ed",27991:"4ac447ee",28267:"922c9c35",28441:"ff1cda8f",28863:"e78497cb",29025:"ad1779fa",29231:"6939bb2d",29514:"c2b68c2b",29893:"6400f7b4",29898:"fabe3892",30010:"a251ece7",30021:"8103d2bc",30765:"7d6a817f",30870:"0a7ea360",31034:"657c9eb3",31105:"601dcbb2",31210:"99bebbdf",31460:"73b8518f",31495:"c91216ad",31503:"2c1b93ad",31508:"a3894107",31532:"8a4f7677",31680:"823a7231",31684:"b02c9aa6",32089:"2e3743ec",32118:"c4b5d617",32185:"e3b8eb14",32227:"4cbb576e",32236:"d74dfb6a",32367:"e00f9da9",32523:"4e711026",32751:"35cffd82",32887:"32d9e8e2",33367:"2e74a184",33492:"18242563",33560:"3d37821a",33763:"13ea4824",33893:"9c209ddd",34224:"b1754939",34353:"5df010f9",34610:"1714674b",34643:
"dea6b928",34817:"f5960bf6",35032:"593d5472",35328:"bcd6e1e2",35420:"979421bb",35456:"e55e1167",35514:"dbdedade",35600:"350ac85a",35693:"9fe9997d",35707:"71cdb4d7",35971:"6fafd2f6",36093:"0ac5190c",36129:"d4d41b5b",36298:"3a36bf4e",36565:"ff24bae5",36882:"adaced3e",37039:"fb0272bc",37259:"f4c9441f",37410:"7a21e8ee",38399:"282f4b75",38659:"83321961",38758:"43634279",38814:"82effc26",38890:"3685fb76",38933:"2aef4719",39289:"b1fa1f56",39633:"00747b08",39792:"105fd077",39976:"df33ef04",40116:"940177fa",40206:"366c057e",40335:"62c09e15",40414:"02e194ae",40512:"894e2f8e",40561:"4b5a46ab",40705:"8f8b91e7",40968:"cb073fc8",40992:"9759ebdb",41099:"86c883cf",41115:"49dbda5f",41772:"396480ae",42140:"f4b0ef01",42150:"2e2273df",42479:"64d658ec",42553:"a62ebfeb",42632:"5305238a",43257:"776c3f9d",43397:"0a3344ae",43611:"19e99f2b",43956:"55f22f88",43976:"683d82c7",44008:"a5952380",44043:"dd23ea4d",44265:"c07ff1fb",44482:"ff4e5b9b",44544:"0cb89fa6",44801:"7bcab8c9",44849:"fd8a19d4",45037:"c1a9a579",45141:"416ea356",45290:"6e6bbb42",45327:"d9b995e6",45389:"2266523d",45589:"c6032b91",45602:"55d99a93",45661:"6d358b1a",45843:"3ce6215c",45907:"3a7096a4",46103:"c269c6ad",46402:"4bfe5322",46750:"2d2b65a4",46871:"6a0d45ec",46939:"af368ed5",46945:"4deecdf7",47025:"b2455dd2",47176:"d6a48d9b",47224:"8350e7bb",47371:"75ac95a0",47429:"fe4f0b48",47760:"9f080d13",47871:"775ce29b",47953:"ff7ca46e",48360:"704c48e5",48493:"a87b73be",48510:"9d33d44b",48610:"884dfaf4",48932:"0a2ccfb1",48934:"10f3a163",48951:"04e2ec14",48983:"99ada184",49073:"1b25eb30",49352:"43cad4ba",49526:"2e0b689f",49836:"cfaa7ef2",49933:"48463dd2",50683:"a872d20e",50765:"39abfd54",50801:"f8bc35d7",50947:"0cc3e088",50996:"79164d6e",51334:"f84c3fb8",51358:"b52f814a",51669:"781523b4",51969:"04895f4a",52057:"d414b0a4",52066:"13035838",52341:"73eb8e32",52535:"813fdab8",52791:"80206f7e",52825:"58568080",52989:"a3cd670c",53233:"861129ab",53249:"a92dedd1",53325:"8c55345d",53414:"2d023348",53544:"be4aee5c",53550:"38b2fc26",53608:"51fbf6e8",53674:"d66956ff",53981:"0501d788",54086:"fd0b8865",54118:"4f07fe4c",54185:"eedaaa83",54202:"8704e433",54351:"d2e4f02c",54827:"221219b1",55040:"d1cb509f",55212:"a98669a3",55604:"da90566d",55739:"c16587c9",55844:"8af1b4e3",55901:"ece48539",56054:"ea7982d5",56436:"126d99cf",56556:"843e3673",56592:"97df0ab9",56637:"c44dca55",56681:"bd27ea90",56733:"cbe4865f",57186:"24d8f4dc",57242:"c56ec24e",57520:"d7c702e7",57626:"b28cc499",57791:"3f2df52f",58283:"582213c5",58284:"02eb845f",58326:"b50a1ad3",58375:"299a811d",59273:"20dcc2e0",59289:"665c9727",59310:"60d2b2a2",59591:"db1cce21",59656:"69668bd4",59732:"ebea58ca",59801:"80e44253",60002:"953a68a1",60045:"32fa5591",60109:"5e7dacc4",60195:"0977f346",60591:"c5eb60b7",60851:"e2d6a9ba",61285:"9125b074",61428:"5d865764",61551:"32bdbf62",61689:"f27a7a8d",61854:"f7f140d1",62039:"ab30f483",62111:"1cd62ed7",62121:"c0d66c5c",62226:"ce1b52d2",62316:"2e53f7c7",62394:"127504f9",62829:"e29b823a",63020:"2061029d",63058:"bffba846",63107:"2452e806",63454:"a5822721",63818:"d881a858",64013:"75f40f00",64063:"9ba6cd49",64107:"bbac9179",64243:"64199d87",64416:"66cc4e72",64422:"7f37a95e",64519:"88bf6eb9",64599:"bee6e2cd",64873:"2869890f",64884:"10798e59",64985:"f6241921",65078:"7777c109",65112:"9205de00",65444:"8a6f349f",65467:"ece7bf27",65477:"9e2aac8b",65574:"a3fd16aa",65628:"c261c2b6",65655:"eb3568af",65842:"ec8d426c",65932:"ae8658cd",66170:"6cceac36",66226:"8047968b",66277:"531643ed",66562:"d1db0526",66627:"ccccff81",66778:"87cc0099",66954:"5c118f9f",67153:"2bd3f770",67285:"6e3629f5",67301:"35652e99",67442:"7
7572a1a",67492:"7473edac",67493:"bb01ace5",67525:"74efa83d",67541:"498e1050",67664:"6e803a88",67910:"030fcd0a",67982:"90e28c3e",68045:"60b02929",68097:"064ef2b9",68217:"51ed944b",68398:"7aec6936",68539:"c61fff36",68616:"e453b2b9",68626:"abc91477",68869:"aa78a90d",68952:"4b2959bc",69125:"bbaaa208",69437:"801cd286",69468:"04631c0e",69618:"93ac4698",69748:"92a03eee",70033:"c81dd0c8",70137:"368a5099",70144:"92d738b0",70525:"23197b34",70625:"7957c2c3",70710:"50baea1c",70714:"96cdf24d",70831:"99acb492",70956:"cd763dcb",71247:"8b4ea1ef",71843:"21cf01bf",72129:"c8ec0c77",72498:"5d7c0346",72975:"5680c01b",73028:"688cc111",73255:"42586d2b",73460:"5f5bb905",73464:"3900e296",73566:"e4f00ea4",73657:"9ec77816",74075:"4055cc8e",74095:"151ea8e0",74121:"89f11c39",74270:"0cfc0b2e",74436:"49293339",74549:"ac95ce6c",74692:"1eaaee6e",74783:"b8d70a26",74987:"8fa80799",75024:"74c9462d",75070:"6a69a2f3",75189:"53ebff88",75492:"449a8d40",75626:"b82ec7c1",76306:"58f424ba",76374:"e84b10de",77142:"58e97215",77258:"7d12cb50",77269:"bb2b45c0",77308:"b9bacdeb",77408:"6ab376ff",77518:"77ebdecc",77634:"0c20142e",77736:"640151b2",77999:"5a2db566",78029:"84c90162",78039:"87a215ee",78060:"9273d2d9",78250:"83166b8e",78273:"8fdb4e87",78504:"b7737141",78785:"1961bedc",78793:"459e3f97",79208:"9f4dc055",79311:"d55e217d",79807:"11748ff0",79915:"cfd9d0b4",80053:"e882ea03",80077:"39adf45f",80203:"09ec72d0",80308:"426a8693",80484:"3e4b8dd7",80576:"02760f9d",80732:"f9e2201b",80957:"4fd62edd",81005:"fbb3df04",81377:"b564a3f3",81489:"20017eac",81714:"406f0762",81842:"de2dc423",81926:"5a6e4cc3",82060:"43ad112d",82241:"b48d5c7e",82545:"92663992",82815:"e6bb7000",83175:"65ab5364",83390:"bfb8ce7f",83440:"43e78a9e",83552:"ab1cd766",83782:"a73df2b7",83792:"f5d52c8a",83869:"c4848101",83890:"9f8a4ccb",84041:"87972b88",84128:"3f768f80",84217:"7e5fd77d",84621:"9ec906be",84633:"d7c3d271",84849:"f38a38a5",84866:"5fd157bf",85050:"18d3c9cb",85115:"710d4a0c",85136:"8a0afea4",85232:"58fb1e51",85435:"15019197",85449:"31077f7f",85481:"aec9457a",85493:"29f86198",85591:"f933c583",85637:"a5671c75",85760:"f6e0a06a",86561:"c3c75307",86599:"7fbea23f",86646:"b4060782",86894:"d2081f40",87054:"e59e7145",87280:"de394ad9",87388:"93580f74",87456:"c4c4342f",87464:"cbddcd27",87682:"4a113f2e",87709:"e08cb997",87754:"0d4a573e",87819:"9cda9b8b",88187:"33cb0fba",88433:"70b6fd4d",88532:"3a9112b8",88838:"ce6821fb",89459:"81b65887",89480:"79950ca4",89670:"8898bde3",89738:"79c0042d",89749:"dd663aed",89778:"85cc1d55",89801:"63b959ff",90064:"47ace020",90072:"e8a8f895",90125:"94b03ee0",90239:"58d3e98f",90250:"128dc57e",90371:"e4037c42",90533:"b8164802",90728:"471deafa",90814:"f59a7fa7",91360:"168ad11f",91375:"a293ad8a",91610:"9bace73b",91679:"9d380da4",91799:"255a9367",91951:"9047373d",91983:"a1d5ce3c",92074:"6b207a3b",92200:"2c247713",92273:"c616aef0",92290:"b3d7d750",92469:"89da1599",92518:"ac395260",92519:"aa4dfbd2",92706:"bb0be4f8",92715:"3e1cf124",92872:"fa5b18bf",93042:"a01f354a",93089:"1ea8fcbb",93119:"3c04575c",93171:"cfe43121",93264:"bddb9a40",93316:"99fd8323",93430:"d92b7d53",93570:"7e2183ff",93586:"2bdb92d3",93810:"92610bb8",93954:"dc9c9686",94073:"fe70bc9b",94507:"6d2364b5",94532:"e2dcc3c9",94551:"dfdd8a66",94629:"2eaf9893",94915:"4be70ab3",94976:"8f29526e",95159:"346a075c",95597:"0f252f81",95869:"060bac3c",95980:"e3a399ad",96170:"256bc801",96241:"47e8e610",96476:"57f1e942",96477:"36284a74",96542:"eee928eb",96647:"efbfbff6",96698:"1506ccce",96701:"aac9cc11",96736:"436b2353",97616:"ffab3f59",97915:"0b4f047d",97948:"96b083c6",98049:"95b7d2f3",98129:"1956ed49",98311:"675
ef83e",98701:"75250e43",98981:"c903e5f1",99615:"0c948941",99784:"fff93b62",99874:"fa81c65b",99923:"c6bbe583",99924:"5ffd65fa",99928:"fc84903d"}[e]+".js"},n.miniCssF=function(e){return"assets/css/styles.92d87943.css"},n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),n.o=function(e,a){return Object.prototype.hasOwnProperty.call(e,a)},f={},d="linkis-web-apache:",n.l=function(e,a,c,b){if(f[e])f[e].push(a);else{var t,r;if(void 0!==c)for(var o=document.getElementsByTagName("script"),i=0;i=d)&&Object.keys(n.O).every((function(e){return n.O[e](c[r])}))?c.splice(r--,1):(t=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[c,f,d]},n.n=function(e){var a=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(a,{a:a}),a},c=Object.getPrototypeOf?function(e){return Object.getPrototypeOf(e)}:function(e){return e.__proto__},n.t=function(e,f){if(1&f&&(e=this(e)),8&f)return e;if("object"==typeof e&&e){if(4&f&&e.__esModule)return e;if(16&f&&"function"==typeof e.then)return e}var d=Object.create(null);n.r(d);var b={};a=a||[null,c({}),c([]),c(c)];for(var t=2&f&&e;"object"==typeof t&&!~a.indexOf(t);t=c(t))Object.getOwnPropertyNames(t).forEach((function(a){b[a]=function(){return e[a]}}));return b.default=function(){return e},n.d(d,b),d},n.d=function(e,a){for(var c in a)n.o(a,c)&&!n.o(e,c)&&Object.defineProperty(e,c,{enumerable:!0,get:a[c]})},n.f={},n.e=function(e){return Promise.all(Object.keys(n.f).reduce((function(a,c){return n.f[c](e,a),a}),[]))},n.u=function(e){return"assets/js/"+({328:"4cc9882f",359:"2124c49e",639:"1ef83aab",1012:"b359ab7d",1226:"32c11423",1362:"6d204534",1402:"45047182",2170:"5526e2c8",2275:"f2e7bc47",2647:"d0685248",2698:"a534d5a4",2699:"6b4f6f6d",2794:"c976da7c",2814:"68f4675e",3009:"8e27a41e",3358:"ba75a7e1",3587:"45350984",3671:"6248a31d",4065:"217deffc",4280:"781d1b70",4380:"02f6a4b8",4443:"49ee9fc2",4452:"b047bf19",4484:"a854c309",4591:"77ddc047",4736:"7e21a02f",4984:"07edeecc",5140:"c3c3ee8a",5142:"d77f29dd",5233:"f2678917",5705:"166c3354",5909:"844135d6",6288:"e59b4707",6339:"e4bc1c20",6652:"78060cbc",6959:"322e6455",7015:"33f34b53",7029:"9b73e49d",7058:"d89de855",7342:"16b1aeb6",7479:"4c92610f",7586:"3e78e8ed",7766:"af1aaf24",8301:"a67041c8",8398:"6131eab8",8605:"5be510ab",8785:"127364d6",8890:"798fb933",9235:"e9ffd44c",9409:"15a0842e",9617:"cf38eb0d",9646:"4fdf3839",9782:"55dfda34",9828:"787028e7",9991:"d3b38238",10001:"8eb4e46b",10201:"57cd18ee",10218:"7cc7c4b1",10341:"a34d501f",10388:"b14f3fa2",10499:"e428c6d2",10527:"710884a6",10828:"5917547a",10967:"216ac574",11477:"b2f554cd",11539:"77745b3d",11657:"1a566584",11713:"a7023ddc",12072:"eee10519",12431:"5b37fdc8",12561:"2670bca3",12979:"2ee9677b",13050:"5f82aa37",13107:"cd7c5b9a",13134:"eba5f9c4",13143:"04bdf1ac",13244:"0f7894ab",13479:"8a12cfa4",13703:"2ea0638b",13751:"3720c009",13755:"54f9b777",13832:"07aed5a5",13948:"f32700a0",14065:"876124f9",14272:"50aee6de",14516:"abaaa1fe",14860:"e37a6402",14910:"51c20031",15287:"333c80e1",15450:"466720ab",15665:"908165ba",16274:"8625a1ce",16511:"83d17af4",16594:"7cc92f5c",16742:"4c05f83b",16884:"c2352a99",17167:"a184b6b2",17187:"b9f50d96",17234:"bc244d90",17284:"2cf4430d",17353:"3df00f5b",17542:"66d63bfc",17625:"9a3ec700",17951:"2d364229",18139:"43dc7314",18471:"6423b631",18674:"48d82b2e",18730:"d80dfec6",18855:"9968f92c",18907:"4f9fd1aa",19001:"41664cec",19096:"856315e7",19267:"b0f3eaa6",19468:"53baf039",19881:"70b31b37",20026:"21a12340",20490:"f16124a0",20522:"b7
f5bbd5",20709:"6c7c2e71",20943:"9b480441",21065:"966e982b",21405:"948b0dab",21602:"68a93d86",22215:"c2471b2c",22636:"b96a8a04",23057:"3ab15d88",23851:"c9177f39",23946:"af6f9f26",24079:"87cf41e2",24153:"280df7e5",24438:"74337923",24825:"d28aee8d",24938:"ba1b8836",24958:"c38140d4",25048:"bf275373",25077:"dff35117",25146:"cac1e9bc",25246:"4eb6e5ee",25356:"94b02a9f",25757:"8f152d3b",26074:"2b9753f8",26107:"c5c3ab65",26234:"9154a6bd",26802:"52690743",26866:"d4051e29",27307:"bef57165",27377:"6f6118a9",27598:"8837ae6a",27624:"678743b7",27657:"970236dc",27693:"8a0722c3",27872:"5771c448",27918:"17896441",27991:"dc1e40d7",28267:"7a3788d1",28441:"bd518af2",28863:"6e916c0f",29025:"91b65c41",29231:"0c159898",29514:"1be78505",29893:"9c983a1e",29898:"db672e8f",30010:"778574bb",30021:"805f29de",30765:"2afb85b6",30870:"b571d381",31034:"5e40d2f9",31105:"dee797b6",31210:"eee5032f",31460:"f464b99a",31495:"fb75c206",31503:"e4594a63",31508:"41f5a6d2",31532:"1f5d6a30",31680:"fa2f5847",31684:"09d8c3a4",32089:"4470087f",32118:"fb1218a9",32185:"51fa421a",32227:"53424860",32236:"d1513e70",32367:"c00b49ad",32523:"05b3e639",32751:"a7c1a0ec",32887:"eb1549e9",33367:"1d9261ac",33492:"1bca3249",33560:"818823b9",33763:"fff7b6e8",33893:"8422caaa",34224:"37d4f123",34353:"6580ced9",34610:"eea5f367",34643:"65df3d35",34817:"f6773039",35032:"046172dc",35328:"ad76bf80",35420:"51d0de41",35456:"8e7d50a2",35514:"5ab197a5",35600:"2c3c2ea6",35693:"68d19d38",35707:"2e1d0e00",35971:"eae3663a",36093:"69bdd21e",36129:"ace962cc",36298:"3b500f01",36565:"f77a6ffd",36882:"76f084ae",37039:"2520d203",37259:"bf8a911c",37410:"20a79681",38399:"4d8c07c4",38659:"190c673d",38758:"311f287b",38814:"a7a0ecb6",38890:"a40db232",38933:"7aecf381",39289:"8137d071",39633:"a546ef4e",39792:"f7c1c183",39976:"0861ade5",40116:"cf5d68e3",40206:"cfdaf306",40335:"bc34ddf5",40414:"e35b48a1",40512:"5845ef18",40561:"ead3ade5",40705:"b54b617c",40968:"cd50e9d9",40992:"aabbbc7e",41099:"a63939e6",41115:"0c77509b",41772:"1137ff4c",42140:"966b40b3",42150:"d13c5bfb",42479:"d039dc3f",42553:"531ae155",42632:"1e6a2ef9",43257:"2f1aac5b",43397:"ed17fbb9",43611:"291bb016",43956:"5534efc2",43976:"17ca8484",44008:"4ea65622",44043:"85bf98de",44265:"0fba09c7",44482:"18dd72b8",44544:"1866e095",44801:"8c3e10eb",44849:"9874d022",45037:"23992941",45141:"9969e5f7",45290:"6bb68e89",45327:"26e75e35",45389:"99fb9804",45589:"80900647",45602:"76bc5640",45661:"ffa367d4",45843:"efcf4ea7",45907:"57023425",46103:"ccc49370",46402:"9bfad1fd",46750:"82c182bc",46871:"1e131061",46939:"a3ba5b60",47025:"29707690",47176:"4fc9a01a",47224:"48988e0b",47371:"73d417a5",47429:"aff75f73",47760:"d3830ad4",47871:"541d169a",47953:"b5168e69",48360:"08bd5166",48493:"6d268c49",48510:"0a85ff3c",48610:"6875c492",48932:"248e03f5",48934:"afbc56b2",48951:"1a083444",48983:"2497064c",49073:"d32b6b2b",49352:"c1b1e234",49526:"fcd50b8b",49836:"afad409e",49933:"2e786fdc",50683:"ba7181fd",50765:"42e87eeb",50801:"631037e5",50947:"e5e4671e",50996:"9db1f0be",51334:"95aa0c9c",51358:"dfd0736f",51669:"b8401e80",51969:"b5a5e0cb",52057:"ed9f1119",52066:"5ca5940e",52341:"bcf26692",52535:"814f3328",52791:"24188f33",52825:"5e082069",52989:"6a2e0576",53233:"a56c6b7a",53249:"cddd8399",53325:"1af30dc4",53414:"71662ff9",53544:"05f3e170",53550:"6e8a7928",53608:"9e4087bc",53674:"91a4177b",53981:"dbdb3f24",54086:"93ca4beb",54118:"04b1c040",54185:"c6dac06e",54202:"fd400683",54351:"b0c58f26",54827:"d02ee2fb",55212:"4e5616f0",55604:"9b55b2aa",55739:"7ffe8452",55844:"3c939a9c",55901:"437a7bc9",56054:"5f35a0de",56436:"345c38fd",56556:"cc321d97",56592:"55c0
9602",56637:"f430c6df",56681:"c7bda2e7",56733:"a5bc72c7",57186:"d98b6f22",57242:"56b8ac01",57520:"eebdc9c6",57626:"c0917cb8",57791:"a76d6c80",58283:"7c24e110",58284:"73f2c183",58326:"e15bcb33",58375:"38c0935c",59273:"4449d5f1",59289:"b49531e7",59310:"3cd7ddbe",59591:"3a53518b",59656:"1f29c771",59732:"f14812ff",59801:"e950a7f9",60002:"9bbf01c8",60045:"48718d21",60109:"353f3947",60195:"7d7dfbbe",60591:"f99625e8",60851:"6e38ab13",61285:"8661c2de",61428:"a39a9928",61551:"3195a7b0",61689:"dc79b1e9",61854:"adb71217",62039:"2e0f5cec",62111:"8c4b8e49",62121:"9a0fbc46",62226:"68da338b",62316:"bfea878c",62394:"9ca7809d",62829:"cfea7194",63020:"e0d4d0dd",63058:"3411059c",63107:"021a310b",63454:"dee130d7",63818:"8cec74d9",64013:"01a85c17",64063:"29b7f3be",64107:"cc1d4c18",64243:"88d31c16",64416:"dd194dbe",64422:"014a5837",64519:"8518a6d3",64599:"563ab102",64873:"e32089ea",64884:"2576ff29",64985:"3096f953",65078:"d182fb80",65112:"4ffbe17f",65444:"e39b4679",65467:"7beec960",65477:"554493c6",65574:"528e29f1",65628:"3b7a3f3e",65655:"bebc3ac6",65842:"399d48da",65932:"37daacb8",66170:"933c02a1",66226:"5fa70989",66277:"1984d11b",66562:"e73f859e",66627:"c16232bc",66778:"a1463431",66954:"d9cecb84",67153:"aefd1ce5",67285:"107b70ed",67301:"28b7232a",67442:"dac925f7",67492:"c34dd313",67493:"0e9a9e55",67525:"eb3832f3",67541:"7fdbf36e",67664:"a57f4178",67910:"4509e610",67982:"8a9e1376",68045:"a0fe705b",68097:"3ddf8900",68217:"ac5779b8",68398:"e9457a88",68539:"2f338473",68616:"4b35450a",68626:"41f3d1d4",68869:"5f5f4d9b",68952:"e323c1ba",69125:"70ee9ef1",69437:"5b5bbdd7",69468:"5c36283e",69618:"b3406135",69748:"6c4b5682",70033:"a2b6e306",70137:"9355e337",70144:"f67fe035",70525:"ba97a692",70625:"96e2cccf",70710:"25eabba4",70714:"5f92cd96",70831:"0aa128fb",70956:"8dd37400",71247:"5f7a42fa",71843:"dbca4a19",72129:"8532ad45",72498:"7ab3d102",72975:"408f120a",73028:"11295d65",73255:"fe07bdbe",73460:"089f961f",73464:"03021317",73566:"3dddbf8e",73657:"4f604ceb",74075:"fae86d7e",74095:"09b52532",74121:"55960ee5",74270:"3ec232d9",74436:"c875b05b",74549:"6c79c040",74692:"d93bf326",74783:"83bcd91d",74987:"769b7ddc",75024:"467cdcc7",75070:"3716aceb",75189:"d6b55977",75492:"240cbf48",75626:"af584b81",76306:"9f566abb",76374:"a831a863",77142:"f5c46a41",77258:"e98ff5dc",77269:"9508783d",77308:"dcbbe415",77408:"9f7b1adc",77518:"9e1c8ba3",77634:"32d2836b",77736:"3760967f",77999:"5657b1a5",78029:"77816f9e",78039:"9ac88ecc",78060:"360f41e6",78250:"b6f8819f",78273:"204b800a",78504:"c7719545",78785:"8d6cbe01",78793:"074f5eeb",79208:"02163d1c",79311:"d4bf935c",79807:"c4115680",79915:"d78d712a",80053:"935f2afb",80077:"e58ee7f4",80203:"ae644a35",80308:"441a7f95",80484:"5d613655",80576:"c2a9f04f",80732:"84e90c5f",80957:"1b338be2",81005:"fac6f2d4",81377:"c64e21de",81489:"27ca247f",81714:"d0342500",81842:"0ce26544",81926:"8f020eac",82060:"f5df6522",82241:"ead137ee",82545:"3d828cc6",82815:"9377a004",83175:"aa1e90ab",83390:"34759613",83440:"b1bf7260",83552:"e726b67f",83782:"261d0ea0",83792:"438501e2",83869:"28bf1441",83890:"28dfc6fb",84041:"ee6959ee",84128:"a09c2993",84217:"1d3c0678",84621:"34ac2676",84633:"e3bd683e",84849:"2c31ff43",84866:"22c54347",85050:"86f22513",85115:"44604fa9",85136:"8be741dd",85232:"d5927b70",85435:"0ad283e5",85449:"3fc514d2",85481:"c3ce6b05",85493:"cbc19f4b",85591:"3733e62b",85637:"96991cca",85760:"5e8c8a07",86561:"42c92bcd",86599:"117f37cd",86646:"33b6fdcc",86894:"2213fc24",87054:"9dd8a0d2",87280:"593ac3b1",87388:"0260d845",87456:"e29698a7",87464:"8aa67d88",87682:"b91032df",87709:"96c3c139",87754:"c0670030",87819:"802ad7
13",88187:"2c34c550",88433:"be8f9bda",88532:"755af260",88838:"9c38ddd2",89459:"4b002b59",89480:"79afda13",89670:"e4102989",89738:"c67d2a5e",89749:"8a8aa245",89778:"23b9c839",89801:"ce22cbd0",90064:"0b979966",90072:"c7ffffeb",90125:"fb16f602",90239:"25b6cbf3",90250:"36ef0f87",90371:"5aff8b89",90533:"b2b675dd",90728:"8eef043b",90814:"cff6a186",91360:"394d5a7a",91375:"02daaa8d",91610:"9d3e2903",91679:"ac7622a2",91799:"caaa2886",91951:"78c9ae28",91983:"64ba6d0f",92074:"8903e609",92200:"9ad029fd",92273:"b2171041",92290:"ff2037b4",92469:"18c1b595",92518:"94e63a1a",92519:"a94c1f1c",92706:"eb60262c",92715:"f0aa3789",92872:"0b1ac180",93042:"6cf48756",93089:"a6aa9e1f",93119:"027c2617",93171:"890e518c",93264:"2cdd8fc8",93316:"bc1274a5",93430:"dac27efb",93570:"87153e45",93586:"35ca84ad",93810:"be9aa551",93954:"7d75cf68",94073:"05a474a1",94507:"555c312a",94532:"4bcdbd8b",94551:"311a1527",94629:"caa9028b",94915:"83688337",94976:"c2340238",95159:"553f28ff",95597:"b0207dc0",95869:"7d88342b",95980:"7043a272",96170:"72790c29",96241:"0bec58d7",96476:"4ff8b690",96477:"e88d5fb1",96542:"cada9e63",96647:"4af5dc2e",96698:"e63e6ab4",96701:"72b06b07",96736:"659d5cde",97616:"306a8c6c",97915:"90e4ca75",97948:"8ddb8ae8",98049:"ef6c6ab7",98129:"38e24728",98311:"72df85c1",98701:"f25316fd",98981:"bab44dbb",99615:"d3701aa3",99784:"30778cf0",99874:"07a5f688",99923:"74e0d570",99924:"df203c0f",99928:"43220a19"}[e]||e)+"."+{328:"2474f8e0",359:"f7b4ac85",639:"638b6a46",1012:"70f1d962",1226:"ed34399c",1362:"ab7e1ec5",1402:"5bc6edcc",2170:"ebeebe3c",2275:"6d1db94e",2647:"442da986",2698:"0730c0af",2699:"65721931",2794:"c1a68b30",2814:"a92cdaeb",3009:"21284465",3358:"df766fe2",3587:"e046259c",3671:"14e36a4d",3829:"2a47bdd2",4065:"79e088d3",4280:"2100fb18",4380:"358cf740",4443:"f54a719f",4452:"7f0d1de7",4484:"36e9e778",4591:"06d9fd0b",4736:"95203970",4984:"8df42d59",5140:"9541a33c",5142:"691faa50",5233:"ea86fea6",5705:"79ff171c",5909:"92d7ee77",6288:"1e0bf6ea",6339:"71687c00",6652:"07533954",6959:"00ccd71f",7015:"05fff3ee",7029:"8812a8be",7058:"1bffe547",7342:"ce97d43f",7479:"f5f691b4",7586:"052eb7c6",7766:"728fc0bc",8301:"59a8c6bf",8398:"e309a3c2",8605:"bfa0c905",8785:"fa277bf8",8890:"1932c262",9235:"e318bcd5",9409:"17740e18",9617:"56b2b95d",9646:"f273a2d5",9782:"03300db1",9828:"02da4631",9991:"5aeac15e",10001:"5c87cb43",10201:"f27835d2",10218:"d01b2999",10341:"9bb4f8b8",10388:"eeb9709a",10499:"5a924c44",10527:"9843479b",10828:"a4553d0d",10967:"bdc81e1e",10972:"bfd6544a",11477:"33c87ff5",11539:"92360ac1",11657:"a6240e17",11713:"cd9cef4e",12072:"2b42fa6d",12431:"df0fd6b1",12561:"acba7467",12979:"eddb07fb",13050:"1ce768ca",13107:"82ea8621",13134:"76625832",13143:"c388d623",13244:"71f6240e",13479:"45266115",13703:"e6202db6",13751:"1f2d2496",13755:"1a67012a",13832:"742c41ef",13948:"183ab949",14065:"fc08bea3",14272:"45e9070e",14516:"70bdbe7e",14860:"e0f98972",14910:"4d7f4085",15287:"5889feb5",15450:"39242097",15665:"a614478f",16274:"45d17c34",16511:"2a3bafe5",16594:"73515851",16742:"59b8b60f",16884:"3cab5ebc",17167:"3ff11a94",17187:"ecd6e604",17234:"2fc0afee",17284:"d2fdc847",17353:"3d294842",17542:"fb94b0ca",17625:"125374d0",17951:"1e958367",18139:"695ac6ad",18471:"595921ec",18674:"1855f473",18730:"3d5c8ba9",18855:"cd86d191",18907:"ae33969e",19001:"7281a5cc",19096:"739e7244",19267:"f7eb1c5e",19468:"1b41c252",19881:"a9c9b079",20026:"76eee45c",20490:"4f98e0f2",20522:"07b86566",20709:"002bb2a0",20943:"425707be",21065:"a41e193b",21405:"8cad1850",21602:"61750831",22215:"a6a37ed8",22636:"4c65f9a0",23057:"1ad7eda9",23851:"aaf0b50e",23946:"ad7
8fa4c",24079:"3a7c022b",24153:"a85450b6",24438:"f1ca4a2b",24608:"9c4d2d11",24825:"817ca35c",24938:"fdbcbb23",24958:"bde97521",25048:"87f69e44",25077:"39f84e06",25146:"0997c567",25246:"b5a11b9d",25356:"e744b233",25757:"47e735a8",26074:"cc3aa371",26107:"b5fdfa9c",26234:"ce958100",26802:"c15ebab9",26866:"b52b946a",27307:"a5de26aa",27377:"10548e4b",27598:"8b29878d",27624:"5389827e",27657:"b399705d",27693:"b34f260b",27872:"694cc66b",27918:"d0a8d8ed",27991:"4ac447ee",28267:"922c9c35",28441:"ff1cda8f",28863:"e78497cb",29025:"ad1779fa",29231:"6939bb2d",29514:"c2b68c2b",29893:"6400f7b4",29898:"fabe3892",30010:"a251ece7",30021:"8103d2bc",30765:"7d6a817f",30870:"0a7ea360",31034:"657c9eb3",31105:"601dcbb2",31210:"99bebbdf",31460:"73b8518f",31495:"c91216ad",31503:"2c1b93ad",31508:"a3894107",31532:"8a4f7677",31680:"823a7231",31684:"b02c9aa6",32089:"2e3743ec",32118:"c4b5d617",32185:"e3b8eb14",32227:"4cbb576e",32236:"d74dfb6a",32367:"e00f9da9",32523:"4e711026",32751:"35cffd82",32887:"32d9e8e2",33367:"2e74a184",33492:"18242563",33560:"3d37821a",33763:"13ea4824",33893:"9c209ddd",34224:"b1754939",34353:"5df010f9",34610:"1714674b",34643:"dea6b928",34817:"f5960bf6",35032:"593d5472",35328:"bcd6e1e2",35420:"979421bb",35456:"e55e1167",35514:"dbdedade",35600:"350ac85a",35693:"9fe9997d",35707:"71cdb4d7",35971:"6fafd2f6",36093:"0ac5190c",36129:"d4d41b5b",36298:"3a36bf4e",36565:"ff24bae5",36882:"adaced3e",37039:"fb0272bc",37259:"f4c9441f",37410:"7a21e8ee",38399:"282f4b75",38659:"83321961",38758:"43634279",38814:"82effc26",38890:"3685fb76",38933:"2aef4719",39289:"b1fa1f56",39633:"00747b08",39792:"105fd077",39976:"df33ef04",40116:"940177fa",40206:"366c057e",40335:"62c09e15",40414:"02e194ae",40512:"894e2f8e",40561:"4b5a46ab",40705:"8f8b91e7",40968:"cb073fc8",40992:"9759ebdb",41099:"86c883cf",41115:"49dbda5f",41772:"396480ae",42140:"f4b0ef01",42150:"2e2273df",42479:"64d658ec",42553:"a62ebfeb",42632:"5305238a",43257:"776c3f9d",43397:"0a3344ae",43611:"19e99f2b",43956:"55f22f88",43976:"683d82c7",44008:"a5952380",44043:"dd23ea4d",44265:"c07ff1fb",44482:"ff4e5b9b",44544:"0cb89fa6",44801:"7bcab8c9",44849:"fd8a19d4",45037:"c1a9a579",45141:"416ea356",45290:"6e6bbb42",45327:"d9b995e6",45389:"2266523d",45589:"c6032b91",45602:"55d99a93",45661:"6d358b1a",45843:"3ce6215c",45907:"3a7096a4",46103:"c269c6ad",46402:"4bfe5322",46750:"2d2b65a4",46871:"6a0d45ec",46939:"af368ed5",46945:"4deecdf7",47025:"b2455dd2",47176:"d6a48d9b",47224:"8350e7bb",47371:"75ac95a0",47429:"fe4f0b48",47760:"9f080d13",47871:"775ce29b",47953:"ff7ca46e",48360:"704c48e5",48493:"a87b73be",48510:"9d33d44b",48610:"884dfaf4",48932:"0a2ccfb1",48934:"10f3a163",48951:"04e2ec14",48983:"99ada184",49073:"1b25eb30",49352:"43cad4ba",49526:"2e0b689f",49836:"cfaa7ef2",49933:"48463dd2",50683:"a872d20e",50765:"39abfd54",50801:"f8bc35d7",50947:"0cc3e088",50996:"79164d6e",51334:"f84c3fb8",51358:"b52f814a",51669:"781523b4",51969:"04895f4a",52057:"d414b0a4",52066:"13035838",52341:"73eb8e32",52535:"813fdab8",52791:"80206f7e",52825:"58568080",52989:"a3cd670c",53233:"861129ab",53249:"a92dedd1",53325:"8c55345d",53414:"2d023348",53544:"be4aee5c",53550:"38b2fc26",53608:"51fbf6e8",53674:"d66956ff",53981:"0501d788",54086:"fd0b8865",54118:"4f07fe4c",54185:"eedaaa83",54202:"8704e433",54351:"d2e4f02c",54827:"221219b1",55040:"d1cb509f",55212:"a98669a3",55604:"da90566d",55739:"c16587c9",55844:"8af1b4e3",55901:"ece48539",56054:"ea7982d5",56436:"126d99cf",56556:"843e3673",56592:"97df0ab9",56637:"c44dca55",56681:"bd27ea90",56733:"cbe4865f",57186:"24d8f4dc",57242:"c56ec24e",57520:"d7c702e7",57626:"b28cc
499",57791:"3f2df52f",58283:"582213c5",58284:"02eb845f",58326:"b50a1ad3",58375:"299a811d",59273:"20dcc2e0",59289:"665c9727",59310:"60d2b2a2",59591:"db1cce21",59656:"69668bd4",59732:"ebea58ca",59801:"80e44253",60002:"953a68a1",60045:"32fa5591",60109:"5e7dacc4",60195:"0977f346",60591:"c5eb60b7",60851:"e2d6a9ba",61285:"9125b074",61428:"5d865764",61551:"32bdbf62",61689:"f27a7a8d",61854:"f7f140d1",62039:"ab30f483",62111:"1cd62ed7",62121:"c0d66c5c",62226:"ce1b52d2",62316:"2e53f7c7",62394:"127504f9",62829:"e29b823a",63020:"2061029d",63058:"bffba846",63107:"2452e806",63454:"a5822721",63818:"d881a858",64013:"75f40f00",64063:"9ba6cd49",64107:"bbac9179",64243:"64199d87",64416:"66cc4e72",64422:"7f37a95e",64519:"88bf6eb9",64599:"bee6e2cd",64873:"2869890f",64884:"10798e59",64985:"f6241921",65078:"7777c109",65112:"9205de00",65444:"8a6f349f",65467:"ece7bf27",65477:"9e2aac8b",65574:"a3fd16aa",65628:"c261c2b6",65655:"eb3568af",65842:"ec8d426c",65932:"ae8658cd",66170:"6cceac36",66226:"8047968b",66277:"531643ed",66562:"d1db0526",66627:"ccccff81",66778:"87cc0099",66954:"5c118f9f",67153:"2bd3f770",67285:"6e3629f5",67301:"35652e99",67442:"77572a1a",67492:"7473edac",67493:"bb01ace5",67525:"74efa83d",67541:"498e1050",67664:"6e803a88",67910:"030fcd0a",67982:"90e28c3e",68045:"60b02929",68097:"064ef2b9",68217:"51ed944b",68398:"7aec6936",68539:"c61fff36",68616:"e453b2b9",68626:"abc91477",68869:"aa78a90d",68952:"4b2959bc",69125:"bbaaa208",69437:"801cd286",69468:"04631c0e",69618:"93ac4698",69748:"92a03eee",70033:"c81dd0c8",70137:"368a5099",70144:"92d738b0",70525:"23197b34",70625:"7957c2c3",70710:"50baea1c",70714:"96cdf24d",70831:"99acb492",70956:"cd763dcb",71247:"8b4ea1ef",71843:"21cf01bf",72129:"c8ec0c77",72498:"5d7c0346",72975:"5680c01b",73028:"688cc111",73255:"42586d2b",73460:"5f5bb905",73464:"3900e296",73566:"e4f00ea4",73657:"9ec77816",74075:"4055cc8e",74095:"151ea8e0",74121:"89f11c39",74270:"0cfc0b2e",74436:"49293339",74549:"ac95ce6c",74692:"1eaaee6e",74783:"b8d70a26",74987:"8fa80799",75024:"74c9462d",75070:"6a69a2f3",75189:"53ebff88",75492:"449a8d40",75626:"b82ec7c1",76306:"58f424ba",76374:"e84b10de",77142:"58e97215",77258:"7d12cb50",77269:"bb2b45c0",77308:"b9bacdeb",77408:"6ab376ff",77518:"77ebdecc",77634:"0c20142e",77736:"640151b2",77999:"5a2db566",78029:"84c90162",78039:"87a215ee",78060:"9273d2d9",78250:"83166b8e",78273:"8fdb4e87",78504:"b7737141",78785:"1961bedc",78793:"459e3f97",79208:"9f4dc055",79311:"d55e217d",79807:"11748ff0",79915:"cfd9d0b4",80053:"e882ea03",80077:"39adf45f",80203:"09ec72d0",80308:"426a8693",80484:"3e4b8dd7",80576:"02760f9d",80732:"f9e2201b",80957:"4fd62edd",81005:"fbb3df04",81377:"b564a3f3",81489:"20017eac",81714:"406f0762",81842:"de2dc423",81926:"5a6e4cc3",82060:"43ad112d",82241:"b48d5c7e",82545:"92663992",82815:"e6bb7000",83175:"65ab5364",83390:"bfb8ce7f",83440:"43e78a9e",83552:"ab1cd766",83782:"a73df2b7",83792:"f5d52c8a",83869:"c4848101",83890:"9f8a4ccb",84041:"87972b88",84128:"3f768f80",84217:"7e5fd77d",84621:"9ec906be",84633:"d7c3d271",84849:"f38a38a5",84866:"5fd157bf",85050:"18d3c9cb",85115:"710d4a0c",85136:"8a0afea4",85232:"58fb1e51",85435:"15019197",85449:"31077f7f",85481:"aec9457a",85493:"29f86198",85591:"f933c583",85637:"a5671c75",85760:"f6e0a06a",86561:"c3c75307",86599:"7fbea23f",86646:"b4060782",86894:"d2081f40",87054:"e59e7145",87280:"de394ad9",87388:"93580f74",87456:"c4c4342f",87464:"cbddcd27",87682:"4a113f2e",87709:"e08cb997",87754:"0d4a573e",87819:"9cda9b8b",88187:"33cb0fba",88433:"70b6fd4d",88532:"3a9112b8",88838:"ce6821fb",89459:"81b65887",89480:"79950ca4",89670:"8898bde
3",89738:"79c0042d",89749:"dd663aed",89778:"85cc1d55",89801:"63b959ff",90064:"47ace020",90072:"e8a8f895",90125:"94b03ee0",90239:"58d3e98f",90250:"128dc57e",90371:"e4037c42",90533:"b8164802",90728:"471deafa",90814:"f59a7fa7",91360:"168ad11f",91375:"a293ad8a",91610:"9bace73b",91679:"9d380da4",91799:"255a9367",91951:"9047373d",91983:"a1d5ce3c",92074:"6b207a3b",92200:"2c247713",92273:"c616aef0",92290:"b3d7d750",92469:"89da1599",92518:"ac395260",92519:"aa4dfbd2",92706:"bb0be4f8",92715:"3e1cf124",92872:"fa5b18bf",93042:"a01f354a",93089:"1ea8fcbb",93119:"3c04575c",93171:"cfe43121",93264:"bddb9a40",93316:"99fd8323",93430:"d92b7d53",93570:"7e2183ff",93586:"2bdb92d3",93810:"92610bb8",93954:"dc9c9686",94073:"fe70bc9b",94507:"6d2364b5",94532:"e2dcc3c9",94551:"dfdd8a66",94629:"2eaf9893",94915:"4be70ab3",94976:"8f29526e",95159:"346a075c",95597:"0f252f81",95869:"060bac3c",95980:"e3a399ad",96170:"256bc801",96241:"47e8e610",96476:"57f1e942",96477:"36284a74",96542:"eee928eb",96647:"efbfbff6",96698:"1506ccce",96701:"aac9cc11",96736:"436b2353",97616:"ffab3f59",97915:"0b4f047d",97948:"96b083c6",98049:"95b7d2f3",98129:"1956ed49",98311:"675ef83e",98701:"75250e43",98981:"c903e5f1",99615:"0c948941",99784:"fff93b62",99874:"fa81c65b",99923:"c6bbe583",99924:"5ffd65fa",99928:"fc84903d"}[e]+".js"},n.miniCssF=function(e){return"assets/css/styles.92d87943.css"},n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),n.o=function(e,a){return Object.prototype.hasOwnProperty.call(e,a)},f={},d="linkis-web-apache:",n.l=function(e,a,c,b){if(f[e])f[e].push(a);else{var t,r;if(void 0!==c)for(var o=document.getElementsByTagName("script"),i=0;i Apache Linkis - + @@ -15,7 +15,7 @@
Linkis Blessing Wall
We hope WeDataSphere becomes better and better, and that we all work together toward national digital transformation.
We hope the WeDataSphere community can contribute more open source projects in the data platform field, truly helping you reduce the adoption cost of big data technology and improve the efficiency of enterprise big data use.
I wish WeDataSphere continued success, and hope more of its projects become Apache top-level projects. Thanks to Shuai, Qiang, Hua, You, Ping and the other community leaders for their strong support and help with product use and secondary development.
We hope WeDataSphere becomes more and more robust, and that its community becomes more and more active.
I have been working with WeBank's DSS for more than half a year. DSS's excellent framework design, combined with the Linkis computing middleware, greatly simplifies the development of upper-layer applications. With the help of DSS, our team has quickly developed several data console modules. I hope the DSS community gets better and better, with more developers contributing code, so that Linkis can support more underlying engines and enrich the third-party application components of the DSS community ecosystem.
DSS has done a great thing: it makes big data no longer a hard road, puts being an analyst within everyone's reach, and keeps one-stop development, analysis, and operations from being split apart. Tribute to DSS.
We very much agree with WeDataSphere's one-stop concept, which shields users well from the complexity of the underlying open source components, greatly lowers the threshold of big data development, and lets users focus more on their own business and on maximizing the value of data. Although an out-of-the-box experience has not yet been achieved, given the complexity of the technology and the diversity of the components, that is the direction of our efforts. As more users and experts join the project and keep polishing the product, it will become more and more intelligent and user-friendly; the future looks promising. In addition, as a developer, I am honored to participate in the development of the WeDataSphere Linkis component, and I hope to contribute more to the community.
- + \ No newline at end of file diff --git a/blog/2022/02/08/how-to-user-blog/index.html b/blog/2022/02/08/how-to-user-blog/index.html index 675e02d9d84..bb39d55a2fe 100644 --- a/blog/2022/02/08/how-to-user-blog/index.html +++ b/blog/2022/02/08/how-to-user-blog/index.html @@ -7,7 +7,7 @@ How to Write a Blog | Apache Linkis - + @@ -28,7 +28,7 @@ blog/authors.yml

Casion:
  name: Casion
  title: Development Engineer of WeBank
  url: https://github.com/casionone/
  image_url: https://avatars.githubusercontent.com/u/7869972?v=4
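Once the author is defined, a blog post can reference it from its front matter. A minimal sketch (the file name, title, and tag below are made-up examples; the authors value must match the key defined in blog/authors.yml):

# Create a post whose front matter references the "Casion" author key
cat > blog/2022-02-08-my-first-post.md <<'EOF'
---
title: My First Post
authors: Casion
tags: [blog]
---

Post body goes here.
EOF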
- + \ No newline at end of file diff --git a/blog/2022/02/21/linkis-deploy/index.html b/blog/2022/02/21/linkis-deploy/index.html index 3f19dbbb14d..0527fad22ed 100644 --- a/blog/2022/02/21/linkis-deploy/index.html +++ b/blog/2022/02/21/linkis-deploy/index.html @@ -7,7 +7,7 @@ Linkis Deployment Troubleshooting | Apache Linkis - + @@ -74,7 +74,7 @@ search

7. How to obtain relevant information#

The Linkis official website documentation is constantly improving; you can browse or keyword-search the relevant documents on this official website.

Related blog post links

- + \ No newline at end of file diff --git a/blog/2022/03/20/openlookeng/index.html b/blog/2022/03/20/openlookeng/index.html index 50e969e1182..8d24d59ae04 100644 --- a/blog/2022/03/20/openlookeng/index.html +++ b/blog/2022/03/20/openlookeng/index.html @@ -7,7 +7,7 @@ Implementation of OpenLookEng Engine | Apache Linkis - + @@ -19,7 +19,7 @@ image

Building on Linkis and OpenLooKeng provides the following capabilities (a minimal task-submission sketch follows this list):

    1. The connection capability of the computing middleware layer based on Linkis allows upper-layer application tools to quickly connect to OpenLooKeng, submit tasks, and obtain logs, progress, and results.
    2. Based on the public service capability of Linkis, custom variable substitution, UDF management, and the like can be applied to OpenLooKeng's SQL.
    3. Based on the context capability of Linkis, the results of OpenLooKeng can be passed to downstream ECs such as Spark and Hive for query.
    4. Linkis-based resource management and multi-tenancy capabilities can isolate tasks by tenant when using OpenLooKeng resources.
    5. Based on OpenLooKeng's connector capability, upper-layer application tools can submit cross-source heterogeneous queries and cross-domain, cross-DC queries, and get results back in seconds.
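For illustration, here is a minimal sketch of submitting a SQL task to the OpenLooKeng EC through the Linkis entrance REST interface. The gateway address, session cookie, engine version label, and sample query are placeholders/assumptions, not values from this post:

# Submit a SQL task to the OpenLooKeng EC via the Linkis 1.x-style entrance API
curl -X POST "http://${LINKIS_GATEWAY_HOST}:9001/api/rest_j/v1/entrance/submit" \
  -H "Content-Type: application/json" \
  -H "Cookie: ${LINKIS_SESSION_COOKIE}" \
  -d '{
    "executionContent": {"code": "select * from system.runtime.nodes", "runType": "sql"},
    "labels": {
      "engineType": "openlookeng-1.5.0",
      "userCreator": "hadoop-IDE"
    }
  }'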

Follow-up plans#

In the future, the two communities will continue to cooperate and plan to launch the following functions:

    1. Linkis supports OpenLooKeng on YARN mode.
    2. Linkis has completed resource management and control of OpenLooKeng; tasks can now be queued by Linkis and submitted only when resources are sufficient.
    3. Based on OpenLooKeng's mixed computing capability, the capability of Linkis Orchestrator will be optimized to enable mixed computing between ECs in a subsequent plan.
- + \ No newline at end of file diff --git a/blog/2022/04/15/how-to-download-engineconn-plugin/index.html b/blog/2022/04/15/how-to-download-engineconn-plugin/index.html index 0b4a7156c49..aadbf4c1368 100644 --- a/blog/2022/04/15/how-to-download-engineconn-plugin/index.html +++ b/blog/2022/04/15/how-to-download-engineconn-plugin/index.html @@ -7,7 +7,7 @@ How to Download Engine Plugins Not Included in the Installation Package By Default | Apache Linkis - + @@ -18,7 +18,7 @@

Copy the engine material package to be used into the engine plugin directory of Linkis, and then refresh the engine material, as sketched below.
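A minimal sketch of these two steps, assuming a default Linkis 1.x deployment layout (the engine name and paths are placeholders; adjust them to your install):

# Copy the downloaded engine material package into the Linkis engine plugin directory
cp -r openlookeng ${LINKIS_HOME}/lib/linkis-engineconn-plugins/
# Reload the engine material by restarting the engineplugin service
sh ${LINKIS_HOME}/sbin/linkis-daemon.sh restart cg-engineplugin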

For the detailed process, refer to Installing the EngineConnPlugin engine.

- + \ No newline at end of file diff --git a/blog/2022/06/09/meetup-content-review/index.html index e744b736263..224795cc640 100644 --- a/blog/2022/06/09/meetup-content-review/index.html +++ b/blog/2022/06/09/meetup-content-review/index.html @@ -7,7 +7,7 @@ Apache Linkis (Incubating) Meetup | Apache Linkis - + @@ -15,7 +15,7 @@
- + \ No newline at end of file diff --git a/blog/2022/07/04/how-to-add-auto-bot/index.html b/blog/2022/07/04/how-to-add-auto-bot/index.html index 603450af421..832f3f023a3 100644 --- a/blog/2022/07/04/how-to-add-auto-bot/index.html +++ b/blog/2022/07/04/how-to-add-auto-bot/index.html @@ -7,7 +7,7 @@ How to add a GitHub Action for the GitHub repository | Apache Linkis - + @@ -32,7 +32,7 @@ - name: Close Issue uses: peter-evans/close-issue@v2 if: ${{ github.event.pull_request.merged }} with: issue-number: ${{ steps.Closer.outputs.issueNumber }} comment: The associated PR has been merged, this issue is automatically closed, you can reopend if necessary. env: Github_Token: ${{ secrets.GITHUB_TOKEN }} PRNUM: ${{ github.event.pull_request.number }}
- + \ No newline at end of file diff --git a/blog/2022/07/16/deploy-linkis-with-kubernetes/index.html b/blog/2022/07/16/deploy-linkis-with-kubernetes/index.html index 00a3b9d43c1..801f27db6bb 100644 --- a/blog/2022/07/16/deploy-linkis-with-kubernetes/index.html +++ b/blog/2022/07/16/deploy-linkis-with-kubernetes/index.html @@ -7,7 +7,7 @@ deploy linkis with kubernetes | Apache Linkis - + @@ -48,7 +48,7 @@ kubectl config view  kubectl config get-contexts  kubectl cluster-info  
- + \ No newline at end of file diff --git a/blog/archive/index.html b/blog/archive/index.html index 06a4285ab4b..e5d3d748dc1 100644 --- a/blog/archive/index.html +++ b/blog/archive/index.html @@ -7,7 +7,7 @@ Archive | Apache Linkis - + @@ -15,7 +15,7 @@
- + \ No newline at end of file diff --git a/blog/index.html b/blog/index.html index c28e9021dfb..96be3a5320f 100644 --- a/blog/index.html +++ b/blog/index.html @@ -7,7 +7,7 @@ Blog | Apache Linkis - + @@ -72,7 +72,7 @@ image

Building on Linkis and OpenLooKeng provides the following capabilities:

    1. The connection capability of the computing middleware layer based on Linkis allows upper-layer application tools to quickly connect to OpenLooKeng, submit tasks, and obtain logs, progress, and results.
    2. Based on the public service capability of Linkis, custom variable substitution, UDF management, and the like can be applied to OpenLooKeng's SQL.
    3. Based on the context capability of Linkis, the results of OpenLooKeng can be passed to downstream ECs such as Spark and Hive for query.
    4. Linkis-based resource management and multi-tenancy capabilities can isolate tasks by tenant when using OpenLooKeng resources.
    5. Based on OpenLooKeng's connector capability, upper-layer application tools can submit cross-source heterogeneous queries and cross-domain, cross-DC queries, and get results back in seconds.

Follow-up plans#

In the future, the two communities will continue to cooperate and plan to launch the following functions:

    1. Linkis supports OpenLooKeng on Yarn mode
    2. Linkis will complete resource management and control for OpenLooKeng, so that tasks can be queued by Linkis and submitted only when resources are sufficient
    3. Building on OpenLooKeng's mixed-computation capability, optimize the Linkis Orchestrator to support mixed computation across ECs in a subsequent release.
- + \ No newline at end of file diff --git a/blog/page/2/index.html b/blog/page/2/index.html index 7e3df76baa6..056d084c3d5 100644 --- a/blog/page/2/index.html +++ b/blog/page/2/index.html @@ -7,7 +7,7 @@ Blog | Apache Linkis - + @@ -74,7 +74,7 @@ search

7. How to obtain relevant information#

The Linkis official website documentation is constantly improving; you can browse or keyword-search the related documents on this site.

Related blog post links

· 4 min read
Casion

This article mainly guides you through how to publish blog posts on the Linkis official website. You are welcome to submit blog posts about Apache Linkis, including but not limited to Linkis installation, source code analysis, architecture, and experience sharing.

This article mainly refers to Docusaurus' official blog post specifications and examples (https://docusaurus.io/zh-CN/blog). The guidelines and specifications may not be perfect yet; any comments or suggestions are welcome.

- + \ No newline at end of file diff --git a/blog/tags/blog/index.html b/blog/tags/blog/index.html index c45135706a4..285237c2afb 100644 --- a/blog/tags/blog/index.html +++ b/blog/tags/blog/index.html @@ -7,7 +7,7 @@ One post tagged with "blog" | Apache Linkis - + @@ -15,7 +15,7 @@

One post tagged with "blog"

View All Tags

· 4 min read
Casion

This article mainly guides you through how to publish blog posts on the Linkis official website. You are welcome to submit blog posts about Apache Linkis, including but not limited to Linkis installation, source code analysis, architecture, and experience sharing.

This article mainly refers to Docusaurus' official blog post specifications and examples (https://docusaurus.io/zh-CN/blog). The guidelines and specifications may not be perfect yet; any comments or suggestions are welcome.

- + \ No newline at end of file diff --git a/blog/tags/engine/index.html b/blog/tags/engine/index.html index 2eca0169268..218075d0a04 100644 --- a/blog/tags/engine/index.html +++ b/blog/tags/engine/index.html @@ -7,7 +7,7 @@ 2 posts tagged with "engine" | Apache Linkis - + @@ -22,7 +22,7 @@ image

The combination of Linkis and OpenLooKeng provides the following capabilities:

    1. The connection capability of the computing middleware layer provided by Linkis allows upper-layer application tools to quickly connect to OpenLooKeng, submit tasks, and obtain logs, progress, and results.
    2. Based on the public service capability of Linkis, custom variable substitution, UDF management, and so on can be performed for OpenLooKeng SQL.
    3. Based on the context capability of Linkis, the results of OpenLooKeng can be passed to downstream ECs such as Spark and Hive for further querying.
    4. Based on the resource management and multi-tenancy capabilities of Linkis, tasks can be isolated by tenant when using OpenLooKeng resources.
    5. Based on OpenLooKeng's connector capability, upper-layer application tools can submit cross-source heterogeneous queries as well as cross-domain and cross-DC queries, with results returned in seconds.

Follow-up plans#

In the future, the two communities will continue to cooperate and plan to launch the following functions:

    1. Linkis supports OpenLooKeng on Yarn mode
    2. Linkis will complete resource management and control for OpenLooKeng, so that tasks can be queued by Linkis and submitted only when resources are sufficient
    3. Building on OpenLooKeng's mixed-computation capability, optimize the Linkis Orchestrator to support mixed computation across ECs in a subsequent release.
- + \ No newline at end of file diff --git a/blog/tags/github/index.html b/blog/tags/github/index.html index 839c36231f7..5cc17bcf4ac 100644 --- a/blog/tags/github/index.html +++ b/blog/tags/github/index.html @@ -7,7 +7,7 @@ 2 posts tagged with "github" | Apache Linkis - + @@ -65,7 +65,7 @@ - name: Close Issue uses: peter-evans/close-issue@v2 if: ${{ github.event.pull_request.merged }} with: issue-number: ${{ steps.Closer.outputs.issueNumber }} comment: The associated PR has been merged, this issue is automatically closed, you can reopend if necessary. env: Github_Token: ${{ secrets.GITHUB_TOKEN }} PRNUM: ${{ github.event.pull_request.number }}
- + \ No newline at end of file diff --git a/blog/tags/guide/index.html b/blog/tags/guide/index.html index 2d998a15977..98c27a255c5 100644 --- a/blog/tags/guide/index.html +++ b/blog/tags/guide/index.html @@ -7,7 +7,7 @@ 2 posts tagged with "guide" | Apache Linkis - + @@ -18,7 +18,7 @@

Copy the engine material package to be used into the engine plugin directory of Linkis, and then refresh the engine material.

For the detailed process, refer to Installing the EngineConnPlugin engine.

· 4 min read
Casion

This article mainly guides you through how to publish blog posts on the Linkis official website. You are welcome to submit blog posts about Apache Linkis, including but not limited to Linkis installation, source code analysis, architecture, and experience sharing.

This article mainly refers to Docusaurus' official blog post specifications and examples (https://docusaurus.io/zh-CN/blog). The guidelines and specifications may not be perfect yet; any comments or suggestions are welcome.

- + \ No newline at end of file diff --git a/blog/tags/index.html b/blog/tags/index.html index 87b9d1b00e0..58621915abc 100644 --- a/blog/tags/index.html +++ b/blog/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -15,7 +15,7 @@
- + \ No newline at end of file diff --git a/blog/tags/meetup/index.html b/blog/tags/meetup/index.html index b3766eb8f48..6c8fc7736fb 100644 --- a/blog/tags/meetup/index.html +++ b/blog/tags/meetup/index.html @@ -7,7 +7,7 @@ One post tagged with "meetup" | Apache Linkis - + @@ -15,7 +15,7 @@
- + \ No newline at end of file diff --git a/community/development_specification/api/index.html b/community/development_specification/api/index.html index 2bbff67f982..6813a68e9d5 100644 --- a/community/development_specification/api/index.html +++ b/community/development_specification/api/index.html @@ -7,7 +7,7 @@ API Specification | Apache Linkis - + @@ -26,7 +26,7 @@

Convention:

  • method: Returns the requested RESTful API URL, mainly for the WebSocket mode.

  • status: Returns status information, where -1 means not logged in, 0 means success, 1 means error, 2 means failed validation, and 3 means no access to the interface.

  • data: Returns the specific data.

  • message: Returns a prompt message for the request. If status is not 0, message returns an error message, and data may contain a stack field with the specific stack trace.

In addition, different status values map to different HTTP status codes. Under normal circumstances (see the sketch after this list):

  • When status is 0, the HTTP status code is 200

  • When the status is -1, the HTTP status code is 401

  • When status is 1, the HTTP status code is 400

  • When status is 2, the HTTP status code is 412

  • When status is 3, the HTTP status code is 403
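
To make the convention concrete, here is a minimal illustrative sketch in Java (a hypothetical class, not Linkis source code) that models the response envelope above, including the status-to-HTTP mapping:

    import java.util.Map;

    /** Hypothetical model of the response envelope described above. */
    public class ResponseEnvelope {
        private String method;            // the requested RESTful API URL
        private int status;               // -1/0/1/2/3 as defined above
        private String message;           // prompt or error message
        private Map<String, Object> data; // the specific data

        /** Maps the convention's status values to HTTP status codes. */
        public int toHttpStatusCode() {
            switch (status) {
                case 0:  return 200; // success
                case -1: return 401; // not logged in
                case 1:  return 400; // error
                case 2:  return 412; // failed validation
                case 3:  return 403; // no access to the interface
                default: throw new IllegalStateException("unknown status: " + status);
            }
        }
    }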

- + \ No newline at end of file diff --git a/community/development_specification/concurrent/index.html b/community/development_specification/concurrent/index.html index 8d533fead22..0c3ef571c71 100644 --- a/community/development_specification/concurrent/index.html +++ b/community/development_specification/concurrent/index.html @@ -7,7 +7,7 @@ Concurrent Specification | Apache Linkis - + @@ -15,7 +15,7 @@

Concurrent Specification

  1. [Compulsory] Ensure that obtaining a singleton object is thread-safe; operations inside singletons must also be kept thread-safe.
  2. [Compulsory] Thread resources must be provided through a thread pool; explicitly creating threads in the application is not allowed.
  3. SimpleDateFormat is a thread-unsafe class. It is recommended to use the DateUtils utility class instead (see the sketch after this list).
  4. [Compulsory] Under high concurrency, synchronous calls should consider the performance cost of locking. If you can use lock-free data structures, don't use locks. If you can lock a block, don't lock the whole method body. If you can use object locks, don't use class locks.
  5. [Compulsory] Use ThreadLocal as little as possible. Whenever a ThreadLocal holds an object that needs to be closed, remember to close it to release the resource.
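
A minimal sketch of items 2 and 3 above, using java.time (DateTimeFormatter is immutable and thread-safe, unlike SimpleDateFormat) and a fixed thread pool; the pool size is illustrative only:

    import java.time.LocalDateTime;
    import java.time.format.DateTimeFormatter;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class ConcurrentSpecDemo {
        // DateTimeFormatter is immutable and thread-safe, so it can be shared as a constant
        private static final DateTimeFormatter FORMATTER =
                DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");

        // Provide thread resources through a pool instead of creating threads explicitly
        private static final ExecutorService POOL = Executors.newFixedThreadPool(4);

        public static void main(String[] args) {
            for (int i = 0; i < 8; i++) {
                POOL.submit(() -> System.out.println(FORMATTER.format(LocalDateTime.now())));
            }
            POOL.shutdown();
        }
    }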
- + \ No newline at end of file diff --git a/community/development_specification/exception_catch/index.html b/community/development_specification/exception_catch/index.html index 5fb296bd41f..79997acadee 100644 --- a/community/development_specification/exception_catch/index.html +++ b/community/development_specification/exception_catch/index.html @@ -7,7 +7,7 @@ Exception Catch Specification | Apache Linkis - + @@ -15,7 +15,7 @@

Exception Catch Specification

  1. [Mandatory] For the exceptions of each small module, a dedicated exception class should be defined to facilitate the subsequent generation of error codes for users. It is not allowed to throw RuntimeException or a bare Exception directly (see the sketch after this list).
  2. Try not to try-catch a large section of code; this is irresponsible. Please distinguish between stable and unstable code when catching. Stable code is code that will not fail under any circumstances. When catching around unstable code, distinguish the exception types as much as possible and then handle each accordingly.
  3. [Mandatory] The purpose of catching an exception is to handle it; do not catch it and then throw it away. If you do not want to handle it, throw the exception to its caller. Note: do not use e.printStackTrace() under any circumstances! The outermost business layer must handle exceptions and turn them into content that users can understand.
  4. The finally block must close resource objects and stream objects; wrap the close in a try-catch if it may throw.
  5. [Mandatory] Prevent NullPointerException. The return value of a method may be null; it is not mandatory to return an empty collection or empty object, but a comment must be added to fully explain under what circumstances null is returned. RPC and Spring Cloud Feign calls all require null checks.
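
A minimal sketch of items 1 and 3 above; the exception class name and error code are hypothetical, not taken from the Linkis source:

    /** Hypothetical module-specific exception carrying an error code for users. */
    public class ModuleErrorException extends Exception {
        private final int errCode;

        public ModuleErrorException(int errCode, String message, Throwable cause) {
            super(message, cause);
            this.errCode = errCode;
        }

        public int getErrCode() {
            return errCode;
        }
    }

    class SubmitService {
        /** Handles what it can; otherwise wraps with context and throws to the caller. */
        public void submit(String code) throws ModuleErrorException {
            try {
                parse(code); // unstable code: may fail on bad input
            } catch (IllegalArgumentException e) {
                // never e.printStackTrace(); wrap with context and rethrow
                throw new ModuleErrorException(20001, "invalid task code: " + code, e);
            }
        }

        private void parse(String code) {
            if (code == null || code.isEmpty()) {
                throw new IllegalArgumentException("empty code");
            }
        }
    }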
- + \ No newline at end of file diff --git a/community/development_specification/license/index.html b/community/development_specification/license/index.html index a7b2789e520..a4362b30be0 100644 --- a/community/development_specification/license/index.html +++ b/community/development_specification/license/index.html @@ -7,7 +7,7 @@ License Notes | Apache Linkis - + @@ -36,7 +36,7 @@ Maven repository:https://mvnrepository.com/artifact/io.etcd/jetcd-corehttps://mvnrepository.com/artifact/io.etcd/jetcd-launcher

Reference articles#

- + \ No newline at end of file diff --git a/community/development_specification/log/index.html b/community/development_specification/log/index.html index 32959e7cce9..82658653525 100644 --- a/community/development_specification/log/index.html +++ b/community/development_specification/log/index.html @@ -7,7 +7,7 @@ Log Specification | Apache Linkis - + @@ -15,7 +15,7 @@

Log Specification

  1. [Convention] Linkis chooses SLF4J and Log4J2 as the log printing framework, removing the logback in the Spring Cloud packages. Since SLF4J will randomly select a logging framework to bind to, after introducing new Maven dependencies you need to exclude bridge packages such as slf4j-log4j12; otherwise log printing will be broken. However, if the newly introduced Maven package depends directly on a package such as Log4J, do not exclude it, or the code may fail at runtime.
  2. [Configuration] The log4j2 configuration file defaults to log4j2.xml and needs to be placed on the classpath. To combine it with Spring Cloud, "logging:config:classpath:log4j2-spring.xml" (the location of the configuration file) can be added to application.yml.
  3. [Compulsory] The APIs of concrete logging systems (log4j2, Log4j, Logback) cannot be used directly in classes. For Scala code, inheriting from the Logging trait is enforced. For Java, use LoggerFactory.getLogger(getClass()).
  4. [Development Convention] Since EngineConn is started by EngineConnManager from the command line, we specify the path of the log configuration file on the command line, and also modify the log configuration during code execution, in particular redirecting the EngineConn log to the system's standard out. So the log configuration file for EngineConn is by convention defined in the EnginePlugin and named log4j2-engineConn.xml (this is the conventional name and cannot be changed).
  5. [Compulsory] Strictly differentiate log levels. Fatal-level problems should be thrown, and the process should exit via System.exit(-1) while the Spring Cloud application is initializing. Error-level exceptions are those that developers must care about and handle; do not use this level casually. The WARN level is for user-action exceptions and logs used to troubleshoot bugs later. INFO is for key process logs. DEBUG is development-mode logging; write as little as possible.
  6. [Compulsory] Requirements: every module must have INFO-level logs; every key process must have INFO-level logs. Daemon threads must have WARN-level logs for cleaning up resources, etc.
  7. [Compulsory] Exception information should include two types of information: the context (crime scene) information and the exception stack. If it is not handled, throw it upwards with the throw keyword. Example: logger.error(parameters/objects.toString() + "_" + e.getMessage(), e); (see the sketch after this list)
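
A minimal sketch of items 3 and 7 above, assuming SLF4J is on the classpath; the class and method names are illustrative:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class JobRunner {
        // obtain the logger through the SLF4J facade, never through log4j2/logback APIs directly
        private static final Logger logger = LoggerFactory.getLogger(JobRunner.class);

        public void run(String jobId) {
            logger.info("Start running job {}", jobId); // key process: INFO level
            try {
                execute(jobId);
            } catch (Exception e) {
                // include the context (crime scene) information AND the exception stack
                logger.error("Job failed, jobId=" + jobId + "_" + e.getMessage(), e);
            }
        }

        private void execute(String jobId) throws Exception {
            // placeholder for the real work
        }
    }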
- + \ No newline at end of file diff --git a/community/development_specification/overview/index.html b/community/development_specification/overview/index.html index 4338543a451..739e9288491 100644 --- a/community/development_specification/overview/index.html +++ b/community/development_specification/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
- + \ No newline at end of file diff --git a/community/development_specification/path_usage/index.html b/community/development_specification/path_usage/index.html index d9353544c9c..ef0a4aaa82a 100644 --- a/community/development_specification/path_usage/index.html +++ b/community/development_specification/path_usage/index.html @@ -7,7 +7,7 @@ Path Usage Specification | Apache Linkis - + @@ -15,7 +15,7 @@

Path Usage Specification

Please note: Linkis provides a unified Storage module, so you must follow the Linkis path specification when using paths or configuring them in configuration files.

  1. [Compulsory] When using a file path, whether local, HDFS, or HTTP, the scheme information must be included. Among them:

    • The scheme header for local files is: file:///;

    • The scheme header for HDFS is: hdfs:///;

    • The scheme header for HTTP is: http:///.

  2. There should be no special characters in the path; try to use combinations of English letters, underscores, and numbers (a sketch of the scheme rule follows this list).
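
A minimal illustrative helper (hypothetical, not part of the Linkis Storage module) that enforces the scheme rule above:

    public final class PathSchemes {
        private static final String[] ALLOWED = {"file:///", "hdfs:///", "http:///"};

        /** Returns true only if the path carries one of the scheme headers required above. */
        public static boolean hasValidScheme(String path) {
            if (path == null) {
                return false;
            }
            for (String scheme : ALLOWED) {
                if (path.startsWith(scheme)) {
                    return true;
                }
            }
            return false;
        }

        public static void main(String[] args) {
            System.out.println(hasValidScheme("file:///tmp/linkis/test.sql")); // true
            System.out.println(hasValidScheme("/tmp/linkis/test.sql"));        // false: scheme missing
        }
    }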
- + \ No newline at end of file diff --git a/community/development_specification/programming_specification/index.html b/community/development_specification/programming_specification/index.html index 15768e08d58..1ce2a9b2267 100644 --- a/community/development_specification/programming_specification/index.html +++ b/community/development_specification/programming_specification/index.html @@ -7,7 +7,7 @@ Programming Specification | Apache Linkis - + @@ -15,7 +15,7 @@

Programming Specification

1. Naming Convention#

  1. [Mandatory] Do not use Chinese pinyin or unintelligible abbreviations
  2. For basic Java naming conventions, please refer to naming-conventions
  3. [Constraints] There is a scalastyle configuration file in Linkis; if a name does not conform to the specification, rename it according to the scalastyle rules
  4. [Mandatory] Configuration files, startup files, process names, configuration keys, etc. also need to comply with naming conventions, which are as follows:
Classification | Style | Specifications | Examples
Configuration file | Separated by lowercase '-' | linkis-<classification level (ps/cg/mg)>-<service name>.properties | linkis-cg-linkismanager.properties
Start-stop script | Separated by lowercase '-' | linkis-<classification level>-<service name> | linkis-cg-linkismanager
Module directory | Separated by lowercase '-' | The module directory must be below the corresponding classification level, with the module name as a subdirectory | linkis-public-enhancements/linkis-bml
Process naming | Camel case naming | Starts with Linkis and ends with the service name | LinkisBMLApplication
Configuration key naming | Separated by lowercase '.' | linkis.<module name>.<key name> | linkis.bml.hdfs.prefix

2. Annotation Protocol#

  1. [Mandatory] Classes, class attributes, and interface methods must be commented; comments must follow the Javadoc specification and use the /** content */ format
  2. [Mandatory] All abstract methods (including methods in interfaces) must have Javadoc comments. In addition to return values, parameters, and exceptions, the comment must also state what the method does and what function it implements (see the sketch after this list)
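
A minimal sketch of the Javadoc convention above; the interface and method are hypothetical:

    /** Hypothetical storage client used only to illustrate the Javadoc convention. */
    public interface StorageClient {

        /**
         * Uploads a local file to the target path and returns the resource id.
         *
         * <p>Implements the material upload step: the file is copied to the unified
         * storage and registered so that later tasks can reference it.
         *
         * @param localPath the local file path, must start with file:///
         * @param targetPath the target path, must start with hdfs:/// or file:///
         * @return the id of the uploaded resource
         * @throws IllegalArgumentException if either path is missing its scheme header
         */
        String upload(String localPath, String targetPath);
    }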
- + \ No newline at end of file diff --git a/community/development_specification/release-notes/index.html b/community/development_specification/release-notes/index.html index 0389d4c24e3..519f8d937c9 100644 --- a/community/development_specification/release-notes/index.html +++ b/community/development_specification/release-notes/index.html @@ -7,7 +7,7 @@ Release-Notes Writing Specification | Apache Linkis - + @@ -16,7 +16,7 @@ Web console WebInstall InstallInstall-Scripts Install-ScriptsInstall-SQL Install-SqlInstall-Web Install-WebCommon module Common
- + \ No newline at end of file diff --git a/community/development_specification/unit_test/index.html b/community/development_specification/unit_test/index.html index 811c63e6520..858d58c61d6 100644 --- a/community/development_specification/unit_test/index.html +++ b/community/development_specification/unit_test/index.html @@ -7,7 +7,7 @@ Test Specification | Apache Linkis - + @@ -15,7 +15,7 @@

Test Specification

  1. [Mandatory] Tool classes and internal service interfaces must have test cases.
  2. [Mandatory] Unit tests must be automated (triggered by mvn compilation), independent (unit test cases cannot call each other), and repeatable (they can be executed multiple times with the same result)
  3. [Mandatory] A test case should only test one method (see the sketch after this list).
  4. [Mandatory] Exceptions in test cases cannot be caught; they need to be thrown upwards.
  5. [Mandatory] Unit test code must be written in the following project directory: src/test/java or scala; it is not allowed elsewhere.
  6. [Recommended] Unit tests need to consider boundary conditions, such as the end of a month and February.
  7. [Recommended] For database-related unit tests, consider data rollback.
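
A minimal JUnit 5 sketch of items 3 and 4 above (one behavior per test case, exceptions thrown upwards); the class under test is the hypothetical PathSchemes helper sketched earlier:

    import static org.junit.jupiter.api.Assertions.assertFalse;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import org.junit.jupiter.api.Test;

    public class PathSchemesTest {

        @Test
        public void testHasValidSchemeAcceptsLocalFile() {
            assertTrue(PathSchemes.hasValidScheme("file:///tmp/a.sql"));
        }

        // exceptions are declared and thrown upwards instead of being caught in the test
        @Test
        public void testHasValidSchemeRejectsMissingScheme() throws Exception {
            assertFalse(PathSchemes.hasValidScheme("/tmp/a.sql"));
        }
    }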
- + \ No newline at end of file diff --git a/community/development_specification/version_feature_specifications/index.html b/community/development_specification/version_feature_specifications/index.html index 48b086af904..6f135208313 100644 --- a/community/development_specification/version_feature_specifications/index.html +++ b/community/development_specification/version_feature_specifications/index.html @@ -7,7 +7,7 @@ Version and New Feature Specification | Apache Linkis - + @@ -15,7 +15,7 @@

Version and New Feature Specification

1. New version specification#

When you need a new version, you need to follow the steps below:

  1. [Mandatory] The new version must be organized for PMC and developers to discuss; meeting minutes must be recorded and sent to the mailing list
  2. [Mandatory] The scope of the new version's feature list requires an email vote; approval from 3+ PMC members is required and approval votes must outnumber negative votes
  3. [Mandatory] After the version is voted on, the corresponding version needs to be created in GitHub Projects
  4. [Mandatory] Each feature needs a separate mailing-list thread explaining the design reasons and design ideas
  5. [Mandatory] Changes involving installation, the database, or configuration modifications must be explained on the mailing list
  6. [Recommended] One feature corresponds to one issue, which corresponds to one PR
  7. [Mandatory] Each version requires CI/CD and test cases to pass before it can be released
  8. [Constraints] Each version needs a corresponding leader; the leader needs to manage related issues and PRs, hold discussions, actively respond to emails, confirm plans, track progress, etc.

2. New feature specification#

When you add new features, you need to follow the steps below:

  1. [Mandatory] New features require an email vote, with the design reasons and design ideas attached
  2. [Mandatory] New features need to be added to the corresponding version in GitHub Projects
  3. [Mandatory] Changes involving installation, the database, or configuration modifications must be explained on the mailing list
  4. [Mandatory] New features must come with new documentation
  5. [Mandatory] New features need corresponding unit tests; see the Unit Test Specification
  6. [Recommended] One feature corresponds to one issue, which corresponds to one PR
- + \ No newline at end of file diff --git a/community/how-to-contribute-to-website/index.html b/community/how-to-contribute-to-website/index.html index 15f48f2ef16..6027902f1f1 100644 --- a/community/how-to-contribute-to-website/index.html +++ b/community/how-to-contribute-to-website/index.html @@ -7,7 +7,7 @@ How to Participate in the Official Website Contribution | Apache Linkis - + @@ -22,7 +22,7 @@ After the verification is correct, the asf-staging branch can be merged to the asf-site branch. The internal mechanism of Apache will deploy the content of the asf-site branch to the formal environment. After the merge, the formal environment is considered to be updated successfully.

6 Points for attention#

  • When adding Chinese documents, you need to add English documents at the same time, otherwise the added page cannot be displayed
  • When adding a document, pay attention to the document node name and node sorting fields, as shown in the following figure
    pg-eng

7 Other#

The naming convention refers to "Alibaba Front-end Development Specification"

- + \ No newline at end of file diff --git a/community/how-to-contribute/index.html b/community/how-to-contribute/index.html index b1e3a2687e9..4163a97ff51 100644 --- a/community/how-to-contribute/index.html +++ b/community/how-to-contribute/index.html @@ -7,7 +7,7 @@ How to Participate in Project Contribution | Apache Linkis - + @@ -18,7 +18,7 @@ Whether it is a bug fix or a new feature development, please submit a PR to the dev-* branch.
  • PR and commit names should follow the principle of <type>(<scope>): <subject> (e.g. fix(entrance): fix task status check); for details, please refer to Ruan Yifeng's article Commit message and Change log writing guide.
  • If a PR contains a new feature, the documentation update should be included in that PR.
  • If this PR is not ready to merge, please add the [WIP] prefix to its name (WIP = work in progress).
  • All submissions to dev-* branches must go through at least one review before being merged
  2.4 Review Standard#

    Before contributing code, you can find out what kind of submissions are popular in review. Simply put, the more gains a submission brings and the fewer side effects or risks it carries, the higher the probability of it being merged and the faster the review. Submissions with high risk and low value are almost impossible to merge and may even be refused review.

    2.4.1 Gain#

    • Fix the main cause of the bug
    • Add or fix a function or problem that a large number of users urgently need
    • Simple and effective
    • Easy to test, with test cases
    • Reduce complexity and amount of code
    • Issues that have been discussed by the community and identified for improvement

    2.4.2 Side effects and risks#

    • Only fix the surface phenomenon of the bug
    • Introduce new features with high complexity
    • Add complexity to meet niche needs
    • Change stable existing API or semantics
    • Cause other functions to not operate normally
    • Add a lot of dependencies
    • Change the dependency version at will
    • Submit a large number of codes or changes at once

    2.4.3 Reviewer notes#

    • Please use a constructive tone to write comments
    • If you need to make changes by the submitter, please clearly state all the content that needs to be modified to complete the Pull Request
    • If a PR is found to have introduced new problems after merging, the Reviewer needs to contact the PR author and communicate to solve the problem; if the PR author cannot be contacted, the Reviewer needs to revert the PR

    3. Outstanding Contributor#

    3.1 About Committers (Collaborators)#

    3.1.1 How to become Committer#

    If you have submitted a valuable PR to Linkis that has been merged, or have contributed continuously for more than half a year and led the release of at least one version, you can find a PMC member of the Linkis project through the official WeChat group. If they are willing to nominate you as a committer and state your contributions to all PMC members and Committers, a vote will be initiated; PMC members and the other Committers will vote together to decide whether to admit you. If you receive enough votes, you will become a Committer of the Linkis project.

    3.1.2 Committer's rights#

    • You can join the official developer WeChat group to participate in discussions and formulate Linkis development plans
    • Can manage Issues, including closing and adding tags
    • Can create and manage project branches, except for master and dev-* branches
    • You can review the PR submitted to the dev-* branch
    • Can apply to become a Committee member

    3.2 About Committee#

    3.2.1 How to become a Committee member#

    If you are a Committer of the Linkis project and all your contributions have been recognized by the other Committee members, you can apply to become a member of the Linkis Committee; the other Committee members will vote together to decide whether to admit you. Only if you pass unanimously will you become a member of the Linkis Committee.

    3.2.2 Rights of Committee members#

    • You can merge PRs submitted by other Committers and contributors to the dev-* branches
    • Participate in determining the roadmap and development direction of the Linkis project
    • Can participate in the new version release
    - + \ No newline at end of file diff --git a/community/how-to-email/index.html b/community/how-to-email/index.html index c112816f74f..422035794bd 100644 --- a/community/how-to-email/index.html +++ b/community/how-to-email/index.html @@ -7,7 +7,7 @@ How to Use Email List | Apache Linkis - + @@ -15,7 +15,7 @@

    How to Use Email List

    Introduces mailing list usage guidelines and reference examples

    To subscribe to the mailing list, please refer to the Subscription Guidelines

    Linkis' archived mail can be viewed here archived mail

    1. Themes#

    The subject of the email can be roughly divided into the following types

    • [DISCUSS] Discussion on a feature/function/logic modification/CI/CD, which can be implementation/design/optimization suggestion, etc.
    • [PROPOSAL] Proposals, such as adding/removing certain features, are not much different from [DISCUSS]
    • [VOTE] Vote on changes, electing Committers, electing new PPMC members, etc., such as version releases: each version will be voted on in the community dev mailing list. A vote can also offer multiple options, i.e. a poll.
    • [ANNOUNCE] Announce the completion of the release of the new version, announcing the new Committer/PPMC elected, etc.
    • [NOTICE] Mainly used to notify some temporary announcements, etc., such as the community sandbox environment is suspended for maintenance/upgrade, the web official website is abnormally unavailable, etc.; as well as online and offline weekly meetings/exchange meetings and various Event information announcements, etc.
    • [HELP] Ask for help. Because there are many git code notifications, they sometimes cannot be checked in time; in addition, GitHub network access is limited, and some users may not be able to submit issues through GitHub smoothly. Asking by email makes the request easier to notice and act on.
    • [VOTE][RESULT] Announce the results of the release vote

    2. Mail Specifications#

    General specification:
    • Whenever possible, do not send HTML messages; send plain text instead. If you use QQ Mail, its email content is in HTML format by default; please switch to plain-text format when writing. For detailed switching instructions, see the appendix of this article.
    • Developers/community users/PPMC members initiate email discussions/needs for help/notifications for the content of the above scenarios, please send them to dev@linkis.apache.org
    • Please put the corresponding type prefix before the email subject: such as [HELP] XXXXXXX, [DISCUSS] XXXXXXX

    For more information, please refer to the official Mail Etiquette https://infra.apache.org/contrib-email-tips

    [DISCUSS/Proposal] Mail

    • Title [DISCUSS][module name] XXXX (if it is a specific module, it is recommended to bring the module name)
    • Generally, create a corresponding issue on Github's issues column, and then initiate an email discussion
    • Briefly and clearly describe the content of the discussion/proposal (e.g. background, the problem you want to solve, and how to solve it)
    • Modules involved (if one or two specific modules are involved)
    • Graphical and textual information such as relevant design instructions can be placed in the corresponding issue for easy modification, and the link can be quoted in the email.
    • The corresponding Chinese translation can be attached

    [HELP] Mail

    3. Sample reference#

    [DISCUSS/Proposal] Example

    [VOTE] Example

    [ANNOUNCE] Example

    [NOTICE] Example

    [HELP] Example

    4. Mail usage of PPMC#

    From the time a version is determined until it is released, the following common email scenarios may be involved

    1. The new version needs to be discussed with PMC members and developers; the meeting minutes must be recorded, including the function points of this version, the planned release time, and this release's release manager, and sent to the private@linkis.apache.org mailing list.
    2. For the scope of the new version's feature list, a voting email needs to be sent to dev@linkis.apache.org; agreement from 3+ PMC members is required and yes votes must outnumber no votes.
    3. For weekly regular meetings you host, a meeting invitation reminder email needs to be sent before the meeting, and the meeting minutes email should be sent to dev@linkis.apache.org after the meeting
    4. Votes for new committers/PPMC members need to be sent to private@linkis.apache.org. See https://community.apache.org/newcommitter.html for the new committer/PPMC selection process

    5. How to Reply to Version Release Voting Emails#

    If a release vote is initiated, after verification (see How to verify for the detailed verification process), you can refer to this reply example when replying to the email

    If you initiate a release vote, you can refer to this response example when replying to the email after verification

    When replying to the email, you must include the information you have checked yourself; simply replying `+1 approve` is invalid.

    PPMC/IPMC members should preferably vote with the binding suffix, indicating a binding vote, which makes it convenient to count the voting results

    Non-PPMC/Non-IPMC member

    +1 (non-binding)
    I checked:
        1. All download links are valid
        2. Checksum and signature are OK
        3. LICENSE and NOTICE files exist
        4. Builds successfully on macOS (Big Sur)
        5.

    PPMC/IPMC member

    +1 (binding)
    I checked:
        1. All download links are valid
        2. Checksum and signature are OK
        3. LICENSE and NOTICE files exist
        4. Builds successfully on macOS (Big Sur)
        5.

    6. Appendix#

    QQ mailbox switch to plain text format

    image

    - + \ No newline at end of file diff --git a/community/how-to-participate-in-developer-meetings/index.html b/community/how-to-participate-in-developer-meetings/index.html index 522653d72e5..a2977565cb9 100644 --- a/community/how-to-participate-in-developer-meetings/index.html +++ b/community/how-to-participate-in-developer-meetings/index.html @@ -7,7 +7,7 @@ How to participate in developer meetings | Apache Linkis - + @@ -16,7 +16,7 @@ It is strongly recommended to subscribe to the Apache mail to keep abreast of the latest developments in the community. Subscription strategy: https://linkis.apache.org/zh-CN/community/how-to-subscribe

    Regular meeting documentation#

    1. "Documents on Weekly Meeting Issues": records the weekly meeting time, host, issues, and other information
    2. "Linkis Incubation Progress": records Linkis version progress, development specifications, community management, and other information
    3. "Question Collection": records community developers' questions, answers, and other information

    Regular meetings held#

    - + \ No newline at end of file diff --git a/community/how-to-release/index.html b/community/how-to-release/index.html index 8a07efc8cde..2c7aeb98b64 100644 --- a/community/how-to-release/index.html +++ b/community/how-to-release/index.html @@ -7,7 +7,7 @@ How to Release | Apache Linkis - + @@ -120,7 +120,7 @@ For more information, please refer to the official Email Specification https://infra.apache.org/contrib-email-tips

    Gmail mailbox switch to plain text format

    image

    QQ mailbox switch to plain text format

    image

    - + \ No newline at end of file diff --git a/community/how-to-sign-apache-icla/index.html b/community/how-to-sign-apache-icla/index.html index 64ba5ddfac6..b4804c9a4a1 100644 --- a/community/how-to-sign-apache-icla/index.html +++ b/community/how-to-sign-apache-icla/index.html @@ -7,7 +7,7 @@ ICLA Signing Process | Apache Linkis - + @@ -18,7 +18,7 @@ Hello Apache Incubator: I have accepted the Apache Linkis(Incubator) PPMC invitation to become linkis committer, the attachment is my ICLA information.Thanks!

    Here is a specific example:

    example

    6. After the email is sent successfully, wait for confirmation from the official community

    Manual signature and PDF software signature Demo#

    PDF online signature

    • Download the PDF source file
    • Fill in the items and personal information
    • Open the PDF online signing URL
    • Sign it
    • Save and download the signed PDF file
    • Send it to the specified mailbox

    Handwritten signature

    • Download the PDF source file
    • Fill in the items and personal information
    • Print the document
    • Sign it by hand
    • Convert the photos into a single PDF file
    • Send it to the specified mailbox

    For example files, please refer to https://github.com/casionone/incubator-linkis-website/tree/dev/resource/wangming-icla.pdf

    - + \ No newline at end of file diff --git a/community/how-to-subscribe/index.html b/community/how-to-subscribe/index.html index b4b22ae95ed..b2652dc45e0 100644 --- a/community/how-to-subscribe/index.html +++ b/community/how-to-subscribe/index.html @@ -7,7 +7,7 @@ How to Subscribe | Apache Linkis - + @@ -15,7 +15,7 @@

    How to Subscribe

    Apache has configured a series of mailing lists for each project. Mailing lists are an important form of communication in the Apache community.

    Many things in the daily operation and maintenance of the community are carried by mailing lists, such as technical discussions, any ideas or suggestions, project questions and answers, new functions/features/major changes decisions and notifications, version release voting, etc. As long as it is related to the project, you can initiate a discussion here.

    As long as you subscribe to this mailing list, you will be among the first to learn of the latest developments in the Linkis community and keep pace with it.

    Linkis project mailing list

    Name | Description | Subscribe | Unsubscribe | Archive
    dev@linkis.apache.org | Community activity information | subscribe | unsubscribe | archive
    commits@linkis.apache.org | Code repo update information | subscribe | unsubscribe | archive

    1. Subscribe to the mailing list#

    For example, take subscribing to the dev@linkis.apache.org mailing list

    The steps are as follows:

    1. Send an email without any content or subject to: dev-subscribe@linkis.apache.org
    2. Wait until you receive an email with the subject line confirm subscribe to dev@linkis.apache.org (if you have not received it for a long time, please check whether it was blocked by your mailbox; if it was not blocked and still has not arrived, return to step 1)
    3. Reply directly to that email without changing the subject line or adding any content.
    4. Wait until you receive an email with the subject line WELCOME to dev@linkis.apache.org.
    5. Once you receive the email from step (4), you have successfully subscribed. To initiate a discussion, you can send an email directly to dev@linkis.apache.org, and it will be delivered to everyone who subscribed to the mailing list.

    2. Unsubscribe from the mailing list#

    The steps for unsubscribing from a mailing list are similar to those for subscribing:

    1. Send an email without any content or subject to: dev-unsubscribe@linkis.apache.org
    2. Wait until you receive an email with the subject line confirm unsubscribe from dev@linkis.apache.org
    3. Reply directly to that email without changing the subject line or adding any content
    4. Wait until you receive an email with the subject line GOODBYE from dev@linkis.apache.org
    5. You have unsubscribed successfully

    3. Issues related#

    Linkis's issues address https://github.com/apache/incubator-linkis/issues

    For new problems or ideas, you can create a new issue via [new issues] and describe the issue in as much detail as possible, so that community members can follow up, discuss, and resolve it. For any problem with the project, it is recommended to create an issue first to record and track it, so that the whole process is preserved and archived, making it easy for later users to search.

    For existing issues, if you are interested, feel free to reply and join the discussion. For task or bug-type issues, if you are interested, you can follow them or directly take part in the task. Community partners are very welcome to contribute their efforts to Linkis.

    - + \ No newline at end of file diff --git a/community/how-to-verify/index.html b/community/how-to-verify/index.html index e3f3035ee19..018d12418db 100644 --- a/community/how-to-verify/index.html +++ b/community/how-to-verify/index.html @@ -7,7 +7,7 @@ How to Verify | Apache Linkis - + @@ -26,7 +26,7 @@

    2.4.4 Check related compliance items#

    and check as follows:

    • Check whether the source package contains unnecessary files that make the tar package too large
    • The folder name contains the word incubating
    • LICENSE and NOTICE files exist
    • A DISCLAIMER or DISCLAIMER-WIP file exists
    • The year in the NOTICE file is correct
    • Only text files exist, not binary files
    • All files have the ASF license header at the beginning
    • The source compiles correctly
    • Check for extra files or folders, such as empty folders, etc.
    • .....

    2.5 Check the binary package#

    If the binary/web-binary package is uploaded, check the binary package.

    Unzip apache-linkis-${release_version}-incubating-bin.tar.gz

    $ mkdir apache-linkis-${release_version}-incubating-bin
    $ tar -xvf apache-linkis-${release_version}-incubating-bin.tar.gz -C apache-linkis-${release_version}-incubating-bin
    $ cd apache-linkis-${release_version}-incubating-bin

    and check as follows:

    • The folder name contains the word incubating
    • LICENSE and NOTICE files exist
    • A DISCLAIMER or DISCLAIMER-WIP file exists
    • The year in the NOTICE file is correct
    • All text files have the ASF license header at the beginning
    • Check the third-party dependency licenses:
    • All third-party dependency licenses are compatible
    • All third-party dependency licenses are named in the LICENSE file
    • If a dependency is under the Apache license and contains a NOTICE file, the contents of that NOTICE file also need to be added to the release's NOTICE file
    • .....

    You can refer to this article: ASF Third Party License Policy

    3. Email reply#

    If you initiate a release vote, you can refer to this response example when replying to the email after verification

    When replying to the email, you must include the information you have checked yourself; simply replying `+1 approve` is invalid.

    When a PPMC member votes on dev@linkis.apache.org in the Linkis community, please add the binding suffix to indicate a binding vote, which makes it convenient to count the voting results.

    When an IPMC member votes on general@incubator.apache.org in the incubator community, please add the binding suffix to indicate a binding vote, which makes it convenient to count the voting results.

    note

    If you have already voted on dev@linkis.apache.org, you can carry your vote over directly when replying in the incubator community, for example:

    //Incubator community voting: only IPMC members' votes are binding; PPMC members need to be aware of the binding change
    Forward my +1 from dev@linkis (non-binding)
    Copy my +1 from linkis DEV ML (non-binding)

    Non-PPMC/Non-IPMC member

    +1 (non-binding)
    I checked:
        1. All download links are valid
        2. Checksum and signature are OK
        3. LICENSE and NOTICE files exist
        4. Builds successfully on macOS (Big Sur)
        5.

    PPMC/IPMC member

    //Incubator community voting: only IPMC members' votes are binding
    +1 (binding)
    I checked:
        1. All download links are valid
        2. Checksum and signature are OK
        3. LICENSE and NOTICE files exist
        4. Builds successfully on macOS (Big Sur)
        5.

    4. Precautions#

    If you have Maven installed, you can replace ./mvnw or mvnw.cmd with your own mvn command

    mvnw is short for Maven Wrapper. It lets you run a Maven project without installing Maven and configuring environment variables; if Maven cannot be found, it downloads the corresponding Maven version according to the configuration file

    - + \ No newline at end of file diff --git a/community/how-to-vote-a-committer-ppmc/index.html b/community/how-to-vote-a-committer-ppmc/index.html index ca7d9731227..731cb3f55e7 100644 --- a/community/how-to-vote-a-committer-ppmc/index.html +++ b/community/how-to-vote-a-committer-ppmc/index.html @@ -7,7 +7,7 @@ How to Vote New Committer/PPMC | Apache Linkis - + @@ -42,7 +42,7 @@ Thanks!The Apache Linkis(Incubating) PPMC

    At this point, the whole process is over, and the candidate officially becomes the Committer or PPMC of the project.

    - + \ No newline at end of file diff --git a/community/how-to-write-unit-test-code/index.html b/community/how-to-write-unit-test-code/index.html index b0fcf4a160e..db3ce1a138b 100644 --- a/community/how-to-write-unit-test-code/index.html +++ b/community/how-to-write-unit-test-code/index.html @@ -7,7 +7,7 @@ How to Write Unit Test Code | Apache Linkis - + @@ -48,7 +48,7 @@ #配置mybatis-plus的mapper信息 因为使用的是mybatis-plus,使用mybatis-plusmybatis-plus.mapper-locations=classpath:org/apache/linkis/jobhistory/dao/impl/JobDetailMapper.xml,classpath:org/apache/linkis/jobhistory/dao/impl/JobHistoryMapper.xmlmybatis-plus.type-aliases-package=org.apache.linkis.jobhistory.entitymybatis-plus.configuration.log-impl=org.apache.ibatis.logging.stdout.StdOutImpl

    For List results, use Stream predicates for assertion judgments. Writing specification (see the sketch after this list):

    1. Use @Transactional and @Rollback to roll data back and avoid data pollution
    2. Each DaoTest should have a public method for creating and initializing data (or import data via CSV) to prepare data. Related query, update, and delete operations should call this public method first to prepare the data
    3. When creating test data, do not assign a value to any attribute that is an auto-increment ID
    4. The test data created should be as consistent as possible with real sample data
    5. When testing data updates, if the field allows, prefix the value with 'modify original value'
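
    A minimal sketch of items 1 and 2 above, assuming Spring Boot Test with JUnit 5; the mapper and entity names are hypothetical (modeled on the JobHistoryMapper mentioned earlier), and insert/selectById are the standard MyBatis-Plus BaseMapper methods:

        import org.junit.jupiter.api.Assertions;
        import org.junit.jupiter.api.Test;
        import org.springframework.beans.factory.annotation.Autowired;
        import org.springframework.boot.test.context.SpringBootTest;
        import org.springframework.test.annotation.Rollback;
        import org.springframework.transaction.annotation.Transactional;

        @SpringBootTest
        @Transactional // every test runs in a transaction ...
        @Rollback      // ... that is rolled back, so the database is never polluted
        public class JobHistoryDaoTest {

            @Autowired
            private JobHistoryMapper jobHistoryMapper; // hypothetical mapper

            /** Public data-preparation method shared by the query/update/delete tests. */
            public JobHistory createAndInsertJob() {
                JobHistory job = new JobHistory();
                // do not set the auto-increment id; let the database assign it
                job.setSubmitUser("hadoop");
                jobHistoryMapper.insert(job);
                return job;
            }

            @Test
            public void testQueryBySubmitUser() {
                JobHistory prepared = createAndInsertJob();
                JobHistory found = jobHistoryMapper.selectById(prepared.getId());
                Assertions.assertEquals("hadoop", found.getSubmitUser());
            }
        }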
    - + \ No newline at end of file diff --git a/community/microservice-division/index.html b/community/microservice-division/index.html index 35b0da4becb..276b91ce243 100644 --- a/community/microservice-division/index.html +++ b/community/microservice-division/index.html @@ -7,7 +7,7 @@ Division of Microservices | Apache Linkis - + @@ -15,7 +15,7 @@

    Division of Microservices

    Introduction to service#

    Linkis is developed based on a microservice architecture, and its services are divided into three service groups: the Computation Governance service group, the Public Enhancement service group, and the Microservice Governance service group.

    • Computation Governance Services: The core service for processing tasks, supporting the 3 main stages of the computing task/request processing flow (submit->prepare->execute);
    • Public Enhancement Services: Provide basic support services, including context services, engine/udf material management services, historical tasks and other public services and data source management services;
    • Microservice Governance Services: Customized Spring Cloud Gateway, Eureka. Provides a base for microservices.

    The microservices included in each service group are as follows:

    Group | Service name | Main functions
    MGS | linkis-mg-eureka | Responsible for service registration and discovery; other upstream components, such as DSS, also reuse the Linkis registry
    MGS | linkis-mg-gateway | As the gateway entrance of Linkis, mainly responsible for request forwarding and user access authentication
    CGS | linkis-cg-entrance | The task submission entrance, responsible for receiving, scheduling, and forwarding execution requests and for life-cycle management of computing tasks; returns results, logs, and progress to the caller
    CGS | linkis-cg-linkismanager | Provides AppManager (application management), ResourceManager (resource management), and LabelManager (label management) capabilities
    CGS | linkis-cg-engineconnplugin | The engine connector plugin service, providing the basic support for freely extending Linkis engines; by implementing the established plugin interfaces, new engines can be introduced into the execution life cycle of the computing middleware and deployed rapidly
    CGS | linkis-cg-engineconnmanager | The manager of EngineConn, providing engine life-cycle management and reporting load information and its own health status to the ResourceManager
    CGS | linkis-cg-engineconn | The actual connection service to the underlying computing/storage engines (Hive/Spark); contains session information with the actual engine and acts as a client of the underlying engine
    PES | linkis-ps-publicservice | Provides unified configuration management, context service, BML material library, data source management, microservice management, historical task query, and other functions for the other microservice modules
    PES | linkis-ps-cs | Context service, enabling data and information sharing across multiple services within a data application development process
    PES | linkis-ps-metadatamanager | Provides metadata query service only: basic query of database metadata; exposes an HTTP interface externally and an RPC service internally, so the data source management module can call it via RPC for data source connection testing
    PES | linkis-ps-data-source-manager | Data source management service: basic management of data sources, providing HTTP interfaces for adding, querying, modifying, and connection-testing external data sources; provides an RPC service internally so the metadata management module can query via RPC the information needed to establish a database connection

    Basic terms explained#

    Abbreviation | Full name in English | Full name in Chinese
    CG/cg | Computation Governance | Computation Governance
    MG/mg | Microservice Governance | Microservice Governance
    PS/ps | Public Service | Public Service
    CS/cs | Context Service | Unified Context
    DSS/dss | DataSphere Studio | Data Application Integrated Development Framework
    EC/ec | EngineConn | Engine Connector
    ECM/ecm | EngineConnManager | Management of Engine Connectors
    ECP/ecp | EngineConnPlugin | Engine Connector Plugin
    RM/rm | ResourceManager | Resource manager for managing node resources
    PES/pes | Public Enhancement Services |
    DMS/dms | Data Source Manager Service | Data Source Management Service
    MDS/mds | MetaData Manager Service | Metadata Management Service
    BML/bml | BigData Material Library |
    UJES | Unified Job Execute Service | Unified Job Execute Service
    DDL/ddl | Data Definition Language | Database Definition Language
    DML/dml | Data Manipulation Language | Data Manipulation Language
    - + \ No newline at end of file diff --git a/community/ppmc-related-permission-configuration/index.html b/community/ppmc-related-permission-configuration/index.html index 70895a7017d..fcb76526c8d 100644 --- a/community/ppmc-related-permission-configuration/index.html +++ b/community/ppmc-related-permission-configuration/index.html @@ -7,7 +7,7 @@ PPMC/Committer Related Permission Configuration | Apache Linkis - + @@ -27,7 +27,7 @@ Every PPMC member is required to subscribe to the following mailing lists:

    Name | Description | Subscribe Mail | Unsubscribe Mail | Mail Archive
    dev@linkis.apache.org | Linkis community activity information, project discussion announcements, etc. | Subscribe (dev-subscribe@linkis.apache.org) | unsubscribe | archive
    private@linkis.apache.org | This mailing list is private, visible only inside the PPMC, mainly for internal discussions | Subscribe (private-subscribe@linkis.apache.org) | unsubscribe | archive
    general@incubator.apache.org | Public emails from the incubator community, mainly related to incubation projects | Subscribe (general-subscribe@incubator.apache.org) | unsubscribe | archive

    Subscription operations can be found in the guide Subscribe to the mailing list

    note

    Note: private@linkis.apache.org subscriptions need to be reviewed by the mail moderator (shaofengshi@apache.org), so please include your personal name information in the mail content when subscribing, for the moderator's review.

    If the above subscription is unsuccessful, you can try to use the web-side tool: https://whismy.apache.org/committers/subscribe.cgi

    image

    PPMC members can view mailing list subscriptions here: https://whismy.apache.org/roster/ppmc/linkis

    - + \ No newline at end of file diff --git a/community/security/index.html b/community/security/index.html index b6d7c871acb..94e20b67d1b 100644 --- a/community/security/index.html +++ b/community/security/index.html @@ -7,7 +7,7 @@ Security | Apache Linkis - + @@ -15,7 +15,7 @@

    Security

    The Apache Software Foundation takes a rigorous stance on eliminating security issues in its software projects. Likewise, Apache Linkis is also vigilant and takes security issues related to its features and functionality into the highest consideration.

    If you have any concerns regarding Linkis’s security, or you discover a vulnerability or potential threat, please don’t hesitate to get in touch with the Apache Security Team by dropping an email at security@apache.org.

    Please specify the project name as "Linkis" in the email, and provide a description of the relevant problem or potential threat. You are also urged to recommend how to reproduce and replicate the issue.

    The Apache Security Team and the Linkis community will get back to you after assessing and analyzing the findings.

    Please note that the security issue should be reported on the security email first, before disclosing it on any public domain.

    - + \ No newline at end of file diff --git a/docs/0.11.0/api/login_api/index.html b/docs/0.11.0/api/login_api/index.html index 58f80789bd3..3889cf98b2e 100644 --- a/docs/0.11.0/api/login_api/index.html +++ b/docs/0.11.0/api/login_api/index.html @@ -7,7 +7,7 @@ Login Api | Apache Linkis - + @@ -16,7 +16,7 @@ -Heartbeat

    4 Interface details#

    4.1 Login#

    • Interface /api/rest_j/v1/user/login

    • Submission method POST

          {
              "userName": "",
              "password": ""
          }
    • Return example
        {
            "method": "/api/rest_j/v1/user/login",
            "status": 0,
            "message": "OK",
            "data": {
                "isAdmin": false,
                "loginNum": 5,
                "userName": "enjoyyin",
                "lastLoginTime": 1722222222222
            }
        }

    4.2 Logout#

    • Interface /api/rest_j/v1/user/logout

    • Submission method POST

      No parameters

    • Return example

        {
            "method": "/api/rest_j/v1/user/logout",
            "status": 0,
            "message": "Logout successfully!"
        }

    4.3 Heartbeat#

    • Interface /api/rest_j/v1/user/heartbeat

    • Submission method POST

      No parameters

    • Return example

        {
            "method": "/api/rest_j/v1/user/heartbeat",
            "status": 0,
            "message": "Maintaining the heartbeat success!"
        }
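
    A minimal sketch of calling the login interface above with Java 11's built-in HttpClient; the gateway address and credentials are placeholders:

        import java.net.URI;
        import java.net.http.HttpClient;
        import java.net.http.HttpRequest;
        import java.net.http.HttpResponse;

        public class LoginDemo {
            public static void main(String[] args) throws Exception {
                // placeholder gateway address; replace with your actual Linkis gateway
                String url = "http://127.0.0.1:9001/api/rest_j/v1/user/login";
                String body = "{\"userName\": \"hadoop\", \"password\": \"***\"}";

                HttpRequest request = HttpRequest.newBuilder()
                        .uri(URI.create(url))
                        .header("Content-Type", "application/json")
                        .POST(HttpRequest.BodyPublishers.ofString(body))
                        .build();

                HttpResponse<String> response = HttpClient.newHttpClient()
                        .send(request, HttpResponse.BodyHandlers.ofString());
                // on success, the envelope's status field is 0
                System.out.println(response.body());
            }
        }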
    - + \ No newline at end of file diff --git a/docs/0.11.0/api/rest_api/index.html b/docs/0.11.0/api/rest_api/index.html index 5b02a2b6f35..c1e71d0b282 100644 --- a/docs/0.11.0/api/rest_api/index.html +++ b/docs/0.11.0/api/rest_api/index.html @@ -7,7 +7,7 @@ Restful Api | Apache Linkis - + @@ -17,7 +17,7 @@
    - + \ No newline at end of file diff --git a/docs/0.11.0/api/web_socket/index.html b/docs/0.11.0/api/web_socket/index.html index 8616418d2ad..f3b557b1e4c 100644 --- a/docs/0.11.0/api/web_socket/index.html +++ b/docs/0.11.0/api/web_socket/index.html @@ -7,7 +7,7 @@ WebSocket | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    WebSocket

    Linkis provides WebSocket access, so the front end can interact with Linkis over WebSocket in real time instead of polling through RESTful requests.

    1 Linkis interface specification#

    Linkis defines its own set of interface specifications for interaction between the front end and the back end.

    If you are interested in the interface specification, please click here to view it.

    2 WebSocket Interface Summary#

    We provide the following interfaces to help users quickly submit Jobs for execution.

    • Create WebSocket Connection
    • Submit a job for execution
    • The server proactively returns status, logs, and progress

    3 Interface Details#

    3.1 Establishing a connection#

    This interface is intended to create a WebSocket connection with Linkis.

    • /api/rest_j/entrance/connect

    • Request Method GET

    • Response status code 101

    3.2 Submission of implementation#

    This interface is used to submit the user's job to Linkis for execution.

    • Interface /api/rest_j/entrance/execution

    • Submit Method POST

    • Request JSON Example

    {
        "method": "/api/rest_j/v1/entrance/execute",
        "data": {
            "params": {
                "variable": {
                    "k1": "v1"
                },
                "configuration": {
                    "special": {
                        "k2": "v2"
                    },
                    "runtime": {
                        "k3": "v3"
                    },
                    "startup": {
                        "k4": "v4"
                    }
                }
            },
            "executeApplicationName": "spark",
            "executionCode": "show tables",
            "runType": "sql",
            "source": {
                "scriptPath": "/home/Linkis/Linkis.sql"
            },
            "websocketTag": "37fcbd8b762d465a0c870684a0261c6e"
        }
    }
    • The parameters in the requested body data are described below.
    Parameter Name | Definition | Type | Remarks
    executeApplicationName | The engine service the user expects to use, such as Spark, Hive, etc. | String | must not be empty
    requestApplicationName | Name of the system launching the request | String | may be empty
    params | User-specified parameters for running the service program | Map | required, but the values inside may be empty
    executionCode | The execution code submitted by the user | String | must not be empty
    runType | When running a service such as Spark, the user can select Python, R, SQL, etc. | String | must not be empty
    scriptPath | Storage path of the script submitted by the user | String | for IDE requests, it and executionCode cannot both be empty
    • Response example

        {
            "method": "/api/rest_j/v1/entrance/execute",
            "status": 0,
            "message": "The request was executed successfully",
            "data": {
                "execID": "030418IDEhivebdpdwc010004:10087IDE_johnnwang_21",
                "taskID": "123"
            }
        }
    • execID is the unique execution ID generated for a task once it has been submitted to UJES. It is a String, is only useful while the task is running, and is similar in concept to a PID. The execID format is: (requestApplicationName length)(executeApplicationName length+1)(instance length+2)${requestApplicationName}${executeApplicationName}${entranceInstance ip+port}${umUser}${index}
    • taskID is the unique ID representing a user-submitted task. It is generated by database auto-increment and is of type Long
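    A sketch of establishing the connection (3.1) and submitting a job (3.2) with Java's built-in WebSocket client; the gateway address is an assumption:

        import java.net.URI;
        import java.net.http.HttpClient;
        import java.net.http.WebSocket;
        import java.util.concurrent.CompletionStage;

        public class EntranceWsDemo {
            public static void main(String[] args) throws Exception {
                WebSocket ws = HttpClient.newHttpClient().newWebSocketBuilder()
                        .buildAsync(URI.create("ws://127.0.0.1:9001/api/rest_j/entrance/connect"), // assumed address
                                new WebSocket.Listener() {
                                    @Override
                                    public CompletionStage<?> onText(WebSocket webSocket,
                                            CharSequence data, boolean last) {
                                        // Status, log and progress pushes all arrive here.
                                        System.out.println(data);
                                        return WebSocket.Listener.super.onText(webSocket, data, last);
                                    }
                                })
                        .join();
                ws.sendText("{\"method\":\"/api/rest_j/v1/entrance/execute\","
                        + "\"data\":{\"executeApplicationName\":\"spark\","
                        + "\"executionCode\":\"show tables\",\"runType\":\"sql\",\"params\":{}}}",
                        true);
                Thread.sleep(10_000); // keep the demo alive to receive pushed messages
            }
        }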

    3.3 Task Status, Logs, Progress Proactive Push#

    Once execution starts, the server proactively pushes information such as status, logs and progress. The client can also actively request status, logs and progress over the WebSocket.

    The server proactively pushes the following content:

    • Logs

        {
            "method": "/api/rest_j/v1/entrance/${execID}/log",
            "status": 0,
            "message": "Return log information",
            "data": {
                "execID": "${execID}",
                "log": ["error", "warn", "info", "all"],
                "taskID": 28594,
                "fromLine": 56
            },
            "websocketTag": "37fcbd8b762d465a0c870684a0261c6e"
        }

    • Status

        {
            "method": "/api/rest_j/v1/entrance/${execID}/status",
            "status": 0,
            "message": "Return status information",
            "data": {
                "execID": "${execID}",
                "taskID": 28594,
                "status": "Running"
            },
            "websocketTag": "37fcbd8b762d465a0c860684a0261c6e"
        }

    • Progress

        {
            "method": "/api/rest_j/v1/entrance/${execID}/progress",
            "status": 0,
            "message": "Return progress information",
            "data": {
                "execID": "${execID}",
                "taskID": 28594,
                "progress": 0.2,
                "progressInfo": [
                    {
                        "id": "job-1",
                        "succeedTasks": 2,
                        "failedTasks": 0,
                        "runningTasks": 5,
                        "totalTasks": 10
                    },
                    {
                        "id": "job-2",
                        "succeedTasks": 5,
                        "failedTasks": 0,
                        "runningTasks": 5,
                        "totalTasks": 10
                    }
                ]
            },
            "websocketTag": "37fcbd8b762d465a0c870684a0261c6e"
        }
    - + \ No newline at end of file diff --git a/docs/0.11.0/architecture/commons/real-time_log_push/index.html b/docs/0.11.0/architecture/commons/real-time_log_push/index.html index 6104d677db6..7555b233935 100644 --- a/docs/0.11.0/architecture/commons/real-time_log_push/index.html +++ b/docs/0.11.0/architecture/commons/real-time_log_push/index.html @@ -7,7 +7,7 @@ Asynchronous Log Live Push | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    Asynchronous Log Live Push

    Interservice Asynchronous Push Scheme

    1 Background#

    With the adoption of the microservice architecture, multiple microservices are deployed on different servers, and the logs generated by each microservice are scattered across those servers. While ELK can filter the logs that users want to view, it falls short in timeliness and customization.

    If logs are written to disk before being collected, it is difficult to classify them by the user's runtime information: first because flushing to disk adds latency, and second because the user context of the running phase is lost once the log has hit the disk.

    2 Ideas#

    Design an Appender based on log4j2 and add it to each microservice's log configuration, then use the listener design pattern together with RPC service calls to push logs in real time.

    Interservice Asynchronous Log Push Scheme

    3 Implementation#

    3.1 Design implementation of Appender components#

    Popular backend logging stacks use slf4j as the logging facade, with logback or log4j2 underneath. Open source projects now tend to prefer log4j2 as the log printing framework, owing to its performance advantages and open source license.

    An Appender is an output target (destination) of log4j2 logs. Once a log event is generated while the microservice is running, every Appender registered in the configuration receives this log event.

    The Appender designed by Linkis caches the acquired log events in a queue after the microservice generates them, and a listener is registered in the Appender.
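    A minimal sketch of such a queue-backed log4j2 Appender; the class name CacheLogAppender and the queue capacity are assumptions for illustration, not Linkis's actual implementation:

        import java.io.Serializable;
        import java.util.concurrent.ArrayBlockingQueue;
        import java.util.concurrent.BlockingQueue;

        import org.apache.logging.log4j.core.Filter;
        import org.apache.logging.log4j.core.Layout;
        import org.apache.logging.log4j.core.LogEvent;
        import org.apache.logging.log4j.core.appender.AbstractAppender;

        public class CacheLogAppender extends AbstractAppender {

            // Bounded queue: log events wait here until a listener drains them.
            private final BlockingQueue<String> cache = new ArrayBlockingQueue<>(1000);

            protected CacheLogAppender(String name, Filter filter,
                                       Layout<? extends Serializable> layout) {
                super(name, filter, layout, true);
            }

            @Override
            public void append(LogEvent event) {
                // offer() instead of put(): never block a business thread on logging.
                cache.offer(new String(getLayout().toByteArray(event)));
            }

            public BlockingQueue<String> getCache() {
                return cache;
            }
        }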

    3.2 The design implementation of listeners#

    The listener pattern is a common design pattern and a common way to implement asynchronous callback programming.

    The listener watches the log event queue: when the queue is full, the logs are drained from the queue and sent via HTTP.

    The listener also watches the task state: after the task has completed all its execution steps in this microservice, all logs still cached in the queue must be sent out, to prevent log loss.
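    A sketch of this listener behavior, against the queue of the Appender sketch above (the HTTP call is left as an assumption):

        import java.util.ArrayList;
        import java.util.List;
        import java.util.concurrent.BlockingQueue;

        public class LogQueueListener {

            private final BlockingQueue<String> cache;

            public LogQueueListener(BlockingQueue<String> cache) {
                this.cache = cache;
            }

            // Called when the queue is full: drain everything and ship one batch.
            public void onQueueFull() {
                List<String> batch = new ArrayList<>();
                cache.drainTo(batch);
                sendByHttp(batch);
            }

            // Called when the task finishes in this microservice: flush the rest
            // of the cached logs so none are lost.
            public void onTaskCompleted() {
                onQueueFull();
            }

            private void sendByHttp(List<String> batch) {
                // Assumption: an HTTP call to the log-collecting microservice goes here.
            }
        }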

    3.3 Design implementation for the cache queue#

    The reason for using a cache queue is that, without one, every generated log event would immediately trigger a push to the log-collecting microservice, making HTTP requests far too frequent. Events must therefore be cached first, and the cache queue can generally be designed as a blocking queue with a maximum capacity.

    3.4 Logging collection#

    Logs are sent to the log-collecting microservice, which sorts and encapsulates them and pushes them to the user interface. Collection is asynchronous, and each user's task logs are backed up.

    - + \ No newline at end of file diff --git a/docs/0.11.0/architecture/commons/rpc/index.html b/docs/0.11.0/architecture/commons/rpc/index.html index b3cef4d9209..ca501d6ce5b 100644 --- a/docs/0.11.0/architecture/commons/rpc/index.html +++ b/docs/0.11.0/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    RPC Architecture

    1 Background#

    HTTP interface calls between Feign-based microservices only cover the simple scenario where microservice A randomly selects an instance of microservice B according to simple rules; if that instance of microservice B wants to asynchronously return information to the caller, this is simply impossible to achieve.

    At the same time, because Feign only supports simple service selection rules, it can neither forward a request to a specified microservice instance nor broadcast a request to all instances of the receiving microservice.

    2 Introduction#

    Linkis has implemented a set of its own underlying RPC communication scheme based on Feign.

    As the underlying communication solution, Linkis RPC integrates the SDK into the microservices in need.

    A microservice can be both a request caller and a request receiver.

    As the request caller, a microservice requests the Receiver of the target microservice through the Sender. As the request receiver, it provides a Receiver to process the requests sent by the caller's Sender, completing a synchronous or asynchronous response.

    Linkis RPC architecture diagram

    3 Implementation#

    The Sender system on the requesting side and the Receiver system on the receiving side together form the entire architecture of Linkis RPC.

    Linkis RPC detailed architecture diagram

    3.1 Sending end#

    As the underlying communication layer, Linkis RPC does not require users to write any actual code on the sending end.

    -1) The user obtains a Sender by calling the SDK provided by Linkis RPC, using the microservice name (Service Name) or specifying the microservice instance (microservice name + IP and port of the microservice instance).

    Sender provides usable methods, see the following pseudo code:

    abstract class Sender {
        Object ask(Object message);
        Object ask(Object message, Duration timeout);
        void send(Object message);
        void deliver(Object message);
    }

    where:

    1. The ask method is a synchronous request-response method, requiring the receiving end to return a response synchronously;
    2. The send method is a synchronous request method, which is only responsible for sending the request to the receiving end synchronously, and does not require the receiving end to reply;
    3. The deliver method is an asynchronous request method. As long as the process on the sending end does not exit abnormally, the request will be sent to the receiving end by another thread later.

    -2) As the sender, the user sends a request to the receiver through the request method provided by the sender.

    -3) The Sender sends the user's request to the interceptor. The interceptor intercepts the request and starts to do additional functional processing on the request:

    a) Broadcast interceptor. The broadcast interceptor only takes effect for requests that need to be broadcast. It provides a special broadcast interface: if a request implements the broadcast interface and is not already being broadcast, the interceptor decides that this request needs to be broadcast and triggers the broadcast operation. The specific steps are: get all the microservice instances to which the request needs to be broadcast (if none are specified, it broadcasts to all instances of the microservice by default); mark the request as being broadcast and call step 1) to obtain a Sender for each corresponding microservice instance; all Senders send the request in a multi-threaded manner; once all Senders in the thread pool have finished sending, the broadcast request is marked as successful and returned to the user, completing the processing.

    b) Retry interceptor. The retry interceptor provides a retry capability for all subsequent steps. If the sender successfully sends the request but the receiver returns an exception that requires a retry, the retry interceptor is triggered to automatically resubmit the request. If the request does not specify a particular receiver instance and a ConnectException (connection exception) occurs while sending, it will actively retry; likewise, if the user has marked certain exceptions as retryable, the retry interceptor retries automatically.

    c) Cache interceptor. The cache interceptor is intended for synchronous requests whose response content is unlikely to change frequently. It provides a special cache interface: if a request implements the cache interface, the interceptor first checks whether a response from the receiving end has already been cached for this request. If so, the cached response is returned directly; otherwise the subsequent steps continue, and once the receiving end returns a response, the response is cached first and then returned to the user, completing the processing.

    d) Default interceptor. The default interceptor is used to call the next processing steps.

    e) Custom interceptors. Users can also implement their own interceptors to achieve specific functions.

    -4) The request encoder will first convert the data (entity bean) requested by the user into a serialized JSON string, and then pass it to the Feign client generator.

    -5) The Feign client generator generates a Feign client that can access the Restful request receiver on the receiving end.

    -6) The generated Feign client calls the service discovery manager to obtain the list of all microservices. Through the service selector, if the user specified only a microservice name in step 1), Feign's load-balancing strategy selects a suitable receiver microservice instance for request forwarding; otherwise the service selector rewrites Spring Cloud Feign's FeignLoadBalancer (Feign load balancer) so that, when creating the LoadBalancerCommand, the target microservice instance is the one specified when the Sender was obtained in step 1).

    -7) Call the Feign client to start requesting the Restful request receiver on the receiving end.
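    Putting steps -1) to -7) together, caller-side code might look like the sketch below; the service name and message classes are assumptions for illustration, not Linkis's actual API:

        // Hypothetical caller-side usage of the Sender SDK described above.
        Sender sender = Sender.getSender("linkis-resourcemanager"); // step -1): by service name
        Object reply = sender.ask(new RequestResource(), Duration.ofSeconds(5)); // sync ask
        sender.send(new ResourceReleased());    // sync send, no reply expected
        sender.deliver(new ResourceReleased()); // async, sent later by another thread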

    3.2 Receiver#

    The receiving end requires users to implement the Receiver interface for processing real business logic.

    1) The Restful request receiver, an HTTP Web Service embedded in Linkis RPC, is responsible for receiving requests from the sending end.

    2) After the Restful request receiver receives a request, it first calls the request decoder to decode it, parsing out the actual request information and the sender's microservice information (microservice name + instance IP and port). If parsing fails, it responds directly with a parse failure.

    3) Put the parsed request information and sender microservice information into the request message queue;

    4) The request consumer will consume the decoded sender request in the request message queue.

    It obtains a suitable Receiver by calling the Receiver manager; at the same time, it uses the Sender generator with the parsed sender microservice information to generate a Sender pointing back at the sending end. The request consumer then hands the actual request information and the generated Sender to the Receiver for processing;

    5) Receiver, as the actual processing unit of user requests, requires users to implement the Receiver interface to complete the actual processing logic of the caller request.

    The pseudo code of Receiver is as follows:

    public interface Receiver {
        void receive(Object message, Sender sender);
        Object receiveAndReply(Object message, Sender sender);
        Object receiveAndReply(Object message, Duration duration, Sender sender);
    }

    Receiver provides methods to handle synchronous and asynchronous requests.

    6) If the request is an asynchronous request, Receiver's receive method is called, and the upper-layer business decides whether to send back a response through the Sender pointing at the sending end.

    7) If this request is a synchronous request, call Receiver's receiveAndReply method, take the return value as the response result, and send back to the sender.
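    A hypothetical Receiver implementation against the pseudo interface above; EchoReceiver and its replies are purely illustrative:

        import java.time.Duration;

        public class EchoReceiver implements Receiver {

            @Override
            public void receive(Object message, Sender sender) {
                // Asynchronous request: the business layer decides whether to reply
                // later through the Sender pointing back at the caller.
                sender.send("received: " + message);
            }

            @Override
            public Object receiveAndReply(Object message, Sender sender) {
                return "echo: " + message; // the return value becomes the sync response
            }

            @Override
            public Object receiveAndReply(Object message, Duration duration, Sender sender) {
                return "echo: " + message; // same, with a processing-time budget
            }
        }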

    - + \ No newline at end of file diff --git a/docs/0.11.0/architecture/commons/scheduler/index.html b/docs/0.11.0/architecture/commons/scheduler/index.html index e8db43d1d3e..3db60a24106 100644 --- a/docs/0.11.0/architecture/commons/scheduler/index.html +++ b/docs/0.11.0/architecture/commons/scheduler/index.html @@ -7,7 +7,7 @@ Scheduler Architecture | Apache Linkis - + @@ -16,7 +16,7 @@ If it is within the allowable range, the grouped object will get the corresponding consumer through the consumption manager. While setting the target parameter to the target value, set the other parameters to the corresponding value according to the matched group of numbers. .

    b) The parameter limit must meet a certain ratio.

    The grouped object gets the corresponding consumer through the consumer manager; while the target parameter is set to the target value, the other parameters are recalculated in proportion to the target value, and all of them are reset.

    In addition to manually setting parameters, each consumer has an independent monitoring thread to count the length of the waiting queue in the consumer, the number of events being executed, and the growth rate of execution time.

    In each grouping object, thresholds and alarm ratios are set for these indicators. Once an indicator exceeds the threshold, or the ratio between multiple indicators exceeds a limited range (for example, when the average execution time is monitored to be greater than the distribution interval parameter, the threshold is considered to be exceeded ), the monitoring thread will immediately expand the consumer accordingly.

    When expanding, it will make full use of the above-mentioned parameter adjustment process to increase a certain parameter in a targeted manner, and other parameters will be automatically expanded accordingly.

    - + \ No newline at end of file diff --git a/docs/0.11.0/architecture/overview/index.html b/docs/0.11.0/architecture/overview/index.html index a9ba379d0dd..b9037212b1c 100644 --- a/docs/0.11.0/architecture/overview/index.html +++ b/docs/0.11.0/architecture/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    Overview

    1 The original intention of Linkis#

    Almost every component of the big data open source ecosystem has its own set of user rights management, resource management, metadata management, independent API access and usage methods.

    And various new components continue to appear.

    However, the user's business needs usually require the collaborative processing of multiple open source components to achieve.

    For a business requirement, users need to learn the manuals of multiple products, and need to do repeated customized development on multiple products, in order to truly introduce open source components into the actual production environment.

    This has brought extremely high learning costs and extra workload to users, and a large amount of repeated maintenance and management work is also required for operation and maintenance.

    At the same time, the coupling between the upper-level functional tool products and the underlying computing storage system is too high, and the hierarchical structure and calling relationship are not clear and decoupled. As a result, any changes in the underlying environment will directly affect the normal use of business products.

    How to provide a unified data middleware that docks with the upper-layer application tools, shields all the calling and usage details of the underlying systems, and truly lets business users focus only on the details of their business implementation, so that even a machine-room expansion or a full relocation of the underlying big data platform has no impact on them. That is the original intention of Linkis!

    2 Linkis Technical Architecture#

    Technical Architecture

    As shown in the figure above, we have built multiple microservice clusters based on the current popular SpringCloud microservice technology to provide high availability capabilities.

    Each microservice cluster bears part of the system's functional responsibilities, which we have divided clearly, as follows:

    -Unified Job Execution Service: A distributed REST/WebSocket service for receiving various script requests submitted by users.

    Currently supported computing engines are: Spark, Python, TiSpark, Hive, Shell, etc.

    Supported script languages are: SparkSQL, Spark Scala, Pyspark, R, Python, HQL, Shell, etc.;

    For more information about unified job execution services, please check UJES Architecture Design Document

    -Resource Management Service: Support real-time management and control of the resource usage of each system and user, limit the resource usage and concurrency of the system and users, and provide real-time resource dynamic charts to facilitate viewing and managing the system and users resource;

    Currently supported resource types: Yarn queue resources, servers (CPU and memory), number of concurrent users, etc.

    For more information about resource management services, please check RM Architecture Design Document

    -~~Application management service (not available in the open source version): manages all user applications of all systems, including offline batch applications, interactive query applications and real-time streaming applications; provides powerful reuse capabilities for offline and interactive applications, offers application lifecycle management, and automatically releases users' redundant idle applications;~~

    -Unified storage service: Universal IO architecture, which can quickly connect to various storage systems, provide a unified call entry, support all commonly used format data, high integration, and easy to use;

    For more information on unified storage services, please check [Storage Architecture Design Document]

    -Unified Context Service: Unified user and system resource files (JAR, ZIP, Properties, etc.), unified management of parameters and variables of users, systems, and calculation engines, one setting and automatic reference everywhere;

    -Material Library: System and user-level material management, which can be shared and transferred, and supports automatic management of the entire life cycle;

    -Metadata Service: Real-time display of database table structure and partition status.

    Rely on the mutual cooperation of these microservices to build a centralized and unified big data platform service externally.

    Through the construction of these services, we have improved the external service methods and processes of the entire big data platform.

    3 Linkis Business Architecture#

    Business Architecture

    Glossary:

    1) Gateway:

    Based on Spring Cloud Gateway, the plug-in function is enhanced, and a gateway service with WebSocket one-to-many capability is added, which is mainly used to parse and route user requests to designated microservices.

    2) Unified entrance:

    The unified portal is the job lifecycle manager of a certain type of engine operation of the user.

    From job generation to submission to the execution engine, to job information feedback to users and job closure, Entrance manages the entire life cycle of a job.

    3) Engine Manager:

    The engine manager is responsible for managing the entire life cycle of the engine.

    Responsible for applying for and locking resources from the resource management service, instantiating a new engine, and monitoring the life state of the engine.

    4) Execution engine:

    The execution engine is a microservice that actually executes user jobs, and it is started by the engine manager.

    In order to improve the interaction performance, the engine service directly interacts with the unified portal of the job submitted to it, executes the job correctly, and feeds back various information required by the user, such as log, progress, status, and result set.

    5) Resource Management Service

    Real-time control of the resource usage of each system and each user, manage the resource usage and actual load of the microservice cluster, and limit the resource usage and concurrency of the system and users.

    6) Eureka

    Eureka is a service discovery framework developed by Netflix. Spring Cloud integrates it into its sub-project spring-cloud-netflix to realize the service discovery function of Spring Cloud.

    Each microservice has a built-in Eureka Client, which can access Eureka Server and obtain the ability of service discovery in real time.

    4 Linkis processing flow#

    Now let's introduce how the user submits a SQL in the upper system, and how Linkis executes and returns the result.

    Process sequence diagram

    1. The user of an upper system submits a SQL statement, which first passes through the Gateway; the Gateway parses the user request and routes it to the appropriate unified entrance, Entrance

    2. Entrance first checks whether a Spark engine service is already available for this user of this system; if so, it directly submits the request to that Spark engine service

    3. If no Spark engine service is available, Entrance uses Eureka's service registration and discovery to get the list of all engine managers, and obtains each engine manager's actual load in real time by requesting the RM

    4. Entrance picks the engine manager with the lowest load and asks it to start a new Spark engine service

    5. When the engine manager receives the request, it asks the RM whether this user of this system may start a new engine

    6. If the engine may be started, the engine manager requests and locks the resources; otherwise a startup-failure exception is returned to Entrance

    7. Once the resources are successfully locked, the new Spark engine service is started; after startup succeeds, the new Spark engine is returned to Entrance

    8. After Entrance gets the new engine, it requests SQL execution from it

    9. The new Spark engine receives the SQL request, submits the SQL to Yarn for execution, and pushes logs, progress and status to Entrance in real time

    10. Entrance pushes the obtained logs, progress and status to Gateway in real time

    11. Gateway pushes back logs, progress and status to the front end

    12. Once the SQL execution succeeds, the Engine proactively pushes the result set to Entrance, and Entrance informs the front end to fetch the result.

    For the design plan under abnormal Entrance/EngineManager/Engine, please refer to UJES Architecture Design Document

    - + \ No newline at end of file diff --git a/docs/0.11.0/architecture/rm/index.html b/docs/0.11.0/architecture/rm/index.html index eec0f154053..0af820b1e6a 100644 --- a/docs/0.11.0/architecture/rm/index.html +++ b/docs/0.11.0/architecture/rm/index.html @@ -7,7 +7,7 @@ RM design | Apache Linkis - + @@ -21,7 +21,7 @@ Type of Java class (a subclass of Resource class), and the corresponding json serialization method.

  • The Java class (subclass of ResultResource class) of all resource allocation results, and the corresponding json serialization method.

  • The encapsulated RM interface (resource registration, offline, application, available resources and resource release requests).

    After calling the client's interface, the client will generate the corresponding RPC command and pass it to a microservice of RM for processing through the Sender. After RM is processed, the result is also returned to the client via RPC.

  7 Multi-instance state synchronization#

    Because RM is a key underlying service, to prevent an abnormality of a single RM instance from affecting the resource allocation of all services, multiple RM instances must be in service at the same time, and no matter which instance processes a request, the result must be consistent.

    When a user requests RM's service, the request must go through the gateway service's forwarding rather than directly to a fixed RM instance. Through the service registration and discovery mechanism, the gateway service identifies the RM instances that are serving normally, and then forwards the RPC request to one of them. This ensures that every request is processed by an RM instance in a normal state.

    All of RM's resource records are stored in the same database, and no RM instance maintains its own state. When RM processes a request, any state change involved first acquires a lock, then reads the latest state from the database in real time, immediately writes the updated state back to the database after completing the processing logic, and then releases the lock. This ensures that even when multiple RMs process requests at the same time, they always operate on the latest state.

    - + \ No newline at end of file diff --git a/docs/0.11.0/architecture/storage/file_system/index.html b/docs/0.11.0/architecture/storage/file_system/index.html index 4cf47ecb5aa..7c3d047046f 100644 --- a/docs/0.11.0/architecture/storage/file_system/index.html +++ b/docs/0.11.0/architecture/storage/file_system/index.html @@ -7,7 +7,7 @@ Docking Multiple File Systems | Apache Linkis - + @@ -24,7 +24,7 @@ Users can connect to different file systems by implementing the File System interface, which is extremely convenient for expansion.

    - + \ No newline at end of file diff --git a/docs/0.11.0/architecture/storage/remote_file_system_architecture_design/index.html b/docs/0.11.0/architecture/storage/remote_file_system_architecture_design/index.html index 12a3afc1fd7..ad4f6d0c1f4 100644 --- a/docs/0.11.0/architecture/storage/remote_file_system_architecture_design/index.html +++ b/docs/0.11.0/architecture/storage/remote_file_system_architecture_design/index.html @@ -7,7 +7,7 @@ Access Remote File System Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    Access Remote File System Architecture

    1 Background#

    Normally, a JVM process can only read and write files with the permissions of the user who started it.

    For example: User A starts a JVM process on a Linux server. If User A is not root (the superuser), the process can only access local files on that server and only has permission to operate on User A's files.

    But there are many scenarios in which we launch the JVM process as User A, yet want to access other users' files on the local filesystem without broadening any file permissions.

    At the same time, how can the HDFS file system be accessed from a machine with no HDFS installed locally?

    And how can we avoid creating Linux users just to access those users' files on HDFS?

    2 Ideas#

    By launching the filesystem engine manager and engines (IO-EM and IO-Engine) on the remote server (what is an EngineManager?) and providing a compatible client API, users are allowed to access remote file systems.

    The entire architecture is shown in the graph below:

    Storage Remote Mode Architecture

    3 Implementation#

    (1) User A calls the client (IO-Client) of the remote filesystem, passing in the file path (FsPath) and the proxy user B;

    (2) The client (IO-Client) receives the FsPath and the proxy user B, and generates a ProxyFS for proxy user B.

    (3) User A operates on proxy user B's files through ProxyFS. If the permission checks in the following steps pass, operations such as creating, deleting, reading and writing files can be performed.

    (4) User A's operations on ProxyFS are passed to the IO-Client and sent over the network to the remote filesystem service; during transmission, the Smart Routing Service (IR) selects a remote file service (IO-Engine) with a lower load.

    (5) When the remote file service (IO-Engine) receives an IO-Client operation, security rules first determine whether the transferred token is legal, then whether the IP is legal, and then whether User A is authorized to operate on User B's files.

    (6) The IO-Engine then accesses the actual filesystem through the superuser's Fs and operates on user B's files. Since the IO-Engine service is started by a superuser, it can access and operate on all users' files.

    (7) The IO-Engine completes the operation on user B's file and returns the result to the IO-Client, which returns the result to user A; the whole process of proxied remote file access is complete.
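    For comparison, Hadoop itself ships a proxy-user mechanism for step (6)-style access. The sketch below uses Hadoop's real UserGroupInformation API, while the user names and path are assumptions:

        import java.security.PrivilegedExceptionAction;

        import org.apache.hadoop.conf.Configuration;
        import org.apache.hadoop.fs.FileSystem;
        import org.apache.hadoop.fs.Path;
        import org.apache.hadoop.security.UserGroupInformation;

        public class ProxyReadDemo {
            public static void main(String[] args) throws Exception {
                Configuration conf = new Configuration();
                // The process runs as a superuser; "userB" is the user being proxied (assumption).
                UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser(
                        "userB", UserGroupInformation.getCurrentUser());
                proxyUgi.doAs((PrivilegedExceptionAction<Void>) () -> {
                    try (FileSystem fs = FileSystem.get(conf)) {
                        // Everything in here runs with userB's HDFS permissions.
                        System.out.println(fs.exists(new Path("/user/userB/data.txt")));
                    }
                    return null;
                });
            }
        }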

    Note#

    The engine manager (IO-EM) service in the graph above is responsible for starting and stopping IO-Engine services.

    The Smart Routing Service (IR) in the graph above determines the load of each IO-Engine and redirects IO-Client requests for load balancing; it notifies IO-EM to start new IO-Engine services when all IO-Engines are overloaded, and to stop idle IO-Engine services when the load is low.

    From the process above, at least the following can be seen:

    From point (5), full permission control can be achieved, and users can configure their own security rules;

    Since files are accessed through a remote filesystem service, features similar to shared storage can be achieved;

    From points (1) and (2), multiple filesystems can be supported by passing in different types of Fs.

    - + \ No newline at end of file diff --git a/docs/0.11.0/architecture/storage/resultset_file/index.html b/docs/0.11.0/architecture/storage/resultset_file/index.html index e4c9e9437b9..69a2ebfad29 100644 --- a/docs/0.11.0/architecture/storage/resultset_file/index.html +++ b/docs/0.11.0/architecture/storage/resultset_file/index.html @@ -7,7 +7,7 @@ ResultSet File Storage | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    ResultSet File Storage

    Result set file storage scheme-Dolphin file

    1 Background#

    Linkis faces the need to store multiple types of data in files, such as storing Hive table data in files, and hopes to save metadata information such as field types, column names, and comments.

    Existing file storage solutions generally only support specific data types for storage. For example, ORC supports data tables but does not support the storage of unstructured data.

    At the same time, support for saving special characters is also the reason that prompted us to define a new file format. For example, if there are special characters such as line breaks in a field in textFile, the content will be abnormal when it is parsed and read.

    Finally, if a file's content is very large, Linkis usually wants to provide pagination. Existing file storage schemes only support skipping a number of bytes; they do not support skipping a number of lines or reading only a specific line of the file.

    2 Ideas#

    Linkis defines its own file storage format, the Dolphin file, which can store multiple data types.

    Dolphin file format

    The file structure of Dolphin is shown in the figure above:

    • The Dolphin logo is stored at the beginning of the file to distinguish whether the file is a Dolphin file

    • Metadata: content metadata information

    • index Data: row length index

    • RowData: Row data.

      RowData stores a row of data, such as the data of a row of the table, including the length of the row data and the Byte information of the row data.

    • PostData: Basic file information

    • PostDataLen: Basic information length

    Among them, PostData is the basic information of the file mainly composed of:

    • type: the type of storage content

    • Codec: encoding format

    • Statistical information: The statistical information of the file content includes the number of lines, the maximum and minimum values, etc.

    3 Implementation#

    The specific process of reading and writing Dolphin files is as follows:

    Dolphin file read and write flow chart

    3.1 Write data to Dolphin#

    When the user needs to store the contents of a file (for example: table) in a Dolphin file, the steps are as follows:

    1. Write Dolphin file ID

    2. Write data type Type

    3. Through the serializer (Serializer), write Metadata (metadata) such as the column name of the table, the type of each column, column comments, etc.;

    4. Pass in a row of data to DolphinWriter, DolphinWriter serializes the row of data through a serializer (Serializer) to obtain the row length and serialized Bytes to write to the Dolphin file;

    5. After writing the row of data, it will update the statistical information (Statistical information), increase the number of row records, update the maximum and minimum values ​​of each column, etc.;

    6. DolphinWriter writes PostData (basic information) composed of statistical information and encoding information to the Dolphin file;

    7. Write the length of PostData to complete the write operation.
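    An illustrative sketch of the write steps above (not Linkis's actual implementation); for brevity it treats metadata and rows as strings and omits the trailing PostData:

        import java.io.DataOutputStream;
        import java.io.FileOutputStream;
        import java.io.IOException;
        import java.nio.charset.StandardCharsets;

        public class DolphinLikeWriter implements AutoCloseable {

            private final DataOutputStream out;

            public DolphinLikeWriter(String path, String type, String metadata) throws IOException {
                out = new DataOutputStream(new FileOutputStream(path));
                out.write("dolphin".getBytes(StandardCharsets.UTF_8)); // step 1: file flag
                writeBlock(type);                                      // step 2: data type
                writeBlock(metadata);                                  // step 3: metadata
            }

            public void writeRow(String row) throws IOException {
                writeBlock(row); // step 4: row length index + serialized row bytes
            }

            private void writeBlock(String s) throws IOException {
                byte[] bytes = s.getBytes(StandardCharsets.UTF_8);
                out.writeInt(bytes.length); // the length prefix lets readers skip rows
                out.write(bytes);
            }

            @Override
            public void close() throws IOException {
                // Steps 6-7 would append PostData (statistics, codec) and its length here.
                out.close();
            }
        }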

    3.2 Read Dolphin file#

    The steps for users to read the contents of the Dolphin file are as follows:

    1. Read the Dolphin file ID, and throw an exception if it is not a Dolphin file;

    2. If the user only needs to read Statistical information, read the length of PostData, and obtain PostData according to the length.

      Through PostData, the basic information is parsed into corresponding Type, Codec, MetaData, and Statistical information.

      Return to complete this reading operation.

    3. If the user wants to read data, first read the data type Type.

    4. Read the Metadata information, get the Deserializer through Type, and encapsulate the read Bytes data into MetaData

    5. Read the row length index, and read the row Bytes through the row length index. Obtain Deserializer through Type, convert Bytes into Record data, and encapsulate RowData with Record and MetaData;

    6. The read RowData row content is given to the user to complete the entire reading.

    3.3 Skip#

    Question: How can a single row be read? How can reading start from a given line?

    Answer: When writing a row, the row length index is written first, so when reading, the user can read the index and skip rows through the row length index;
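    A sketch of such index-based skipping, matching the illustrative writer above:

        // Skip n rows by reading each row-length index and jumping over the bytes.
        static void skipRows(java.io.DataInputStream in, int n) throws java.io.IOException {
            for (int i = 0; i < n; i++) {
                int len = in.readInt(); // the row length written before each row
                in.skipBytes(len);      // skip the serialized row without parsing it
            }
        }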

    3.4 Serialization#

    The serializer (Serializer) serializes data into a byte array, and the deserializer (Deserializer) parses the byte array back into string data, so that special characters are read and written correctly;

    Serializer and Deserializer are related to Type. Different data types can define different Serializer and Deserializer.

    Dolphin provides a common interface for user-defined implementations to support other types of files.

    - + \ No newline at end of file diff --git a/docs/0.11.0/architecture/ujes/asynchronous_thread_pool/index.html b/docs/0.11.0/architecture/ujes/asynchronous_thread_pool/index.html index 5fa6d02a562..5eb3379a076 100644 --- a/docs/0.11.0/architecture/ujes/asynchronous_thread_pool/index.html +++ b/docs/0.11.0/architecture/ujes/asynchronous_thread_pool/index.html @@ -7,7 +7,7 @@ Asynchronous Pool Call | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    Asynchronous Pool Call

    How UJES implements full asynchronous thread pool calls

    1 Advantages of a fully asynchronous thread pool#

    • 5 asynchronous message queues and thread pools

    • Each Job occupies a thread for less than 1 ms

    • Each entrance can accept more than 10,000 TPS of resident Jobs

    2 How to Implement#

    Full-asynchronous call thread pool

    • How is the throughput of upper-layer requests improved?

      The Entrance WebSocket processor internalizes a processing thread pool and a handler queue to receive the upper-layer requests routed from Spring Cloud Gateway.

    • How are different users of different systems isolated from one another?

      In the Entrance JobScheduler, each user of each system has a dedicated thread, ensuring isolation.

    • How is job execution guaranteed?

      The Job execution pool is used only for submitting Jobs. Once a Job is submitted to the Engine, it is immediately placed in the Job's execution queue, so each Job occupies an execution-pool thread for no more than 1 millisecond.

      The RPC request pool receives and processes logs, progress, status and result sets from the Engine, and updates the Job's information in real time.

    • How are a Job's logs, progress and status pushed to the upper system in real time?

      The WebSocket send pool is dedicated to processing Jobs' logs, progress and status, and pushes this information to the upper system.

    - + \ No newline at end of file diff --git a/docs/0.11.0/architecture/ujes/file_import_and_export_structure/index.html b/docs/0.11.0/architecture/ujes/file_import_and_export_structure/index.html index 2a1949496fe..62ed720eb79 100644 --- a/docs/0.11.0/architecture/ujes/file_import_and_export_structure/index.html +++ b/docs/0.11.0/architecture/ujes/file_import_and_export_structure/index.html @@ -7,7 +7,7 @@ Spark Engine File Import Export | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    Spark Engine File Import Export

    1 Background#

    Data analysts or data warehouse engineers often need to export data from databases to Excel files for data analysis, or to hand over to users or partners.

    Furthermore, users often need to jointly analyze data files such as CSV and Excel together with online Hive databases, which requires importing those files into the Hive database.

    In more confidential industries, such as banking, exports often require desensitizing sensitive fields such as ID card and mobile phone numbers.

    2 Ideas#

    Use Spark's distributed computing capability and its DataSource support, which can connect to multiple data sources.

    3 Implementation#

    3.1 Export#

    The export process is shown in the graph below:

    Export process

    1. The user selects the corresponding data source and the data table to be exported, such as the user order table in the MySQL library;

    2. The user defines the query statement for the data to be exported from the table, as well as the data transformations for specified columns.

      For example: export the order table for the last six months and desensitize the user information;

    3. The user selects the file format and output path for the export, e.g. export the user order table to Excel, with the path /home/username/orders.xlsx

    4. Spark reads the corresponding data based on the user-configured data source, tables and query statements. DataSource supports multiple data storage components, such as Hive, MySQL, Oracle, HDFS, HBase, MongoDB

    5. The data is then processed into a DataFrame according to the transformation format configured by the user

    6. A file writer object is obtained according to the user-configured file format, e.g. a Spark-based writer for Excel; the Writer supports multiple file formats, such as Excel, CSV and Json

    7. The writer writes the corresponding data to the destination, e.g. /home/username/orders.xlsx.
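    A minimal Spark sketch of steps 4, 5 and 7 above, assuming a MySQL source and writing CSV instead of Excel (Excel output needs an extra library); the connection details are placeholders:

        import org.apache.spark.sql.Dataset;
        import org.apache.spark.sql.Row;
        import org.apache.spark.sql.SparkSession;
        import static org.apache.spark.sql.functions.lit;

        public class ExportDemo {
            public static void main(String[] args) {
                SparkSession spark = SparkSession.builder().appName("export").getOrCreate();
                Dataset<Row> orders = spark.read().format("jdbc")          // step 4: DataSource read
                        .option("url", "jdbc:mysql://127.0.0.1:3306/shop") // placeholder connection
                        .option("dbtable", "orders")
                        .option("user", "reader").option("password", "***")
                        .load();
                Dataset<Row> masked = orders.withColumn("phone", lit("***")); // step 5: desensitize
                masked.write().option("header", "true").csv("/home/username/orders_csv"); // step 7
            }
        }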

    3.2 Import#

    The import process is shown below:

    Import process

    1. The user selects the file to import. The file reader reads the incoming file, e.g. /home/username/orders.xlsx;

    2. The reader reads the first N lines for data type inference, e.g. 10 lines. The Reader supports reading multiple file formats.

    3. The data type inferrer uses the first 10 lines to determine the type of each column: it determines the data type of each value row by row, decides each column's type by the type that appears most often, and returns the result to the user.

      For example: user:String, orderId:Int;

    4. The user selects the data source to import into, e.g. MySQL. Multiple selections are also supported for the imported data;

    5. The user chooses whether to create a new table, overwrite the data, or append the data. For example: select the user order table and choose appending;

    6. The user defines the transformation format of the imported data and the imported column information, e.g. decrypting the user information

    7. The scheme uses Spark to transform the file into a DataFrame according to the user-supplied transformations and column information;

    8. The corresponding DataSource is generated from the data source selected by the user

    9. The processed DataFrame is imported via the DataSource into the corresponding data source, e.g. the MySQL library.

    - + \ No newline at end of file diff --git a/docs/0.11.0/architecture/ujes/ujes_design/index.html b/docs/0.11.0/architecture/ujes/ujes_design/index.html index 02ce7156d3c..62107a73f0e 100644 --- a/docs/0.11.0/architecture/ujes/ujes_design/index.html +++ b/docs/0.11.0/architecture/ujes/ujes_design/index.html @@ -7,7 +7,7 @@ UJES Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    UJES Design

    1 Document Overview#

    1.1 Project background#

    UJES (Unified Job Execution Service), one of Linkis's core components, is the unified job execution service. The project provides a new general framework for the big data ecosystem, implemented as microservices, and addresses some pain points of existing open source projects of the same kind on the market.

    This document is suitable for readers with some working experience in, or an interest in learning about, big data platforms, in particular the Hadoop ecosystem.

    1.2 Interpretation of terms#

    This section explains the terminology used for certain items later in the document.

    1) Gateway:

    UJES's gateway, enhanced with plugins based on Spring Cloud Gateway, adds WebSocket one-to-many capability to the gateway service, and is mainly used to forward user requests to the specified microservices.

    2) Entrance microservice:

    UJES's Entrance microservice is the manager of a user's class of jobs. From job generation to submission to the execution engine, to feeding job information back to the user and closing the job, the Entrance microservice manages the entire life cycle of a job.

    3) Engine manager:

    UJES's Engine Manager is the microservice that handles engine start-up requests and also monitors the life state of the engines.

    4) Execution Engine:

    UJES's execution engine is the microservice that actually executes user jobs. It is launched by the Engine Manager, interacts with the Entrance that submitted the job, executes the job correctly, and feeds back the information the user needs.

    5) Application manager:

    UJES's application management microservice maintains the information of the execution engine instances in the cluster; the Entrance microservice always needs this information to get an available execution engine.

    2 Overall architecture#

    The goal of the UJES project is to connect users and data correctly and securely, and to provide users with powerful, easy-to-use data job submission.

    UJES is positioned as a bridge between the upper application system and the lower computing storage engine.

    Users only need to submit their large data jobs to UJES; UJES will submit them to the lower computing storage engine for execution. The logs, status, progress, results of the operation will be returned from UJES to the user in real time.

    The overall structure of UJES is shown in figure 2.1.

    UJES Overall Architecture

    As shown in the graph above, the UJES framework sits between the upper application systems and the lower computing and storage systems, acting as the manager of user jobs. It encapsulates the data storage, computing and other capabilities of the big data cluster and provides a unified job submission framework: users no longer need to distinguish job types such as Spark or Hive, but simply submit to UJES to properly put the cluster at their service, saving significant learning costs.

    3 Logical architecture#

    UJES's logical architecture is designed on the prevailing microservice architecture model.

    The microservice architecture divides backend services into a group of small services that coordinate with and reinforce each other.

    Lightweight communication mechanisms are used between microservices (usually an HTTP-based Restful API).

    This architecture model has the advantages of clear logic, simple deployment, easy extension, technological heterogeneity, and high reliability.

    The logical structure of UJES is shown in figure 3.1.

    UJES Logical Architecture

    3.1 UJES Operational Main Process#

    A complete example is used to describe the main process of UJES operation; the functionality of each microservice component in the diagram is described in more detail after the main process.

    • 1. User submits a job, gateway forwards it

      User A submits a big data job to UJES's gateway microservice, e.g. through Restful or WebSocket. The gateway forwards the user's request to the designated Entrance microservice according to the job type: if the user submits spark-sql code, the gateway submits the job to Spark's Entrance microservice.

      Since Entrance microservices can be deployed as multiple instances, the gateway forwards the request to a suitable microservice instance according to the load-balancing strategy.

    • 2. Entrance parses and checks

      Once the user's job is forwarded to Spark's Entrance microservice, the parser in the Entrance resolves the submitted job into a runnable task, and the persister stores the task in the database.

      Preset interceptors also perform custom variable replacement, malicious code checks, etc. on the script.

      If a user's request is intercepted, the code will not be submitted to the cluster for execution.

    • 3. Listener setup

      Information generated while the task runs needs to be processed as soon as it is produced, e.g. shown to the user or persisted to the database. This generally requires event aggregation and listeners, so various listeners are registered for the task.

    • 4. Task enters the scheduler

      Once generated, the task enters the scheduler to wait for scheduling.

      The core concept in the scheduler is the consumer queue, identified by the consumer group, which is usually identified by both the user's system and the user: if user Anna submits a task to UJES from the system Test, the consumer group can be marked Test_Anna.

      Once a task enters the scheduler, it is placed in the consumer queue matching its group identifier, waiting to be scheduled.

      Consumption of a consumer queue is generally implemented as a single thread.

    • 5. Application management microservice: providing an execution engine

      Once the task is scheduled, the Entrance microservice applies for an execution engine from the application management microservice.

      The application management microservice checks whether there are engines in the cluster that the user's consumer group can use. If there is an execution engine available to the consumer group, the engine's information is returned to the Entrance microservice, which refers the task to that execution engine for execution.

      If the application management microservice finds no engine in the cluster that the group can use, it requests a new execution engine from the Engine Manager microservice.

      The consumption thread of the Entrance microservice waits until the application management microservice returns the engine information: success, failure, or timeout.

    • 6. Engine Manager microservice: starting and managing engines

      The Engine Manager microservice starts and manages execution engines.

      When the Engine Manager receives a request from the application management microservice to launch a new engine, the request carries the user's consumer group information. The Engine Manager applies for resources from the resource manager based on that information. If the user still has sufficient resources, the resource manager allows the Engine Manager to start a new engine for the user and to broadcast it to the application management microservice.

    • 7. Entrance microservice submits the task to the execution engine

      After step 6, the application management microservice has the information on the newly launched engine and returns it to the Entrance microservice, which then submits tasks to that engine.

    • 8. Interaction between Entrance and engine

      Once the task is submitted to the execution engine, it runs, producing logs, progress and result information, which is returned to the Entrance microservice through RPC. The returned information carries the task's unique identifier, so the Entrance microservice can process it correctly.

    • 9. Task completion

      Once the task finishes running on the execution engine, the success or failure state is returned to the Entrance microservice. After the task status is updated in the Entrance microservice, the consumer queue continues to consume the tasks in the queue.

    3.2 Details of the architecture and optimization#

    In addition to the main process described in section 3.1, UJES has its own designs for cluster management and performance enhancement.

    • 1. Task classification and diverse consumption patterns

      Tasks can be categorized according to their characteristics: brand-new tasks, retry tasks, duplicate tasks, etc.

      A new task is a task newly submitted by the user; a retry task is a task that must be retried after failing under certain circumstances; a duplicate task is a task identical to a previous submission.

      After a task enters the scheduler's consumer queue, a new task goes to the FIFO consumer (Consumer) for consumption; a duplicate task goes to the ReUse consumer, which does far less work than FIFO and directly returns the result of the previous task to the user.

    • 2. Control of engine concurrency

      In UJES, the number of engines a user can start is controlled, e.g. a user can start at most three Spark engines. This concurrency control is guaranteed jointly by the Entrance microservice and the resource manager microservice.

      Each user has at most three active task consumers in the Entrance microservice, so at most three engines will be used. The resource management microservice also provides assurance: if a user tries to launch a fourth engine, the Engine Manager must request resources from the resource management microservice, which refuses to provide resources to that user because the number of engines exceeds the limit, so the fourth engine fails to start.

    • 3. Execution engine heartbeats and unhealthy engines

      After obtaining an engine's information, the application management microservice needs to maintain a heartbeat with the engine to ensure that the engine process is still alive.

      If an engine fails to heartbeat back for a period of time, it is added to the unhealthy engine list and will not be used when engines are requested.

    • 4. Natural demise of engines and active killing by users

      Live engines occupy cluster resources, particularly Spark engines, which occupy more queue resources. So if the Engine Manager detects that an execution engine has not been used for a long time, it kills the engine to free the cluster's resources, and broadcasts to the application manager after the engine is correctly killed.

      Users may also want to kill engines proactively while using UJES: the user submits a request to the gateway, the gateway forwards it to the Engine Manager, and the Engine Manager kills the engine.

    • 5. Tenant isolation

      Multi-tenant isolation is an important function of a big data platform, and UJES architecturally supports multi-tenant isolation in conjunction with the Hadoop ecosystem components.

      User jobs are executed on the execution engine, and when launching a new execution engine, UJES switches to the user to execute the system command, so the execution engine process runs with that user's permissions, completely isolated from engines started by other users, thus realizing multi-tenant isolation.

    • 6. Smart diagnosis

      Smart diagnosis is a distinctive UJES module. Big data jobs often compute over large amounts of data, occupy many cluster resources, and take a long time to run.

      Users always want feedback from the cluster, such as whether data is skewed and whether queue resources are sufficient.

      Smart diagnosis is designed for this need: the diagnostic module can analyze the resources and data of a user's job while the job is running, and transmit the analysis to the user in real time.

    4 Interface Design#

    4.1 External Interface Design#

    UJES External Interface means interfaces with users and clusters.

    • 1 User Interface

      UJES’s user access to UJES is usually in Retful and WebSocket.

      Users are required to encapsulate their requests into Json in the prescribed format, and then submit their requests through the post system.

      It is recommended that users access UJES via WebSocket. The data exchange specification is given later in this document; an illustrative request body follows.
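      For illustration only, a submission body might look like the following; the service name entrance and the field names executionCode and runType are hypothetical placeholders, not the prescribed format itself:

          {
            "method": "/api/v1/entrance/execute",
            "data": {
              "executionCode": "show tables",
              "runType": "sql"
            }
          }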

    • 2 Cluster interface

      UJES's interaction with clusters is determined by the engine type.

      As shown in figure 2.1, UJES's execution engines span the UJES and cluster levels.

      As an example, the Spark execution engine interacts with clusters through the Driver API provided by Spark.

      When using the UJES framework, users can interface with clusters or other server resources according to their needs and characteristics.

    4.2 Framework interface design#

    UJES serves as a framework, against which framework developers can develop according to their needs.

    Access to the framework is generally via the SDK. After introducing the UJES SDK through dependency management such as Maven or Gradle, users need to implement the following interfaces:

    1) Access interface to entrance microservices

    2) Engine Manager access interface

    3) Engine access interface

    See UJES's access documentation for details.

    4.3 Internal Functional Module Interface Design#

    Interactions between UJES's internal functional modules are based on the Feign-based RPC method. For the Linkis RPC architecture, please click here.

    UJES's Entrance, EngineManager and Engine all communicate via Linkis RPC.

    Take the interaction between Entrance and Engine in particular: Entrance sends the user's request through a Sender to the Engine's Receiver; the Engine's Receiver saves the sending side's Sender and, while executing the user's request, uses that Sender to push logs, progress and status back to Entrance as soon as they are produced.

    RPC Framework

    5 Deployment structure#

    5.1 Traditional modes of deployment#

    Please see the rapid deployment document.

    - + \ No newline at end of file diff --git a/docs/0.11.0/architecture/websocket/index.html b/docs/0.11.0/architecture/websocket/index.html index efd2b5b4618..3dc62bee35d 100644 --- a/docs/0.11.0/architecture/websocket/index.html +++ b/docs/0.11.0/architecture/websocket/index.html @@ -7,7 +7,7 @@ WebSocket Request | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    WebSocket Request

    Gateway's multi-WebSocket request forwarding implementation

    1 Feature points#

    • 1-to-N support between frontend clients and backend WebSocket microservices

    • Full life-cycle management of WebSocket channels

    2 Zuul's Bug#

    Zuul does not support forwarding WebSocket requests at all.

    3 Spring Cloud Gateway Limitations#

    A WebSocket client can only have its requests forwarded to one specific backend service; a single WebSocket client cannot, through the gateway API, communicate with multiple WebSocket microservices.

    Limitations of Spring Cloud Gateway

    4 Linkis Solution#

    Linkis implements a WebSocket router-forwarder in Spring Cloud Gateway. It establishes WebSocket connections with clients, automatically parses the clients' WebSocket requests, determines by rule which backend microservice each request should be forwarded to, and forwards the request to the corresponding backend service instance.

    Linkis's Gateway Scheme

    The WebSocket router-forwarder maintains the upstream WebSocket connection with the client and connects downstream to multiple backend WebSocket microservice instances. To forward clients' WebSocket requests based on rules, the WebSocket router-forwarder is architected as follows:

    WebSocket router schema

    4.1 WebSocket Receiver#

    1) The WebSocket receiver is a global filter of Spring Cloud Gateway. It receives a client's WebSocket connection request and creates a 1-to-1 WebSocket channel for communication between the client and Spring Cloud Gateway.

    2) At the same time, it listens on the WebSocket channel; when the client sends a request, it obtains the essential basic information (such as the request address, URI and user), performs a simple encapsulation, and passes it to the rulers for processing.

    4.2 Rulers#

    1) The rulers receive a notification from the WebSocket receiver and start processing with their rules.

    2) URL Ruler

    Linkis defines the client's request text frame (TextWebSocketFrame) as a JSON string of the following form:

    {'method': '/api/v1/${service}/${uriPath}', 'data': ''}

    where:

    method is the actual request URI: the leading /api is fixed, marking an API request; v1 is the API version; service is the name of the requested service; uriPath is the actual request path.

    data is the actual request data.

    The service information is obtained by parsing method, and is passed to step 4.
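    For example (hypothetical values), a text frame of {'method': '/api/v1/entrance/execute', 'data': '...'} parses to service = entrance and uriPath = execute, so entrance is the service name handed to step 4.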

    3) If the client's request text frame (TextWebSocketFrame) does not conform to the URL ruler's standard format, or if the URL ruler cannot parse the service information, the user-defined rulers are loaded to parse the service. If none of the custom rulers can parse the service information, a parsing error is returned to the client directly; otherwise the service information is passed directly to the next step.

    4) With the service information obtained in step 2 or step 3, the ruler fetches the list of all healthy microservices from the discovery service (e.g. Eureka), finds all instances of that microservice, and through a load-balancing approach selects the instance with the smallest load, passing it to the WebSocket forwarder.

    4.3 WebSocket forwarder#

    The WebSocket forwarder consists of the WebSocket manager and the WebSocket request forwarder.

    1) WebSocket Manager

    The WebSocket manager is responsible for managing the 1-to-1 WebSocket channels between clients and the WebSocket receiver, and the 1-to-many WebSocket channels between the WebSocket forwarder and backend microservice instances.

    If a client disconnects from the WebSocket receiver, the WebSocket manager immediately disconnects all of that client's related 1-to-many WebSocket channels between the forwarder and backend microservice instances;

    At the same time, to keep the WebSocket channels between the forwarder and backend microservice instances from being released as idle, the WebSocket manager keeps sending PingWebSocketFrames to the backend microservice instances.

    2) WebSocket request forwarder

    The WebSocket request forwarder gets the microservice instance information from the ruler.

    Note the distinction between a service and a service instance here: a microservice may have multiple instances, each with exactly the same functionality.

    The forwarder asks the WebSocket manager whether a WebSocket channel to this microservice instance already exists for this client and this microservice. If it exists, that channel is used to forward the client's request text frame (TextWebSocketFrame); otherwise, a brand-new WebSocket connection is created between the forwarder and this microservice instance, bound to the client's 1-to-1 WebSocket channel with the WebSocket receiver, and the returned information is then pushed back to the client through the channel between the client and the WebSocket receiver.

    - + \ No newline at end of file diff --git a/docs/0.11.0/deployment/engine_conn_plugin_installation/index.html b/docs/0.11.0/deployment/engine_conn_plugin_installation/index.html index 2a79196f972..b3edd989be2 100644 --- a/docs/0.11.0/deployment/engine_conn_plugin_installation/index.html +++ b/docs/0.11.0/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ Install EngineConnPlugin Engine | Apache Linkis - + @@ -18,7 +18,7 @@ sh linkis-daemon.sh restart linkis-engine-plugin-server
    - + \ No newline at end of file diff --git a/docs/0.11.0/deployment/production_deployment _guide/index.html b/docs/0.11.0/deployment/production_deployment _guide/index.html index 9ff790fd98b..e25662e3ec8 100644 --- a/docs/0.11.0/deployment/production_deployment _guide/index.html +++ b/docs/0.11.0/deployment/production_deployment _guide/index.html @@ -7,7 +7,7 @@ Production Deployment Reference Guide | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    Production Deployment Reference Guide

    1 Introduction#

         Linkis has been running stably on the WeBank big data production platform for more than two years. Development and operations staff have summarized a set of Linkis production deployment guidelines, to let Linkis deliver its maximum performance on the basis of stable operation while also saving server resources and reducing usage costs. The guide covers two major topics: deployment plan selection and parameter configuration. Linkis has also undergone long-term testing in the test environment, and we give our stress-test practice and experience in Chapter 4.

    2 Deployment plan selection#

         Linkis's stand-alone deployment is simple, but it cannot be used in a production environment, because running too many processes on one server puts the server under too much pressure.

         The choice of deployment plan is related to the company's user scale, user habits, and the number of simultaneous cluster users. Generally speaking, we choose the deployment method based on the number of simultaneous Linkis users and the users' preferences among the execution engines.

         The following describes deployment plans by the number of simultaneous users, assuming that users prefer Spark the most and Hive second. Server memory of 64G or more is recommended.

         On machines where an EngineManager is installed, the memory load is relatively high because the users' engine processes are started there; machines hosting the other types of microservices carry a relatively low load.

         We generally recommend reserving about 20G on the server where the EM is installed for the Linux system, the EM's own process and other processes. For a server with 128G of memory, for example, after setting aside the 20G there is still 100G of memory that can be used to start engine processes; if a Spark driver uses 4G of memory, that server can start up to 25 Spark engines.

    The formula for calculating the total resources used:

    Total resources used by Linkis = total memory + total number of cores
    = (number of simultaneous users × Driver or Hive client memory) + (number of simultaneous users × Driver or Hive client cores)

    For example, if there are 50 simultaneous users, Spark's driver memory is 2G, the Hive client memory is 2G, and each engine uses two cores, then the total is 50 × 2G + 50 × 2 cores = 100G of memory + 100 CPU cores.

    Conventions before parameter configuration (must read):

    1. Parameters are generally configured in linkis.properties in the conf directory of the microservice installation directory, in key=value form, e.g. wds.linkis.enginemanager.cores.max=20 (see the sketch after this list). The only exception is engine microservices, whose configuration goes in linkis-engine.properties.

    2. After parameters are configured, the microservice must be restarted for them to take effect. For engine parameters, after configuration, kill the engine on the engine manager page and restart the engine for the change to take effect.
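    As a minimal sketch, assuming an EngineManager whose conf directory contains linkis.properties, the entries below simply restate suggested values from the tables in this guide; they are illustrative, not mandatory:

        # linkis.properties (illustrative values taken from the tables below)
        wds.linkis.enginemanager.memory.max=40G
        wds.linkis.enginemanager.cores.max=20
        wds.linkis.enginemanager.protected.memory=2G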

    A reference deployment plan is provided below.

    2.1 The number of simultaneous users 10-50#

    1). The best recommendation for server configuration: 4 servers, named S1, S2, S3, S4

    Service Name | Deployment Selection | Description
    SparkEngineManager | S1 | Spark EM requires an exclusive server, because it is assumed that users prefer Spark the most (if Hive is preferred, this can be adjusted)
    SparkEntrance | S2 |
    HiveEngineManager | S3 |
    HiveEntrance | S2 |
    PythonEngineManager | S3 |
    PythonEntrance | S2 |
    Others (Eureka, Gateway, etc.) | S4 | If this machine is under too much pressure, another server can be added to deploy services separately

    2). Minimum server configuration: 2 servers

    3). Parameter configuration

    Parameters are configured in linkis.properties and linkis-engine.properties in the conf directory under the microservice installation directory. Parameter configuration generally falls into two types: Entrance and EngineManager.

    a) Entrance microservice

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.rpc.receiver.asyn.queue.size.max | Queue size of RPC messages received by the Entrance microservice | 2000
    wds.linkis.rpc.receiver.asyn.consumer.thread.max | Size of the Entrance microservice's RPC consumer thread pool | 100

    b) EngineManager microservice

    Note: Linkis defines the concept of protected resources. The purpose of protecting resources is to reserve a certain amount of resources, so that the EM never uses up its maximum resources, thereby protecting the machine.

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.enginemanager.memory.max | Total memory of all engines started by the EM process | 40G (64G server) or 100G (128G server)
    wds.linkis.enginemanager.cores.max | Total number of cores of all engines started by the EM process | 20
    wds.linkis.enginemanager.engine.instances.max | Total number of engines started by the EM process | 20
    wds.linkis.enginemanager.protected.memory | Memory reserved for protection by the EM process | 2G (meaning at most 38G (40-2) of memory can be used)
    wds.linkis.enginemanager.protected.cores.max | Number of cores reserved for protection by the EM process | 2 (meaning at most 18 (20-2) cores can be used)
    wds.linkis.enginemanager.protected.engine.instances | Number of engine instances reserved for protection by the EM process | 1 (meaning at most 19 (20-1) engines can be started)

    2.2 Number of concurrent users 50-100#

    1). Recommended server configuration: 7 servers, named S1, S2...S7

    Service Name | Deployment Selection | Description
    SparkEngineManager | S1, S2 |
    SparkEntrance | S5 |
    HiveEngineManager | S3, S4 |
    HiveEntrance | S5 |
    PythonEngineManager | S4 |
    PythonEntrance | S4 |
    Eureka, Gateway, RM | S6 | Eureka and RM require high-availability deployment
    PublicService, RM, Datasource, Eureka | S7 | Eureka and RM require high-availability deployment

    2). Minimum server configuration: 4 servers

    3). Parameter configuration

    a) Entrance microservice

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.rpc.receiver.asyn.queue.size.max | Queue size of RPC messages received by the Entrance microservice | 3000
    wds.linkis.rpc.receiver.asyn.consumer.thread.max | Size of the Entrance microservice's RPC consumer thread pool | 120

    b) EngineManager microservice

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.enginemanager.memory.max | Total memory of all engines started by the EM process | 40G (64G server) or 100G (128G server)
    wds.linkis.enginemanager.cores.max | Total number of cores of all engines started by the EM process | 20
    wds.linkis.enginemanager.engine.instances.max | Total number of engines started by the EM process | 20
    wds.linkis.enginemanager.protected.memory | Memory reserved for protection by the EM process | 2G (meaning at most 38G (40-2) of memory can be used)
    wds.linkis.enginemanager.protected.cores.max | Number of cores reserved for protection by the EM process | 2 (meaning at most 18 (20-2) cores can be used)
    wds.linkis.enginemanager.protected.engine.instances | Number of engine instances reserved for protection by the EM process | 1 (meaning at most 19 (20-1) engines can be started)

    2.3 Number of simultaneous users 100-300#

    1). Recommended server configuration: 11 servers, named S1, S2...S11

    Service Name | Deployment Selection | Description
    SparkEngineManager | S1, S2, S3, S4 |
    SparkEntrance | S8 |
    HiveEngineManager | S5, S6, S7 |
    HiveEntrance | S8 |
    PythonEngineManager | S9 |
    PythonEntrance | S9 |
    Eureka, Gateway, RM | S10 | Eureka and RM require high-availability deployment
    PublicService, RM, Datasource, Eureka | S11 | Eureka and RM require high-availability deployment

    2). Minimum server configuration: 6 servers

    3). Parameter configuration

    a) Entrance microservice

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.rpc.receiver.asyn.queue.size.max | Queue size of RPC messages received by the Entrance microservice | 4000
    wds.linkis.rpc.receiver.asyn.consumer.thread.max | Size of the Entrance microservice's RPC consumer thread pool | 150

    b) EngineManager microservice

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.enginemanager.memory.max | Total memory of all engines started by the EM process | 40G (64G server) or 100G (128G server)
    wds.linkis.enginemanager.cores.max | Total number of cores of all engines started by the EM process | 20
    wds.linkis.enginemanager.engine.instances.max | Total number of engines started by the EM process | 20
    wds.linkis.enginemanager.protected.memory | Memory reserved for protection by the EM process | 2G (meaning at most 38G (40-2) of memory can be used)
    wds.linkis.enginemanager.protected.cores.max | Number of cores reserved for protection by the EM process | 2 (meaning at most 18 (20-2) cores can be used)
    wds.linkis.enginemanager.protected.engine.instances | Number of engine instances reserved for protection by the EM process | 1 (meaning at most 19 (20-1) engines can be started)

    2.4 Number of concurrent users 300-500#

    1). Recommended server configuration: 15 servers, named S1, S2...S15

    Service Name | Deployment Selection | Description
    SparkEngineManager | S1, S2, S3, S4, S5, S6, S7 |
    SparkEntrance | S12 |
    HiveEngineManager | S8, S9, S10, S11 |
    HiveEntrance | S12 |
    PythonEngineManager | S13 |
    PythonEntrance | S13 |
    Eureka, Gateway, RM | S14 | Eureka and RM require high-availability deployment
    PublicService, RM, Datasource, Eureka | S15 | Eureka and RM require high-availability deployment

    2). Minimum server configuration: 10 servers

    3). Parameter configuration

    a) Entrance microservice

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.rpc.receiver.asyn.queue.size.max | Queue size of RPC messages received by the Entrance microservice | 5000
    wds.linkis.rpc.receiver.asyn.consumer.thread.max | Size of the Entrance microservice's RPC consumer thread pool | 150

    b) EngineManager microservice

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.enginemanager.memory.max | Total memory of all engines started by the EM process | 40G (64G server) or 100G (128G server)
    wds.linkis.enginemanager.cores.max | Total number of cores of all engines started by the EM process | 20
    wds.linkis.enginemanager.engine.instances.max | Total number of engines started by the EM process | 20
    wds.linkis.enginemanager.protected.memory | Memory reserved for protection by the EM process | 2G (meaning at most 38G (40-2) of memory can be used)
    wds.linkis.enginemanager.protected.cores.max | Number of cores reserved for protection by the EM process | 2 (meaning at most 18 (20-2) cores can be used)
    wds.linkis.enginemanager.protected.engine.instances | Number of engine instances reserved for protection by the EM process | 1 (meaning at most 19 (20-1) engines can be started)

    2.5 The number of simultaneous users is more than 500#

    1). Recommended server configuration: 25 servers, named S1, S2...S25

    Service Name | Deployment Selection | Description
    SparkEngineManager | S1, S2, S3, S4, S5, S6, S7, S8, S9, S10 |
    SparkEntrance | S17 |
    HiveEngineManager | S11, S12, S13, S14, S15, S16 |
    HiveEntrance | S17 |
    PythonEngineManager | S18, S19 |
    PythonEntrance | S20 |
    Eureka, RM | S21 | Eureka and RM require high-availability deployment
    RM, Eureka | S22 | Eureka and RM require high-availability deployment
    Eureka, PublicService | S23 | Eureka and RM require high-availability deployment
    Gateway, Datasource | S24 |

    2). Minimum server configuration: 15 servers

    3). Parameter configuration

    a) Entrance microservice

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.rpc.receiver.asyn.queue.size.max | Queue size of RPC messages received by the Entrance microservice | 5000
    wds.linkis.rpc.receiver.asyn.consumer.thread.max | Size of the Entrance microservice's RPC consumer thread pool | 200

    b) EngineManager microservice

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.enginemanager.memory.max | Total memory of all engines started by the EM process | 40G (64G server) or 100G (128G server)
    wds.linkis.enginemanager.cores.max | Total number of cores of all engines started by the EM process | 20
    wds.linkis.enginemanager.engine.instances.max | Total number of engines started by the EM process | 20
    wds.linkis.enginemanager.protected.memory | Memory reserved for protection by the EM process | 2G (meaning at most 38G (40-2) of memory can be used)
    wds.linkis.enginemanager.protected.cores.max | Number of cores reserved for protection by the EM process | 2 (meaning at most 18 (20-2) cores can be used)
    wds.linkis.enginemanager.protected.engine.instances | Number of engine instances reserved for protection by the EM process | 1 (meaning at most 19 (20-1) engines can be started)

    3 Other general parameter configuration#

    In addition to the two types of microservices, Entrance and EngineManager, Linkis has other microservices that also have their own configurable parameters.

    3.1 PublicService custom configuration#

    The PublicService microservice carries Linkis's various auxiliary functions, including file editing and saving, and result-set reading.

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.workspace.filesystem.get.timeout | Timeout for obtaining the file system | 10000 (in ms)
    wds.linkis.workspace.resultset.download.maxsize | Maximum number of rows of a downloaded result set | 5000 (download at most 5000 rows) or -1 (download everything)

    3.2 Engine Microservice#

    Engine microservices include the Spark, Hive and Python engines. Their configuration parameters must be modified in linkis-engine.properties under conf in the EngineManager installation directory.

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.engine.max.free.time | How long an engine may stay idle before being killed | 3h (meaning an engine is automatically killed after three hours without executing a task)
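    For example, a minimal linkis-engine.properties sketch using the suggested value above; illustrative only:

        # linkis-engine.properties under conf in the EngineManager installation directory
        wds.linkis.engine.max.free.time=3h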

    4 Summary#

    Linkis's deployment plan is closely related to how it is used, and the number of simultaneous users is the biggest influencing factor. To let users work comfortably and reduce cluster server costs, operations developers need to keep experimenting and listen to user feedback; if the deployed plan proves inappropriate, it should be changed in a timely and appropriate manner.

    - + \ No newline at end of file diff --git a/docs/0.11.0/deployment/quick_deploy/index.html b/docs/0.11.0/deployment/quick_deploy/index.html index 01d384d08c5..48695b1c7cd 100644 --- a/docs/0.11.0/deployment/quick_deploy/index.html +++ b/docs/0.11.0/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ Quick Deployment | Apache Linkis - + @@ -32,7 +32,7 @@ // 3. Start code execution val jobExecuteResult = client.execute(JobExecuteAction.builder() .setCreator("LinkisClient-Test") //creator, requesting the system name of the Linkis client, used for system-level isolation .addExecuteCode("show tables") //ExecutionCode The code to be executed .setEngineType(EngineType.SPARK) // The execution engine type of Linkis that you want to request, such as Spark hive, etc. .setUser("${username}").build()) //User, request user; used for user-level multi-tenant isolation println("execId: "+ jobExecuteResult.getExecID + ", taskId:" + jobExecuteResult.taskID) // 4. Get the execution status of the script var status = client.status(jobExecuteResult) while(!status.isCompleted) { // 5. Get the execution progress of the script val progress = client.progress(jobExecuteResult) val progressInfo = if(progress.getProgressInfo != null) progress.getProgressInfo.toList else List.empty println("progress: "+ progress.getProgress + ", progressInfo:" + progressInfo) Utils.sleepQuietly(500) status = client.status(jobExecuteResult) } // 6. Get the job information of the script val jobInfo = client.getJobInfo(jobExecuteResult) // 7. Get the list of result sets (if the user submits multiple SQL at a time, multiple result sets will be generated) val resultSet = jobInfo.getResultSetList(client).head // 8. Get a specific result set through a result set information val fileContents = client.resultSet(ResultSetAction.builder().setPath(resultSet).setUser(jobExecuteResult.getUser).build()).getFileContent println("fileContents: "+ fileContents) IOUtils.closeQuietly(client)}
    - + \ No newline at end of file diff --git a/docs/0.11.0/deployment/quick_start/index.html b/docs/0.11.0/deployment/quick_start/index.html index 220c6fee2b4..0b59b8e3359 100644 --- a/docs/0.11.0/deployment/quick_start/index.html +++ b/docs/0.11.0/deployment/quick_start/index.html @@ -7,7 +7,7 @@ Quick Start | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    Quick Start

    The start scripts need to be executed after installation.

    1 Start Service#

    Execute the following command in the installation directory to start all services:

      ./bin/start-all.sh > start.log 2>start_error.log

    2 View successful startup#

    You can check whether the services started successfully on the Eureka page, as follows:

    Open http://${EUREKA_INSTALL_IP}:${EUREKA_PORT} in a browser and check whether the services registered successfully.

    If you did not specify EUREKA_INSTALL_IP and EUREKA_PORT in config.sh, the HTTP address is http://127.0.0.1:20303

    As shown in the figure below, if the following microservices appear on your Eureka homepage, it means that the services have been started successfully and you can provide services to the outside world normally:

    Eureka

    3 Quick Use Linkis#

    Please refer to the Quick Use Linkis guide.

    - + \ No newline at end of file diff --git a/docs/0.11.0/deployment/sourcecode_hierarchical_structure/index.html b/docs/0.11.0/deployment/sourcecode_hierarchical_structure/index.html index 2d6362707a8..4ca45ad57c6 100644 --- a/docs/0.11.0/deployment/sourcecode_hierarchical_structure/index.html +++ b/docs/0.11.0/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Source Code Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    Source Code Directory Structure

    An explanation of Linkis's hierarchical directory structure. If you want to learn about Linkis in detail, please check the Linkis architecture design documents.

    ├─assembly
    ├─bin
    ├─conf
    ├─core //Core abstraction, which contains all common modules
    │ ├─cloudModule //Modules that must be introduced by microservices, embedded Jetty + WebSocket + SpringBoot + Jersey
    │ ├─cloudMybatis //Mybatis module of SpringCloud
    │ ├─cloudProtocol //General protocol, such as RPC communication between Entrance and Engine
    │ ├─cloudRPC //RPC module, complex two-way communication based on Feign implementation
    │ ├─common //Common module, built-in many common tools
    │ ├─httpclient //Java SDK top-level interface
    │ └─scheduler //General scheduling module
    ├─db //Database information
    ├─docs //All documents
    ├─eurekaServer //Eureka module
    ├─extensions //plugin
    │ └─spark-excel //spark supports excel to DF/DF to excel plug-in
    ├─gateway //Gateway module
    │ ├─core //Gateway core implementation, including authentication/analysis/routing of front-end interfaces
    │ ├─gateway-httpclient-support //gateway support for Java SDK
    │ ├─gateway-ujes-support //Analysis and routing support for UJES interface
    │ └─springcloudgateway //Introduce spring cloud gateway, front-end requests are intercepted from here
    ├─publicService //public service
    │ ├─application //application module
    │ ├─bin
    │ ├─conf
    │ ├─configuration //Parameter module, get the engine parameters from here
    │ ├─database //Provide Hive metadata query service
    │ ├─query //Provide Job Manager and Job History
    │ ├─udf //UDF module
    │ ├─variable //User-defined variable module
    │ └─workspace //Workspace module, manage user scripts
    ├─resourceManager //Resource management service
    │ ├─resourcemanagerclient //resource management client
    │ ├─resourcemanagercommon //Common module
    │ └─resourcemanagerserver //Resource management server
    ├─storage //Unified storage service
    │ ├─pesIO //Remote storage service
    │ │ ├─io-engine //The engine side of remote storage, which actually accesses the bottom storage side
    │ │ ├─io-enginemanager //engineManger for remote storage
    │ │ └─io-entrance //Request entry for remote storage
    │ └─storage //Unified external interface for unified storage
    └─ujes //Unified operation execution service
    │ ├─client //Java SDK, users can directly access Linkis through Client
    │ ├─definedEngines //Implemented engines
    │ │ ├─hive //Hive engine
    │ │ │ ├─engine //The engine execution end of the actual docking with the underlying Hive
    │ │ │ ├─enginemanager
    │ │ │ └─entrance
    │ │ ├─pipeline //Import and export engine for mutual conduction between storage systems
    │ │ │ ├─engine
    │ │ │ ├─enginemanager
    │ │ │ └─entrance
    │ │ ├─python //stand-alone Python engine
    │ │ │ ├─engine //The engine execution end that actually docks with the underlying Python
    │ │ │ ├─enginemanager
    │ │ │ └─entrance
    │ │ ├─spark //spark engine
    │ │ │ ├─engine //The actual connection to the engine execution end of the underlying Spark
    │ │ │ ├─enginemanager
    │ │ │ └─entrance
    │ │ └─tispark //TiSpark engine, actually docking with TiSpark engine
    │ ├─engine //General low-level engine module
    │ ├─enginemanager //General low-level enginemanager module
    │ ├─entrance //General low-level entrance module
    │ └─entranceclient //Simplified version of entrance
    - + \ No newline at end of file diff --git a/docs/0.11.0/development/compile_and_package/index.html b/docs/0.11.0/development/compile_and_package/index.html index d577c880f3d..b69c31ddaac 100644 --- a/docs/0.11.0/development/compile_and_package/index.html +++ b/docs/0.11.0/development/compile_and_package/index.html @@ -7,7 +7,7 @@ Compile And Package | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    Compile And Package

    1 Fully compile Linkis#

    Compilation environment requirements: JDK8 or above is required for compilation, and both Oracle/Sun and OpenJDK are supported.

    After obtaining the project code from git, use maven to package the project installation package.

    Please note: The official recommendation is to use Hadoop-2.7.2, Hive-1.2.1, Spark-2.4.3 and Scala-2.11.12 to compile Linkis.

    If you want to use other versions of Hadoop, Hive, and Spark to compile Linkis, you can enter the root directory of the Linkis source code package and manually modify the relevant version information of the pom.xml file, as follows:

        cd incubator-linkis-x.x.x
        vim pom.xml

        <properties>
            <hadoop.version>2.7.2</hadoop.version>
            <hive.version>1.2.1</hive.version>
            <spark.version>2.4.3</spark.version>
            <scala.version>2.11.12</scala.version>
            <jdk.compile.version>1.8</jdk.compile.version>
        </properties>

    (1) If you are using it locally for the first time, you must first execute the following command in the root directory of the Linkis source code package:

        cd incubator-linkis-x.x.x
        mvn -N install

    (2) Execute the following command in the root directory of the Linkis source code package:

        cd incubator-linkis-x.x.x
        mvn clean install

    (3) Obtain the installation package, under the assembly->target directory of the project:

        wedatasphere-linkis-x.x.x/assembly/target/wedatasphere-linkis-x.x.x-dist.tar.gz

    2 Compile a single service#

    After obtaining the project code from git, use maven to package the project installation package.

    (1) If you are using it locally for the first time, you must first execute the following command in the root directory of the Linkis source code package:

        cd incubator-linkis-x.x.x
        mvn -N install

    (2) Jump to the corresponding module through the command line in the terminal, such as

        cd publicService

    (3) Execute the compile command in the pom.xml directory corresponding to the module:

        mvn clean install

    (4) Obtain the installation package, there will be a compiled package in the ->target directory of the corresponding module:

       target/linkis-publicservice.zip
    - + \ No newline at end of file diff --git a/docs/0.11.0/development/install-server/index.html b/docs/0.11.0/development/install-server/index.html index 53d30c74714..e852f6f125f 100644 --- a/docs/0.11.0/development/install-server/index.html +++ b/docs/0.11.0/development/install-server/index.html @@ -7,7 +7,7 @@ Installation Of A Single Service | Apache Linkis - + @@ -17,7 +17,7 @@
    • Additional parameters added by SparkEngineManager
        ## Configure engine jar
        wds.linkis.enginemanager.core.jar=$SERVER_HOME/$SERVERNAME/lib/linkis-ujes-spark-engine-version.jar
        ## Configure main jar
        wds.linkis.spark.driver.conf.mainjar=$SERVER_HOME/$SERVERNAME/conf:$SERVER_HOME/$SERVERNAME/lib/*
    - + \ No newline at end of file diff --git a/docs/0.11.0/development/new_engine_conn/index.html b/docs/0.11.0/development/new_engine_conn/index.html index ba0f401dc9f..17105693fcb 100644 --- a/docs/0.11.0/development/new_engine_conn/index.html +++ b/docs/0.11.0/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ How To Quickly Implement A New Engine | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    How To Quickly Implement A New Engine

    1 General introduction#

            When back-end developers use Linkis, they can not only directly use the execution engine that Linkis has developed, but also use the framework to develop their own applications according to their own needs.

            Linkis can be abstracted into the Entrance, EngineManager and Engine modules. The roles and architecture of these three modules can be found in the UJES architecture design document.

            Users only need to implement the necessary interfaces of the three modules to implement their own Linkis engine.

    2 Access operation#

    2.1 Entrance access#

    2.1.1 maven dependency#

    <dependency>
      <groupId>com.webank.wedatasphere.linkis</groupId>
      <artifactId>linkis-ujes-entrance</artifactId>
      <version>0.5.0</version>
    </dependency>

    2.1.2 Interfaces to be implemented#

    Entrance has no interfaces that must be implemented. The following interfaces can be implemented as needed:

    • EntranceParser. Used to parse a request from the front end, usually a JSON body, into a Task that can be persisted. AbstractEntranceParser is already provided; users only need to implement the parseToTask method, and the system provides the CommonEntranceParser implementation by default.

      CommonEntranceParser

    • EngineRequester. Used to obtain a RequestEngine instance, which requests a new Engine from the EngineManager microservice. Linkis already has an implementation class.

      EngineRequesterImpl

    • Scheduler. Used to implement scheduling. By default, multi-user concurrency with FIFO execution within a single user is already implemented.

      FIFOScheduler

    2.2 EngineManager access#

    2.2.1 maven dependency#

    <dependency>
      <groupId>com.webank.wedatasphere.linkis</groupId>
      <artifactId>linkis-ujes-enginemanager</artifactId>
      <version>0.5.0</version>
    </dependency>

    2.2.2 Interfaces to be implemented#

    EngineManager needs to implement the following interfaces as needed:

    • EngineCreator. AbstractEngineCreator already exists; the createProcessEngineBuilder method needs to be implemented to create an EngineBuilder.

              Here, a JavaProcessEngineBuilder class is already provided by default as the ProcessEngineBuilder. It is an abstract class that has already set up the necessary classpath, JavaOpts, GC file path and log file path, and opens the DEBUG port in test mode.

              To extend JavaProcessEngineBuilder now, you only need to add the additional classpath entries and JavaOpts.

      AbstractEngineCreator

    • EngineResourceFactory. AbstractEngineResourceFactory already exists; the getRequestResource method needs to be implemented to obtain the user's personalized resource request.

      EngineResourceFactory

    • hooks. This is a Spring entity bean, mainly used to add pre- and post-hooks before and after the engine is created and started. The user needs to provide an Array[EngineHook] for dependency injection.

      hooks

              For specific examples, please refer to the implementation of Hive EngineManager.

    • resources. This is a Spring entity bean, mainly used to register resources with the RM. resources is an instance of ModuleInfo, which the user needs to provide for dependency injection.

      resources

    2.3 Engine access#

    2.3.1 maven dependency#

    <dependency>
      <groupId>com.webank.wedatasphere.linkis</groupId>
      <artifactId>linkis-ujes-engine</artifactId>
      <version>0.5.0</version>
    </dependency>

    2.3.2 Interfaces to be implemented#

    1. The interfaces that Engine must implement are as follows:
    • EngineExecutorFactory. Used to create an EngineExecutor; the createExecutor method needs to be implemented, which creates an EngineExecutor from a Map of parameters.

    EngineExecutorFactory

    • EngineExecutor. The actual executor, used to submit and execute the code submitted by Entrance. You need to implement getActualUsedResources (the resources actually used by the engine), executeLine (execute one line of code parsed by the CodeParser) and executeCompletely (a companion method to executeLine: if a call to executeLine returns ExecuteIncomplete, the new code is passed to the engine together with the code that previously returned ExecuteIncomplete, as sketched after the figure below).

    EngineExecutor
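    The following self-contained sketch illustrates the executeLine / executeCompletely contract described above; the trait is a simplified stand-in, not the real Linkis interface:

        sealed trait ExecuteResponse
        case object SuccessExecuteResponse extends ExecuteResponse
        case object ExecuteIncomplete extends ExecuteResponse

        trait SimpleEngineExecutor {
          def executeLine(code: String): ExecuteResponse
          def executeCompletely(newCode: String, previousCode: String): ExecuteResponse
        }

        class EchoEngineExecutor extends SimpleEngineExecutor {
          // Executes one line produced by the CodeParser.
          override def executeLine(code: String): ExecuteResponse =
            if (code.trim.endsWith("\\")) ExecuteIncomplete // needs more code
            else { println(s"echo> $code"); SuccessExecuteResponse }

          // Called when executeLine returned ExecuteIncomplete: the previously
          // incomplete code and the new code are executed together.
          override def executeCompletely(newCode: String, previousCode: String): ExecuteResponse =
            executeLine(previousCode.stripSuffix("\\") + " " + newCode)
        }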

    2. The interfaces or beans that the Engine does not have to implement are as follows:
    • engineHooks: Array[EngineHook], a Spring bean. EngineHooks are the engine's pre- and post-creation hooks. Currently the system provides two hooks: CodeGeneratorEngineHook for loading UDFs and functions, and ReleaseEngineHook for releasing idle engines. If not specified, the system provides engineHooks=Array(ReleaseEngineHook) by default.

    engineHooks

    • CodeParser. Used to parse code so that it can be executed line by line. If not specified, the system by default provides a CodeParser that returns all the code directly.

    CodeParser

    • EngineParser. Used to convert a RequestTask into a Job that can be submitted to the Scheduler. If not specified, the system by default provides an EngineParser that converts the RequestTask into a CommonEngineJob.

    EngineParser

    3 Reference examples#

            This section provides a reference example by walking through how the Hive engine is written.

    3.1 HiveEntrance access#

            As described in section 2, Entrance has no interfaces that must be implemented. In the linkis-0.5.0 code, the Hive entrance has only two classes, used only for error-code extension.

    3.2 HiveEngineManager access#

    -1. Implementation of EngineCreator interface

    HiveEngineCreator

            From the figure above, we can see that the Hive EM module has a HiveEngineCreator class, which inherits the AbstractEngineCreator class and implements the createProcessEngineBuilder method, returning a HiveQLProcessBuilder.

    -2.HiveQLProcessBuilder implementation

            The HiveEngineManager module has a HiveQLProcessBuilder class, which inherits JavaProcessEngineBuilder. It implements a number of necessary interfaces and also overrides the build method. In fact, the parent class's build method is complete enough; HiveQLProcessBuilder overrides it in order to obtain the parameters passed in by the user and add them to the startup command.

    HiveQLProcessBuilder

    -3. AbstractEngineResourceFactory interface implementation

    HiveEngineResourceFactory

            Through this instance, each time a user requests an engine, we can notify the ResourceManager of the number of CPU cores, the memory size and the number of instances that the user wants to obtain.

    -4. Injection of resources and hooks bean

    HiveBeans

            From the figure above, we can see that a Spring @Configuration class injects the two beans, resources and hooks. The UJES framework itself provides the @ConditionalOnMissingBean annotation to inject default beans, so developers can inject their own entity beans according to their needs. Through this bean instance, the user can register with the RM the total memory of the EngineManager service, the total number of CPU cores, and the total number of instances that can be created.

    3.3 HiveEngine access#

    -1. EngineExecutorFactory interface implementation

            The HiveEngine module contains a HiveEngineExecutorFactory; at the end of its createExecutor method, a HiveEngineExecutor is returned.

    HiveEngineExecutorFactory

    -2.EngineExecutor interface implementation

            Here executeLine is an interface that must be implemented: it receives one line of script split off by the CodeParser and returns an ExecuteResponse (success or failure).

    HiveEngineExecutor

            In addition, executeCompletely also needs to be implemented; it is called when executeLine returns ExecuteIncomplete.

            An engine has some common methods for its operations, such as close, kill, pause and progress, which can be implemented according to your needs.

    4 FAQ#

            Welcome to join the group and ask questions.

    WeChat group

    - + \ No newline at end of file diff --git a/docs/0.11.0/development/start-server/index.html b/docs/0.11.0/development/start-server/index.html index b3fadfe4704..c44453014bb 100644 --- a/docs/0.11.0/development/start-server/index.html +++ b/docs/0.11.0/development/start-server/index.html @@ -7,7 +7,7 @@ Start Of A Single Service | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    Start Of A Single Service

    1 Jump to the corresponding service directory#

    e.g. PublicService

        cd linkis-publicservice

    2 Execute the start script#

        sh start-publicservice.sh

    3 Startup Success Check#

    • (1) Check startup by viewing the linkis.out log
        less -i logs/linkis.out
    • (2) By viewing the Eureka page

    Check service startup on the Eureka page as follows:

    Open http://${EUREKA_INSTALL_IP}:${EUREKA_PORT} in a browser and check whether the service registered successfully.

    If the microservice appears on your Eureka home page, the service started successfully and can serve external requests:

    Eureka

    - + \ No newline at end of file diff --git a/docs/0.11.0/engine_usage/hive/index.html b/docs/0.11.0/engine_usage/hive/index.html index 0223c238b2e..58e1f1aaa21 100644 --- a/docs/0.11.0/engine_usage/hive/index.html +++ b/docs/0.11.0/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive Engine | Apache Linkis - + @@ -20,7 +20,7 @@ Figure 2 Hive running effect Figure 2

    2 Hive engine implementation#

            The Hive execution engine is implemented by implementing the necessary interfaces of the three modules Entrance, EngineManager and Engine, with reference to the Linkis development documentation. The Engine module is the most special part; Hive's implementation has its own set of logic.

            The Release version currently provided by Linkis is based on Hadoop 2.7.2 and Hive 1.2.1, both Apache versions.

            Linkis's Hive engine interacts with the underlying Hive mainly through the HiveEngineExecutor class, which is instantiated by the HiveEngineExecutorFactory bean.

            In the executeLine interface implemented by HiveEngineExecutor, Linkis uses the CommandProcessorFactory class provided by Hive, passing in the local Hive configuration, to obtain an org.apache.hadoop.hive.ql.Driver instance. The Driver class provides an API that helps submit the user's script code to the cluster for execution.

            After the Driver submits the Hive SQL code, an API reports whether the execution succeeded, and the result set can be obtained on success. If the execution succeeds, the result set is stored in a specified directory for users to view, with the help of the unified storage service provided by Linkis.

            In addition, after the Driver submits the Hive SQL, if a MapReduce task is generated, we can also kill the submitted Hive query through the killRunningJobs API provided by HadoopJobExecHelper; this is the logic behind a user killing a task from the frontend.

            One more thing: Linkis's Hive engine also implements a progress function. Specifically, the runningJobs field of HadoopJobExecHelper is used to obtain the running MR tasks, each of which has corresponding map and reduce progress; the total progress of the task can be obtained by a simple calculation. Note that a running MR job is deleted from the runningJobs list once it finishes executing, so the SQL execution plan must be obtained at the beginning. For details, please refer to the code; a sketch follows.
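            A hedged sketch, not the actual Linkis code, of combining per-MR-job map/reduce progress into one overall figure, assuming the total number of jobs is known in advance from the SQL execution plan:

        case class MRProgress(mapProgress: Float, reduceProgress: Float)

        // finished: jobs already completed; running: progress of jobs still running;
        // planned: total number of MR jobs taken from the execution plan.
        def totalProgress(finished: Int, running: Seq[MRProgress], planned: Int): Float = {
          val runningPart = running.map(j => (j.mapProgress + j.reduceProgress) / 2).sum
          ((finished + runningPart) / math.max(planned, 1)).min(1.0f)
        }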

    3 Adapt your own hive version#

            Because the current version of Linkis supports the Apache 1.2.1 version of Hive, many users' clusters may not be consistent with ours, so you need to recompile the Hive execution engine yourself.

            For example, if a user is using the CDH 1.1.0 version, they need to change hive.version to the specified version in the top-level pom.xml and then compile.

    When we were adapting, we also found jar package conflicts; users need to check the logs to eliminate them. If the cause is still unclear, you are welcome to join the group for consultation.

    WeChat group

    4 Future goals#

    1. Seamlessly adapt to more hive versions.
    2. The deployment method is simpler, try to use the containerized method.
    3. The function is more complete, and it is more accurate and complete in terms of execution progress, data accuracy, etc.
    - + \ No newline at end of file diff --git a/docs/0.11.0/engine_usage/python/index.html b/docs/0.11.0/engine_usage/python/index.html index 12eeeb7a2e0..87e381b7b24 100644 --- a/docs/0.11.0/engine_usage/python/index.html +++ b/docs/0.11.0/engine_usage/python/index.html @@ -7,7 +7,7 @@ Python Engine | Apache Linkis - + @@ -20,7 +20,7 @@ Figure 3 Spark running effect Figure 2

    2 Implementation of Python engine#

            The Linkis-Python execution engine is implemented, following How to implement a new engine, by implementing the necessary interfaces of the three modules Entrance, EngineManager and Engine.

            The execution module uses the py4j framework to let the Python executor interact with the JVM. After a user submits code, the JVM submits the code through the py4j framework to the Python interpreter for execution and obtains the output or error information from the Python process.

            Specifically, you can view the python.py source code in the Python execution module; it defines several Python methods that Linkis uses for inter-process interaction.
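            A minimal sketch of the JVM side of this pattern, assuming the py4j Java library; the entry-point class and method names are illustrative, not Linkis's actual classes:

        import py4j.GatewayServer

        // Object exposed to the Python process over the py4j gateway (illustrative).
        class EngineEntryPoint {
          def onPythonOutput(out: String): Unit = println(s"python> $out")
        }

        object PythonEngineGateway {
          def main(args: Array[String]): Unit = {
            // The Python interpreter connects back to this JVM through the gateway,
            // and the two processes exchange calls across it.
            val server = new GatewayServer(new EngineEntryPoint)
            server.start()
          }
        }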

    3 Future goals#

    1. The deployment method is simpler, try to use the containerized method.
    2. Support the submission of spark jar package
    3. Better support the submission of spark's yarn-cluster mode.
    - + \ No newline at end of file diff --git a/docs/0.11.0/engine_usage/spark/index.html b/docs/0.11.0/engine_usage/spark/index.html index a0973d55d4b..3955ad70e26 100644 --- a/docs/0.11.0/engine_usage/spark/index.html +++ b/docs/0.11.0/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark Engine | Apache Linkis - + @@ -23,7 +23,7 @@ -3. Better support for spark's yarn-cluster submission.

    - + \ No newline at end of file diff --git a/docs/0.11.0/introduction/index.html b/docs/0.11.0/introduction/index.html index 26fd7374701..923bc2623d3 100644 --- a/docs/0.11.0/introduction/index.html +++ b/docs/0.11.0/introduction/index.html @@ -7,7 +7,7 @@ Introduction | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    Introduction

    Linkis, open-sourced by WeBank, addresses the connectivity, access and reuse issues between upper-layer tools and applications and the underlying computing and storage engines.

    Introduction#

    Linkis Github repo: https://github.com/apache/incubator-linkis

    On top of computing and storage engines such as Spark, TiSpark, Hive, Python and HBase, Linkis provides a unified REST/WebSocket/JDBC interface, acting as a data middleware through which users submit SQL, Pyspark, HiveQL and Scala for execution.

    Based on a microservice architecture, Linkis provides enterprise-level features such as financial-grade multi-tenant isolation, resource control and permission isolation; it supports unified variables, UDFs, functions and user resource file management, and offers full life-cycle management of highly concurrent, high-performance and highly available big data jobs/requests.

    Background#

    The widespread use of big data technologies has led to a proliferation of upper-layer applications and underlying computing engines.

    Meeting business needs by introducing multiple open-source components, and continuously updating and enriching the big data platform architecture, is common practice for almost all enterprises at this stage.

    As shown in the graph below, as the connections between our upper-layer applications and tool systems and the underlying computing and storage components become more and more frequent, the entire data platform turns into the mesh structure shown.


    Raw Data Ecological Map


    Continuously introducing new components to achieve business needs, more and more pain points have also arisen:

    1. Business needs vary from one application to another; the upper-layer components are each distinctive, so users experience a strong sense of fragmentation and high learning costs.

    2. The diversity of data and the complexity of storage and computing mean that one component usually solves only one problem, so developers must master a well-rounded technology stack.

    3. Capabilities such as multi-tenant isolation, user resource management and user permission management of newly introduced components are often incompatible with the pre-existing data platform, and customized top-down development not only involves a large amount of work but also reinvents the wheel.

    4. Upper-layer applications directly interface with the underlying computing and storage engines, so any change in the underlying environment directly affects the normal use of the business products.

    Original design intention#

    How to provide a unified data middleware that hides all the calling and usage details of the underlying layers, so that business users focus only on realizing their business and stay unaffected even by expansion or complete relocation of the underlying platform? That is Linkis's original design intention!

    Linkis Solution

    Technical architecture#

    Technical architecture

    As shown in the graph above, we have created several new microservice clusters based on SpringCloud microservice technology to build Linkis's middleware capabilities.

    Each microservice cluster assumes part of the system's functional responsibilities, which we have clearly delineated as follows:

    • Unified job execution service: a distributed REST/WebSocket service that receives the various access requests submitted by upper-layer systems.

      Currently supported computing engines: Spark, Python, TiSpark, Hive and Shell.

      Supported script languages: SparkSQL, Spark Scala, Pyspark, R, Python, HQL and Shell;

    • Resource management service: supports real-time control of each system's and each user's resource usage, limits the resource usage and concurrency of systems and users, and provides real-time resource dynamics charts to facilitate viewing and managing system and user resources;

      Currently supported resource types: Yarn queue resources, servers (CPU and memory), number of concurrent users, etc.

    • Unified storage service: generic IO, capable of quickly interfacing with various storage systems, providing a unified call entry, supporting all commonly used formats, highly integrated and simple to use;
    • Unified context service: unifies user and system resource files (user scripts, JARs, ZIPs, properties files, etc.); manages parameters and variables for users, systems and computing engines in one place: set once, automatically referenced;
    • Repository services: system- and user-level material management that can be shared and circulated, with automatic management across the whole life cycle;
    • Metadata service: real-time display of Hive database table structures and partitions.

    Building on the interaction of these microservice clusters, we have improved the way the entire big data platform serves external users and the associated processes.

    Business architecture#

    Operational framework

    Name Explanation:

    1) Gateway:

    Based on Spring Cloud Gateway with enhanced plugins, adding 1-to-N support between the frontend and backend WebSocket microservices (see the detailed architecture implementation); mainly used for parsing user requests and routing them to the specified microservices.

    2) Unified entry:

    The unified entrance is the job life-cycle manager for a class of engines.

    Entrance manages the entire life cycle of a job, from receiving the job and submitting it to the execution engine, through feeding execution information back to the user, to job completion.

    3) Engine manager:

    The engine manager manages the engine across its whole life cycle.

    It is responsible for requesting and locking resources from the resource management service, instantiating new engines, and monitoring the engines' life state.

    4) Execution Engine:

    The execution engine is a microservice that truly executes user assignments. It is started by the engine manager.

    In order to enhance interaction, the execution engine interacts directly with the Unified Entrance and delivers the log, progress, status and results of the execution in real time to the Unified Entrance.

    5) Resource management services

    Controls in real time the resource usage of each system and each user, checks the resource usage and actual load of each engine manager, and limits the resource usage and concurrency of systems and users.

    6) Eureka

    Eureka is the service discovery framework developed by Netflix; SpringCloud integrates it into its sub-project spring-cloud-netflix to implement SpringCloud's service discovery features.

    A Eureka Client is built into each microservice, giving it access to the Eureka Server and the ability to discover services in real time.

    Processes#

    How does Linkis handle a SparkSQL submission from an upper-layer system?

    Process time series

    1. A user of an upper-layer system submits SQL. It first passes through the Gateway, which parses the user request and routes it to the appropriate unified entrance (Entrance).

    2. Entrance first checks whether this system's user has an available Spark engine service; if so, it submits the request directly to that Spark engine service.

    3. If no Spark engine service is available, Entrance uses Eureka's service discovery to obtain the list of all engine managers, then requests the RM in real time for the engine managers' actual load.

    4. Entrance picks the engine manager with the lowest load and asks it to start a new Spark engine service.

    5. The engine manager receives the request and asks the RM whether a new engine can be started for this user.

    6. If it can be started, the engine manager starts requesting and locking resources; otherwise a failure exception is returned to Entrance.

    7. Once the resources are locked successfully, the new Spark engine service is started; after startup succeeds, the new Spark engine is returned to Entrance.

    8. After Entrance obtains the new engine, it requests SQL execution from the new engine.

    9. The new Spark engine receives the SQL request, submits the SQL to Yarn, and sends logs, progress and status to Entrance in real time.

    10. Entrance delivers the logs, progress and status to the Gateway in real time.

    11. The Gateway pushes the logs, progress and status back to the frontend.

    12. Once the SQL has been executed successfully, the engine proactively pushes the result set to Entrance, and Entrance notifies the frontend to fetch the results.

    How to ensure high real-time#

    As is well known, Spring Cloud integrates Feign as the communication tool between microservices.

    However, Feign-based HTTP calls between microservices only support picking an instance of the target microservice at random under simple rules.

    So how does Linkis enable an execution engine to push logs, progress and status directly to the unified entrance that requested it?

    Linkis has implemented its own underlying RPC communication scheme on top of Feign.

    Linkis RPC Architecture

    As shown in the figure above, we encapsulated Sender and Receiver on top of Feign.

    Sender is used directly by the sending side. Users can target a specific microservice instance or access one at random, and broadcasting is also supported.

    Receiver, as the receiving end, requires users to implement the Receiver interface to handle the actual business logic.

    Sender offers three ways of sending a request, sketched below:

    1. The ask method is a synchronous request-response method, which requires the receiving end to respond synchronously;

    2. The send method is a synchronous request method; it only sends the request synchronously to the receiving end and does not ask the receiving end for an answer;

    3. The deliver method is an asynchronous request method; as long as the process at the sending end does not exit, the request will be sent to the receiving end later through another thread.
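
    The three calling styles can be summarized by an interface of the following shape. This is an illustrative sketch only, not Linkis's actual RPC API; the interface and method signatures are assumptions made to mirror the description above.

    public interface Sender {
        /** Synchronous request-response: blocks until the receiver replies. */
        Object ask(Object message);

        /** Synchronous fire-and-forget: sends the request, expects no reply. */
        void send(Object message);

        /** Asynchronous fire-and-forget: queued and sent later by another thread,
         *  as long as the sending process is still alive. */
        void deliver(Object message);
    }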

    How to support high concurrency#

    Linkis designed five asynchronous message queues and thread pools. Each job occupies a submission thread for less than 1 millisecond, ensuring that each unified entrance can accept more than 10,000 TPS of resident job requests.

    Full-asynchronous call thread pool

    • How do we improve the throughput of upper-layer requests?

      The Entrance WebSocket processor maintains an internal processing thread pool and handler queue to receive upper-layer requests routed from Spring Cloud Gateway.

    • How do we ensure that different users of different systems are isolated from one another?

      The Entrance job scheduler gives each user of each system a dedicated thread, ensuring isolation.

    • How do we ensure job execution? (See the sketch after this list.)

      The job execution pool is used only for submitting jobs; once a job has been submitted to the engine, it is immediately placed in the job execution queue, ensuring that no job occupies an execution-pool thread for more than 1 millisecond.

      The RPC request pool receives and processes the logs, progress, status and result sets pushed back by the engine, and updates the job's information in real time.

    • How can a job's logs, progress and status be pushed to the upper-layer system in real time?

      The WebSocket send pool is dedicated to processing jobs' logs, progress and status, pushing this information to the upper-layer system.
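
    A minimal sketch of the submit-then-enqueue pattern described above: the caller's thread only enqueues the job, so submission returns almost immediately, while dedicated worker threads consume the queue. All names and sizes here are illustrative, not Linkis's actual implementation.

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    public class JobQueueExecutor {
        private final BlockingQueue<Runnable> jobQueue = new LinkedBlockingQueue<>(10_000);

        public JobQueueExecutor(int workers) {
            for (int i = 0; i < workers; i++) {
                Thread worker = new Thread(() -> {
                    try {
                        while (true) {
                            jobQueue.take().run();   // long-running execution happens here
                        }
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                });
                worker.setDaemon(true);
                worker.start();
            }
        }

        /** Submission costs only an enqueue, so the caller is released immediately. */
        public boolean submit(Runnable job) {
            return jobQueue.offer(job);
        }
    }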

    User-Level Isolation and Scheduling Timeliness#

    Linkis designed the Scheduler module, a grouped scheduling-and-consumption module that can be intelligently monitored and scaled out, to achieve Linkis's high concurrency.

    Grouped scheduling and consumption architecture

    Each user of each system is grouped separately, ensuring isolation at both the system level and the user level.

    Each consumer has an independent monitoring thread that measures the length of the consumer's waiting queue, the number of events being executed, and the growth rate of execution time.

    The consumer group sets thresholds and alarm ratios for these metrics. The monitoring thread scales the consumer out immediately once a metric exceeds its threshold, or once the ratio between metrics exceeds its limit (for example, when the monitored average execution time is greater than the distribution interval parameter).

    When scaling out, the monitored metrics above are fully utilized: the targeted parameter is adjusted specifically, and the other parameters are scaled automatically.
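
    A minimal sketch of such a metric-driven monitoring thread; the thresholds, the metric sources and the scaling action are all illustrative placeholders, not Linkis's actual Scheduler internals.

    public class GroupMonitor implements Runnable {
        private final int queueLengthThreshold = 1_000;   // placeholder threshold
        private final long avgExecTimeLimitMs = 60_000;   // placeholder limit

        @Override
        public void run() {
            while (!Thread.currentThread().isInterrupted()) {
                int waiting = currentQueueLength();        // waiting events in this group
                long avgExecMs = currentAvgExecTimeMs();   // average execution time
                if (waiting > queueLengthThreshold || avgExecMs > avgExecTimeLimitMs) {
                    scaleOutConsumer();                    // grow this group's capacity
                }
                try {
                    Thread.sleep(5_000);                   // sampling interval
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        }

        private int currentQueueLength() { return 0; }     // placeholder metric source
        private long currentAvgExecTimeMs() { return 0; }  // placeholder metric source
        private void scaleOutConsumer() { /* placeholder scaling action */ }
    }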

    Summary#

    Linkis, as a data middleware, has made many attempts and efforts to shield users from the details of lower-level calls.

    For example: how does Linkis implement the unified storage service? How does Linkis unify UDFs, functions and user variables?

    Due to space limitations, this article does not discuss these in detail; you are welcome to visit our official website: https://linkis.apache.org

    Is there a truly open-source, self-developed data middleware, polished in financial-grade production environments and scenarios, that can be given back to the open-source community, so that people can adopt it for production with relative confidence, with support for financial-grade scenarios and enterprise-grade characteristics?

    We want Linkis to be the answer.

    At the same time, we look forward to more community strength to work together to promote Linkis's growth.

    - + \ No newline at end of file diff --git a/docs/0.11.0/tags/index.html b/docs/0.11.0/tags/index.html index d62f42b8799..f96c555e80b 100644 --- a/docs/0.11.0/tags/index.html +++ b/docs/0.11.0/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -15,7 +15,7 @@

    Tags

    - + \ No newline at end of file diff --git a/docs/0.11.0/upgrade/upgrade_from_0.9.0_to_0.9.1_guide/index.html b/docs/0.11.0/upgrade/upgrade_from_0.9.0_to_0.9.1_guide/index.html index 4b4a0963495..9b999cc5e97 100644 --- a/docs/0.11.0/upgrade/upgrade_from_0.9.0_to_0.9.1_guide/index.html +++ b/docs/0.11.0/upgrade/upgrade_from_0.9.0_to_0.9.1_guide/index.html @@ -7,7 +7,7 @@ Upgrade From 0.9.0 To 0.9.1 Guide | Apache Linkis - + @@ -16,7 +16,7 @@ The eureka module does not need to be updated

    2.2 Modules that only need their packages updated#

          You only need to upgrade the Linkis-related modules to 0.9.1:

    1. linkis-gateway
    2. linkis-resourceManager
    3. linkis-ujes-hive-enginemanager
    4. linkis-ujes-hive-entrance
    5. linkis-ujes-jdbc-entrance
    6. linkis-ujes-python-entrance
    7. linkis-ujes-spark-entrance

    Upgrade steps:

    1. Delete the 0.9.0 package

    2. Unzip the corresponding service directory and copy the package to the corresponding lib directory

    Linkis-gateway needs to modify the configuration of linkis.properties:

    # Add parameter
    wds.linkis.gateway.conf.enable.token.auth=true
    # Modify the following parameter
    wds.linkis.gateway.conf.url.pass.auth=/dws/

    Linkis-gateway needs to copy the proxy configuration token.properties to the conf directory:

    2.3 Add material library related packages#

    The following modules need additional packages related to the material library (BML):

    1. linkis-publicservice adds BML support and needs the BML client packages:

    linkis-bmlclient-0.9.1.jar
    linkis-bmlcommon-0.9.1.jar
    linkis-gateway-httpclient-support-0.9.1.jar
    linkis-httpclient-0.9.1.jar

    In addition, the netty package has been added:

    netty-3.6.2.Final.jar

    In addition, you need to configure the gateway address in linkis.properties:

    wds.linkis.gateway.ip=127.0.0.1
    wds.linkis.gateway.port=9001

    2. linkis-ujes-python-enginemanager and linkis-ujes-spark-enginemanager add BML support and need the BML client packages:

    linkis-bmlclient-0.9.1.jar
    linkis-bmlcommon-0.9.1.jar
    linkis-bml-hook-0.9.1.jar
    linkis-gateway-httpclient-support-0.9.1.jar
    linkis-httpclient-0.9.1.jar

    Upgrade steps:

    1. Delete the 0.9.0 package

    2. Unzip the corresponding service directory and copy the package to the corresponding lib directory

    2.4 Services that need to update configuration and package#

    The service that needs to update the configuration and package: linkis-metadata

          After decompressing the linkis-metadata installation package, you need to modify the configuration in the conf:

    1. application.yml: modify the eureka address
    2. linkis.properties: configure the Linkis database and the Hive metadata database address:

    # Linkis database connection information
    wds.linkis.server.mybatis.datasource.url=jdbc:mysql://
    wds.linkis.server.mybatis.datasource.username=
    wds.linkis.server.mybatis.datasource.password=
    # Hive metabase address (note: not the hiveServer2 address)
    hive.meta.url=
    hive.meta.user=
    hive.meta.password=

    2.5 Newly added services#

    Newly added service: linkis-bml

    After downloading the linkis-bml installation package and decompressing it, modify the configuration in conf:

    1. application.yml: modify the eureka address
    2. linkis.properties: configure the Mybatis-related settings:

    wds.linkis.server.mybatis.datasource.url=jdbc:mysql://
    wds.linkis.server.mybatis.datasource.username=
    wds.linkis.server.mybatis.datasource.password=

    3. Import the bml sql data into mysql:

    cd db/
    source linkis-bml.sql
    - + \ No newline at end of file diff --git a/docs/0.11.0/user_guide/1.0_sdk_manual/index.html b/docs/0.11.0/user_guide/1.0_sdk_manual/index.html index 65146a3662a..662d5387ab4 100644 --- a/docs/0.11.0/user_guide/1.0_sdk_manual/index.html +++ b/docs/0.11.0/user_guide/1.0_sdk_manual/index.html @@ -7,7 +7,7 @@ Use of 1.0 SDK | Apache Linkis - + @@ -58,7 +58,7 @@
    - + \ No newline at end of file diff --git a/docs/0.11.0/user_guide/X_sdk_manual/index.html b/docs/0.11.0/user_guide/X_sdk_manual/index.html index c6e82f35174..f1e2e381619 100644 --- a/docs/0.11.0/user_guide/X_sdk_manual/index.html +++ b/docs/0.11.0/user_guide/X_sdk_manual/index.html @@ -7,7 +7,7 @@ Use of 0.X SDK | Apache Linkis - + @@ -33,7 +33,7 @@
    // 6. Get the job information of the script
    val jobInfo = client.getJobInfo(jobExecuteResult)
    // 7. Get the list of result sets (if the user submits multiple SQL at a time, multiple result sets will be generated)
    val resultSetList = jobInfoResult.getResultSetList(client)
    println("All result set list:")
    resultSetList.foreach(println)
    val oneResultSet = jobInfo.getResultSetList(client).head
    // 8. Get a specific result set through a result set information
    val fileContents = client.resultSet(ResultSetAction.builder().setPath(oneResultSet).setUser(jobExecuteResult.getUser).build()).getFileContent
    println("First fileContents: ")
    println(fileContents)
    } catch {
      case e: Exception => {
        e.printStackTrace()
      }
    }
    IOUtils.closeQuietly(client)}
    - + \ No newline at end of file diff --git a/docs/1.0.2/api/jdbc_api/index.html b/docs/1.0.2/api/jdbc_api/index.html index 80d35d498a8..12fb5013a3c 100644 --- a/docs/1.0.2/api/jdbc_api/index.html +++ b/docs/1.0.2/api/jdbc_api/index.html @@ -7,7 +7,7 @@ Task Submission And Execution Of JDBC API | Apache Linkis - + @@ -19,7 +19,7 @@
    // 3. Create statement and execute query
    Statement st = connection.createStatement();
    ResultSet rs = st.executeQuery("show tables");
    // 4. Process the returned results of the database (using the ResultSet class)
    while (rs.next()) {
        ResultSetMetaData metaData = rs.getMetaData();
        for (int i = 1; i <= metaData.getColumnCount(); i++) {
            System.out.print(metaData.getColumnName(i) + ":" + metaData.getColumnTypeName(i) + ": " + rs.getObject(i) + " ");
        }
        System.out.println();
    }
    // close resources
    rs.close();
    st.close();
    connection.close();
    }
    - + \ No newline at end of file diff --git a/docs/1.0.2/api/linkis_task_operator/index.html b/docs/1.0.2/api/linkis_task_operator/index.html index 7633a1b96ab..e7c9dddaf2f 100644 --- a/docs/1.0.2/api/linkis_task_operator/index.html +++ b/docs/1.0.2/api/linkis_task_operator/index.html @@ -7,7 +7,7 @@ Task Submission and Execution Rest Api | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    Linkis Task submission and execution Rest API document

    • The return of the Linkis Restful interface follows the following standard return format:
    { "method": "", "status": 0, "message": "", "data": {}}

    Convention:

    • method: Returns the requested Restful API URI, which is mainly used in WebSocket mode.
    • status: return status information, where: -1 means no login, 0 means success, 1 means error, 2 means verification failed, 3 means no access to the interface.
    • data: return specific data.
    • message: return the requested prompt message. If the status is not 0, the message returned is an error message, and the data may have a stack field, which returns specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification

    1. Submit for Execution#

    • Interface /api/rest_j/v1/entrance/execute

    • Submission method POST

    {    "executeApplicationName": "hive", //Engine type    "requestApplicationName": "dss", //Client service type    "executionCode": "show tables",    "params": {"variable": {}, "configuration": {}},    "runType": "hql", //The type of script to run    "source": {"scriptPath":"file:///tmp/hadoop/1.hql"}}
    • Interface /api/rest_j/v1/entrance/submit

    • Submission method POST

    {    "executionContent": {"code": "show tables", "runType": "sql"},    "params": {"variable": {}, "configuration": {}},    "source": {"scriptPath": "file:///mnt/bdp/hadoop/1.hql"},    "labels": {        "engineType": "spark-2.4.3",        "userCreator": "hadoop-IDE"    }}

    • Return example

    { "method": "/api/rest_j/v1/entrance/execute", "status": 0, "message": "Request executed successfully", "data": {   "execID": "030418IDEhivebdpdwc010004:10087IDE_hadoop_21",   "taskID": "123" }}
    • execID is the unique execution ID generated for the task after it is submitted to Linkis. It is of type String. This ID is only useful while the task is running, similar to the concept of a PID. The ExecID is designed as: (requestApplicationName length)(executeApplicationName length)(Instance length)${requestApplicationName}${executeApplicationName}${entranceInstance information (ip+port)}${requestApplicationName}_${umUser}_${index}

    • taskID is the unique ID representing the task submitted by the user. This ID is generated by database auto-increment and is of Long type. (A submission sketch follows.)
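
    As an illustration, the submit interface above can be called with Java's built-in HttpClient (Java 11+). This is a minimal sketch: the gateway address is a placeholder, and a valid login session cookie is assumed to be attached in a real call.

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class SubmitExample {
        public static void main(String[] args) throws Exception {
            String body = "{"
                    + "\"executionContent\": {\"code\": \"show tables\", \"runType\": \"sql\"},"
                    + "\"labels\": {\"engineType\": \"spark-2.4.3\", \"userCreator\": \"hadoop-IDE\"}"
                    + "}";
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create("http://127.0.0.1:9001/api/rest_j/v1/entrance/submit")) // placeholder gateway
                    .header("Content-Type", "application/json")
                    .POST(HttpRequest.BodyPublishers.ofString(body))
                    .build();
            HttpResponse<String> response = HttpClient.newHttpClient()
                    .send(request, HttpResponse.BodyHandlers.ofString());
            // On success, data.execID and data.taskID identify the task (see above).
            System.out.println(response.body());
        }
    }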

    2. Get Status#

    • Interface /api/rest_j/v1/entrance/${execID}/status

    • Submission method GET

    • Return example

    { "method": "/api/rest_j/v1/entrance/{execID}/status", "status": 0, "message": "Get status successful", "data": {   "execID": "${execID}",   "status": "Running" }}

    3. Get Logs#

    • Interface /api/rest_j/v1/entrance/${execID}/log?fromLine=${fromLine}&size=${size}

    • Submission method GET

    • The request parameter fromLine indicates the line to start reading from, and size indicates how many lines of log this request fetches

    • Return example, where the returned fromLine needs to be used as a parameter for the next request of this interface

    {  "method": "/api/rest_j/v1/entrance/${execID}/log",  "status": 0,  "message": "Return log information",  "data": {    "execID": "${execID}",  "log": ["error log","warn log","info log", "all log"],  "fromLine": 56  }}

    4. Get Progress#

    • Interface /api/rest_j/v1/entrance/${execID}/progress

    • Submission method GET

    • Return example

    {  "method": "/api/rest_j/v1/entrance/{execID}/progress",  "status": 0,  "message": "Return progress information",  "data": {    "execID": "${execID}",    "progress": 0.2,    "progressInfo": [        {        "id": "job-1",        "succeedTasks": 2,        "failedTasks": 0,        "runningTasks": 5,        "totalTasks": 10        },        {        "id": "job-2",        "succeedTasks": 5,        "failedTasks": 0,        "runningTasks": 5,        "totalTasks": 10        }    ]  }}

    5. Kill Task#

    • Interface /api/rest_j/v1/entrance/${execID}/kill

    • Submission method POST

    { "method": "/api/rest_j/v1/entrance/{execID}/kill", "status": 0, "message": "OK", "data": {   "execID":"${execID}"  }}
    - + \ No newline at end of file diff --git a/docs/1.0.2/api/login_api/index.html b/docs/1.0.2/api/login_api/index.html index d1371a41cb4..cdc17efc26d 100644 --- a/docs/1.0.2/api/login_api/index.html +++ b/docs/1.0.2/api/login_api/index.html @@ -7,7 +7,7 @@ Login Api | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    Login Document

    1. Docking With LDAP Service#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Add LDAP related configuration:

    wds.linkis.ldap.proxy.url=ldap://127.0.0.1:389/   # LDAP service URL
    wds.linkis.ldap.proxy.baseDN=dc=webank,dc=com     # LDAP service baseDN configuration

    2. How To Open The Test Mode To Achieve Login-Free#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Turn on the test mode and the parameters are as follows:

    wds.linkis.test.mode=true    # Open test mode
    wds.linkis.test.user=hadoop  # Specify which user to delegate all requests to in test mode

    3. Login Interface Summary#

    We provide the following login-related interfaces:

    • Login In

    • Login Out

    • Heart Beat

    4. Interface details#

    • The return of the Linkis Restful interface follows the following standard return format:
    { "method": "", "status": 0, "message": "", "data": {}}

    Protocol

    • method: Returns the requested Restful API URI, which is mainly used in WebSocket mode.
    • status: returns status information, where: -1 means no login, 0 means success, 1 means error, 2 means verification failed, 3 means no access to the interface.
    • data: return specific data.
    • message: return the requested prompt message. If the status is not 0, the message returns an error message, and the data may have a stack field, which returns specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification

    1). Login In#

    • Interface /api/rest_j/v1/user/login

    • Submission method POST

          {        "userName": "",        "password": ""      }
    • Return to example
        {        "method": null,        "status": 0,        "message": "login successful(登录成功)!",        "data": {            "isAdmin": false,            "userName": ""        }     }

    Among them:

    • isAdmin: Linkis has only admin users and non-admin users. The only privilege of admin users is viewing the historical tasks of all users in the Linkis management console. (A login sketch follows.)
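
    As an illustration, the login call can be made with Java 11+'s HttpClient and a cookie manager, so the session cookie is retained for later requests. The gateway address and credentials below are placeholders.

    import java.net.CookieManager;
    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class LoginExample {
        public static void main(String[] args) throws Exception {
            HttpClient client = HttpClient.newBuilder()
                    .cookieHandler(new CookieManager())   // retains the login session cookie
                    .build();
            String body = "{\"userName\": \"hadoop\", \"password\": \"***\"}"; // placeholders
            HttpRequest login = HttpRequest.newBuilder()
                    .uri(URI.create("http://127.0.0.1:9001/api/rest_j/v1/user/login")) // placeholder gateway
                    .header("Content-Type", "application/json")
                    .POST(HttpRequest.BodyPublishers.ofString(body))
                    .build();
            HttpResponse<String> response = client.send(login, HttpResponse.BodyHandlers.ofString());
            System.out.println(response.body());          // status 0 means login succeeded
        }
    }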

    2). Login Out#

    • Interface /api/rest_j/v1/user/logout

    • Submission method POST

      No parameters

    • Return example

        {        "method": "/api/rest_j/v1/user/logout",        "status": 0,        "message": "Logout successful(退出登录成功)!"    }

    3). Heart Beat#

    • Interface /api/rest_j/v1/user/heartbeat

    • Submission method POST

      No parameters

    • Return example

        {         "method": "/api/rest_j/v1/user/heartbeat",         "status": 0,         "message": "Maintain heartbeat success(维系心跳成功)!"    }
    - + \ No newline at end of file diff --git a/docs/1.0.2/api/overview/index.html b/docs/1.0.2/api/overview/index.html index 8d44627fee7..6b482362b2c 100644 --- a/docs/1.0.2/api/overview/index.html +++ b/docs/1.0.2/api/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    Overview

    1. Document description#

    Linkis 1.0 has been refactored and optimized on the basis of Linkis 0.x, and it remains compatible with the 0.x interfaces. However, to prevent compatibility problems when using version 1.0, please read the following documents carefully:

    1. When using Linkis 1.0 for customized development, you need to use Linkis's authorization and authentication interface. Please read the Login API document carefully.

    2. Linkis 1.0 provides a JDBC interface. If you need to access Linkis through JDBC, please read the Task Submission and Execution JDBC API document.

    3. Linkis 1.0 provides a Rest interface. If you need to develop upper-layer applications on top of Linkis, please read the Task Submission and Execution Rest API document.

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/add_an_engine_conn/index.html b/docs/1.0.2/architecture/add_an_engine_conn/index.html index cae1ffd6943..5b0747798ce 100644 --- a/docs/1.0.2/architecture/add_an_engine_conn/index.html +++ b/docs/1.0.2/architecture/add_an_engine_conn/index.html @@ -7,7 +7,7 @@ Add an EngineConn | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    How to add an EngineConn

    Adding an EngineConn is one of the core processes of the computing task preparation phase of Linkis computing governance. It mainly includes the following steps: first, the client side (Entrance or a user client) initiates a request for a new EngineConn to LinkisManager; then LinkisManager asks the EngineConnManager to start the EngineConn based on the demands and label rules; finally, LinkisManager returns the usable EngineConn to the client side.

    Based on the figure below, let's explain the whole process in detail:

    Process of adding a EngineConn

    1. LinkisManager receives requests from the client side#

    Glossary:

    • LinkisManager: The management center of Linkis computing governance capabilities. Its main responsibilities are:

      1. Based on multi-level combined tags, provide users with available EngineConn after complex routing, resource management and load balancing.

      2. Provide EC and ECM full life cycle management capabilities.

      3. Provide users with multi-Yarn cluster resource management functions based on multi-level combined tags. It is mainly divided into three modules: AppManager, ResourceManager and LabelManager , which can support multi-active deployment and have the characteristics of high availability and easy expansion.

    After the AM module receives the Client's new EngineConn request, it first checks the parameters of the request to determine their validity. Secondly, it selects the most suitable EngineConnManager (ECM) through complex rules for the subsequent EngineConn startup. Next, it applies to RM for the resources needed to start the EngineConn. Finally, it requests the ECM to create the EngineConn.

    The four steps will be described in detail below.

    1. Request parameter verification#

    After the AM module receives the engine creation request, it checks the parameters. First it checks the permissions of the requesting user and the creating user, and then the Labels attached to the request. Since Labels are used later in AM's creation process to find the ECM, record resource information, and so on, you need to ensure the necessary Labels are present: at this stage, the request must carry the UserCreatorLabel (for example: hadoop-IDE) and the EngineTypeLabel (for example: spark-2.4.3).

    2. Select a EngineConnManager(ECM)#

    ECM selection mainly uses the Labels passed by the client to choose a suitable ECM service to start the EngineConn. In this step, the LabelManager first searches the registered ECMs with the Labels passed by the client and returns them ordered by label match degree. After obtaining the registered ECM list, selection rules are applied to these ECMs; at this stage, rules such as availability check, resource surplus and machine load have been implemented. After rule selection, the ECM with the best label match, the most idle resources and the lowest load is returned, as sketched below.
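
    To make the selection rules concrete, here is a minimal sketch of the filter-and-rank logic described above. The EcmCandidate fields and the ranking order are illustrative stand-ins, not Linkis's actual classes or scoring.

    import java.util.Comparator;
    import java.util.List;
    import java.util.Optional;

    class EcmCandidate {
        boolean available;      // passes the availability check
        int labelMatchScore;    // higher = better label match
        long freeResource;      // higher = more idle resources
        double load;            // lower = better
    }

    public class EcmSelector {
        /** Filter out unavailable ECMs, then prefer better label match,
         *  more free resources, and lower load, in that order. */
        public Optional<EcmCandidate> select(List<EcmCandidate> candidates) {
            return candidates.stream()
                    .filter(c -> c.available)
                    .max(Comparator.<EcmCandidate>comparingInt(c -> c.labelMatchScore)
                            .thenComparingLong(c -> c.freeResource)
                            .thenComparingDouble(c -> -c.load));   // negate so low load wins
        }
    }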

    3. Apply resources required for EngineConn#

    1. After obtaining the assigned ECM, AM then asks the EngineConnPluginServer service how many resources the client's engine-creation request will use. Here the resource request is encapsulated, mainly including the Labels, the EngineConn startup parameters passed by the Client, and the user configuration parameters obtained from the Configuration module; the resource information is obtained by calling the ECP service through RPC.

    2. After the EngineConnPluginServer service receives the resource request, it first finds the corresponding engine label from the passed labels and selects the EngineConnPlugin of the corresponding engine through that label. Then it uses the EngineConnPlugin's resource generator to calculate, from the engine startup parameters passed in by the client, the resources required to apply for the new EngineConn, and returns the result to LinkisManager.

      Glossary:

    • EngineConnPlugin: the interface that Linkis requires to be implemented when connecting a new computing storage engine. This interface mainly covers several capabilities the EngineConn must provide during startup, including the EngineConn resource generator, the EngineConn startup command generator and the EngineConn connector. Please refer to the Spark engine implementation class for a concrete implementation: SparkEngineConnPlugin.
    • EngineConnPluginServer: It is a microservice that loads all the EngineConnPlugins and provides externally the required resource generation capabilities of EngineConn and EngineConn's startup command generation capabilities.
    • EngineConnResourceFactory: Calculate the total resources needed when EngineConn starts this time through the parameters passed in.
    • EngineConnLaunchBuilder: Through the incoming parameters, a startup command of the EngineConn is generated to provide the ECM to start the engine.
    3. After AM obtains the engine resources, it calls the RM service to apply for them. The RM service makes a resource judgment using the incoming Labels, the ECM and the resources applied for this time: it first checks whether the resources of the client corresponding to the Labels are sufficient, and then whether the resources of the ECM service are sufficient. If both are sufficient, the resource application is approved and the resources of the corresponding Labels are added or subtracted.

    4. Request ECM for engine creation#

    1. After completing the resource application for the engine, AM will encapsulate the engine startup request, send it to the corresponding ECM via RPC for service startup, and obtain the instance object of EngineConn.
    2. AM will then determine whether EngineConn is successfully started and become available through the reported information of EngineConn. If it is, the result will be returned, and the process of adding an engine this time will end.

    2. ECM initiates EngineConn#

    Glossary:

    • EngineConnManager: EngineConn's manager. Provides engine life-cycle management, and at the same time reports load information and its own health status to RM.
    • EngineConnBuildRequest: The start engine command passed by LinkisManager to ECM, which encapsulates all tag information, required resources and some parameter configuration information of the engine.
    • EngineConnLaunchRequest: Contains the BML materials, environment variables, ECM required local environment variables, startup commands and other information required to start an EngineConn, so that ECM can build a complete EngineConn startup script based on this.

    After ECM receives the EngineConnBuildRequest command passed by LinkisManager, it is mainly divided into three steps to start EngineConn:

    1. Request EngineConnPluginServer to obtain EngineConnLaunchRequest encapsulated by EngineConnPluginServer.
    2. Parse EngineConnLaunchRequest and encapsulate it into EngineConn startup script.
    3. Execute startup script to start EngineConn.

    2.1 EngineConnPluginServer encapsulates EngineConnLaunchRequest#

    Get the EngineConn type and corresponding version that actually needs to be started through the label information of EngineConnBuildRequest, get the EngineConnPlugin of the EngineConn type from the memory of EngineConnPluginServer, and convert the EngineConnBuildRequest into EngineConnLaunchRequest through the EngineConnLaunchBuilder of the EngineConnPlugin.

    2.2 Encapsulate EngineConn startup script#

    After the ECM obtains the EngineConnLaunchRequest, it downloads the BML materials in the EngineConnLaunchRequest to the local, and checks whether the local necessary environment variables required by the EngineConnLaunchRequest exist. After the verification is passed, the EngineConnLaunchRequest is encapsulated into an EngineConn startup script.

    2.3 Execute startup script#

    Currently, ECM only supports Bash commands for Unix systems, that is, only supports Linux systems to execute the startup script.

    Before startup, the sudo command is used to switch to the corresponding requesting user to execute the script to ensure that the startup user (ie, JVM user) is the requesting user on the Client side.

    After the startup script is executed, ECM monitors the script's execution status and execution log in real time. As soon as the exit status becomes non-zero, it immediately reports EngineConn startup failure to LinkisManager and the whole process ends; otherwise it keeps monitoring the log and status of the startup script until the script execution completes, as sketched below.
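
    A minimal sketch of this start-and-monitor behavior, assuming a generated script path and a requesting user; a real ECM would stream the script's output while waiting rather than just block on exit.

    import java.io.File;

    public class EngineConnLauncher {
        /** Run the generated startup script as the requesting user via sudo;
         *  a non-zero exit code means EngineConn startup failed. */
        public int launch(String requestUser, String scriptPath) throws Exception {
            Process process = new ProcessBuilder("sudo", "-u", requestUser, "bash", scriptPath)
                    .directory(new File("/tmp"))       // working directory placeholder
                    .redirectErrorStream(true)         // merge stderr into stdout for log tailing
                    .start();
            int exitCode = process.waitFor();          // a real ECM streams logs meanwhile
            if (exitCode != 0) {
                // report EngineConn startup failure to LinkisManager here
            }
            return exitCode;
        }
    }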

    3. EngineConn initialization#

    After ECM executes EngineConn's startup script, the EngineConn microservice is officially launched.

    Glossary:

    • EngineConn microservice: Refers to the actual microservices that include an EngineConn and one or more Executors to provide computing power for computing tasks. When we talk about adding an EngineConn, we actually mean adding an EngineConn microservice.
    • EngineConn: The engine connector is the actual connection unit with the underlying computing storage engine, and contains the session information with the actual engine. The difference between it and Executor is that EngineConn only acts as a connection and a client, and does not actually perform calculations. For example, SparkEngineConn, its session information is SparkSession.
    • Executor: As a real computing storage scenario executor, it is the actual computing storage logic execution unit. It abstracts the various capabilities of EngineConn and provides multiple different architectural capabilities such as interactive execution, subscription execution, and responsive execution.

    The initialization of EngineConn microservices is generally divided into three stages:

    1. Initialize the EngineConn of the specific engine. First, the command-line arguments of the Java main method are encapsulated into an EngineCreationContext that contains the relevant label, startup and parameter information; EngineConn is then initialized through the EngineCreationContext to establish the connection between EngineConn and the underlying engine. For example, SparkEngineConn initializes a SparkSession at this stage to establish a connection with a Spark application.
    2. Initialize the Executor. After the EngineConn is initialized, the corresponding Executor is initialized according to the actual usage scenario to provide service capabilities for subsequent users. For example, the SparkEngineConn in the interactive computing scenario initializes a series of Executors that can be used to submit and execute SQL, PySpark and Scala code, supporting the Client in submitting such code to the SparkEngineConn.
    3. Report the heartbeat to LinkisManager regularly and wait for EngineConn to exit. When the underlying engine corresponding to EngineConn becomes abnormal, the maximum idle time is exceeded, the Executor finishes executing, or the user kills it manually, the EngineConn automatically ends and exits. (A sketch of these three stages follows.)
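
    The three stages can be sketched as the skeleton of such a microservice's main method. Every class below is an illustrative stand-in for the corresponding Linkis concept, not the real API.

    public class EngineConnMain {

        static class EngineCreationContext {              // stage-1 input: labels, params, startup info
            static EngineCreationContext fromArgs(String[] args) { return new EngineCreationContext(); }
        }

        static class EngineConn {                         // holds the session with the underlying engine
            static EngineConn init(EngineCreationContext ctx) { return new EngineConn(); }
            boolean isAlive() { return false; }           // placeholder liveness check
        }

        static class Executor {                           // scenario-specific execution unit
            static Executor init(EngineConn conn) { return new Executor(); }
        }

        public static void main(String[] args) throws Exception {
            EngineCreationContext context = EngineCreationContext.fromArgs(args); // stage 1
            EngineConn engineConn = EngineConn.init(context);                     // connect to engine
            Executor executor = Executor.init(engineConn);                        // stage 2
            while (engineConn.isAlive()) {                                        // stage 3
                // report heartbeat to LinkisManager, then wait
                Thread.sleep(3_000);
            }
        }
    }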

    At this point, the process of adding a new EngineConn is basically complete. Finally, let's summarize:

    • The client initiates a request for adding EngineConn to LinkisManager.
    • LinkisManager checks the legitimacy of the parameters, first selects the appropriate ECM according to the label, then confirms the resources required for this new EngineConn according to the user's request, applies for resources from the RM module of LinkisManager, and requires ECM to start a new EngineConn as required after the application is passed.
    • ECM first requests EngineConnPluginServer to obtain an EngineConnLaunchRequest containing BML materials, environment variables, ECM required local environment variables, startup commands and other information needed to start an EngineConn, and then encapsulates the startup script of EngineConn, and finally executes the startup script to start the EngineConn.
    • EngineConn initializes the EngineConn of a specific engine, and then initializes the corresponding Executor according to the actual usage scenario, and provides service capabilities for subsequent users. Finally, report the heartbeat to LinkisManager regularly, and wait for the normal end or termination by the user.
    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/commons/message_scheduler/index.html b/docs/1.0.2/architecture/commons/message_scheduler/index.html index 4a144802514..07ccbd127e9 100644 --- a/docs/1.0.2/architecture/commons/message_scheduler/index.html +++ b/docs/1.0.2/architecture/commons/message_scheduler/index.html @@ -7,7 +7,7 @@ Message Scheduler Module | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    Message Scheduler Module

    1 Overview#

    Linkis-RPC enables communication between microservices. To simplify the use of RPC, Linkis provides the Message-Scheduler module, which analyzes, identifies and invokes methods annotated with @Receiver. It also unifies the use of the RPC and Restful interfaces, which gives better scalability.
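
    To make the annotation-driven dispatch concrete, here is a minimal, self-contained sketch of the pattern: a service method is marked with an annotation, discovered by reflection, and invoked when a matching message arrives. The @Receiver annotation defined here is illustrative, not Linkis's actual annotation.

    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;
    import java.lang.reflect.Method;

    public class ReceiverDemo {

        @Retention(RetentionPolicy.RUNTIME)
        @Target(ElementType.METHOD)
        @interface Receiver {}

        static class EchoService {
            @Receiver
            public String handle(String request) {        // the actual business logic
                return "echo: " + request;
            }
        }

        public static void main(String[] args) throws Exception {
            EchoService service = new EchoService();
            for (Method m : EchoService.class.getDeclaredMethods()) {
                if (m.isAnnotationPresent(Receiver.class)) {   // what a ServiceParser would do
                    System.out.println(m.invoke(service, "hello"));
                }
            }
        }
    }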

    2. Architecture description#

    2.1. Architecture design diagram#

    Module Design Drawing

    2.2. Module description#

    • ServiceParser: parses the object of the Service module and encapsulates methods annotated with @Receiver into ServiceMethod objects.
    • ServiceRegistry: registers the corresponding Service module and stores the ServiceMethods parsed from the Service in a Map container.
    • ImplicitParser: parses the object of the Implicit module; methods annotated with @Implicit are encapsulated into ImplicitMethod objects.
    • ImplicitRegistry: registers the corresponding Implicit module and stores the resolved ImplicitMethods in a Map container.
    • Converter: scans the non-interface, non-abstract subclasses of RequestMethod and stores them in a Map; parses Restful requests and matches the related RequestProtocol.
    • Publisher: implements the publish-scheduling function: finds the ServiceMethod matching the RequestProtocol in the Registry and encapsulates it as a Job for submission and scheduling.
    • Scheduler: the scheduling implementation, which uses Linkis-Scheduler to execute the job and returns a MessageJob object.
    • TxManager: completes transaction management for job execution and judges whether to commit or roll back after the job execution ends.
    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/commons/rpc/index.html b/docs/1.0.2/architecture/commons/rpc/index.html index a95fccf2eee..78448c4931f 100644 --- a/docs/1.0.2/architecture/commons/rpc/index.html +++ b/docs/1.0.2/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC Module | Apache Linkis - + @@ -16,7 +16,7 @@ At the same time, because Feign only supports simple service selection rules, it cannot forward the request to the specified microservice instance, and cannot broadcast a request to all instances of the recipient microservice.

    2. Architecture description#

    2.1. Architecture design diagram#

    Linkis RPC architecture diagram

    2.2. Module description#

    The functions of the main modules are introduced as follows:

    • Eureka: the service registry, used for service management and service discovery.
    • Sender: the service request interface; the sender uses a Sender to request services from the receiver.
    • Receiver: the interface for receiving service requests; the receiver responds to services through this interface.
    • Interceptor: the Sender passes the user's request to an interceptor, which intercepts the request and performs additional processing: the broadcast interceptor broadcasts the request, the retry interceptor retries failed requests, the cache interceptor reads and caches simple, unchanging requests, and the default interceptor provides the default implementation.
    • Decoder, Encoder: used for request encoding and decoding.
    • Feign: a lightweight, declarative HTTP client framework used for Linkis-RPC's underlying communication.
    • Listener: the monitoring module, mainly used to listen for broadcast requests.
    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn/index.html b/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn/index.html index 399100b91bf..89ded7a13ee 100644 --- a/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn/index.html +++ b/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    EngineConn architecture design

    EngineConn: the engine connector, the actual connection unit between Linkis and the underlying computing storage engines; it holds the session information with the specific engine and acts as the client that communicates with it.

    EngineConn architecture diagram

    EngineConn

    Introduction to the second-level module:

    linkis-computation-engineconn interactive engine connector#

    The ability to provide interactive computing tasks.

    Core class           | Core function
    EngineConnTask       | Defines the interactive computing tasks submitted to EngineConn
    ComputationExecutor  | Defines the interactive Executor, with interactive capabilities such as status query and task kill
    TaskExecutionService | Provides management functions for interactive computing tasks

    linkis-engineconn-common engine connector common module#

    Define the most basic entity classes and interfaces in the engine connector. EngineConn is used to create a connection session Session for the underlying computing storage engine, which contains the session information between the engine and the specific cluster, and is the client that communicates with the specific engine.

    Core service          | Core function
    EngineCreationContext | Contains the context information of EngineConn during startup
    EngineConn            | Contains the specific information of EngineConn, such as type and the connection information with the underlying computing storage engine
    EngineExecution       | Provides the Executor creation logic
    EngineConnHook        | Defines the operations before and after each phase of engine startup

    The core logic of linkis-engineconn-core engine connector#

    Defines the interfaces involved in the core logic of EngineConn.

    Core class        | Core function
    EngineConnManager | Provides related interfaces for creating and obtaining EngineConn
    ExecutorManager   | Provides related interfaces for creating and obtaining Executor
    ShutdownHook      | Defines the operation of the engine shutdown phase

    linkis-engineconn-launch engine connector startup module#

    Defines the logic of how to start EngineConn.

    Core class       | Core function
    EngineConnServer | EngineConn microservice startup class

    The core logic of the linkis-executor-core executor#

    Defines the core classes related to the actuator. The executor is a real computing scene executor, responsible for submitting user code to EngineConn.

    Core class                 | Core function
    Executor                   | The actual computational logic execution unit; provides a top-level abstraction of the engine's various capabilities
    EngineConnAsyncEvent       | Defines EngineConn-related asynchronous events
    EngineConnSyncEvent        | Defines EngineConn-related synchronous events
    EngineConnAsyncListener    | Defines the EngineConn-related asynchronous event listener
    EngineConnSyncListener     | Defines the EngineConn-related synchronous event listener
    EngineConnAsyncListenerBus | Defines the listener bus for EngineConn asynchronous events
    EngineConnSyncListenerBus  | Defines the listener bus for EngineConn synchronous events
    ExecutorListenerBusContext | Defines the context of the EngineConn event listener
    LabelService               | Provides the label reporting function
    ManagerService             | Provides information transfer with LinkisManager

    linkis-callback-service callback logic#

    Core class         | Core function
    EngineConnCallback | Defines EngineConn's callback logic

    linkis-accessible-executor can be accessed executor#

    Executor that can be accessed. You can interact with it through RPC requests to get its status, load, concurrency and other basic indicators Metrics data.

    Core class               | Core function
    LogCache                 | Provides the log cache function
    AccessibleExecutor       | An Executor that can be accessed and interacted with through RPC requests
    NodeHealthyInfoManager   | Manages the Executor's health information
    NodeHeartbeatMsgManager  | Manages the Executor's heartbeat information
    NodeOverLoadInfoManager  | Manages the Executor's load information
    Listener                 | Provides Executor-related events and the corresponding listener definitions
    EngineConnTimedLock      | Defines the Executor-level lock
    AccessibleService        | Provides the start-stop and status acquisition functions of the Executor
    ExecutorHeartbeatService | Provides heartbeat-related functions of the Executor
    LockService              | Provides the lock management function
    LogService               | Provides log management functions
    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_manager/index.html b/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_manager/index.html index 1b95685925e..6cbfdb16526 100644 --- a/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_manager/index.html +++ b/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_manager/index.html @@ -7,7 +7,7 @@ EngineConnManager Design | Apache Linkis - + @@ -16,7 +16,7 @@ Core Service and Features module are as follows:

    Core service                   | Core function
    EngineConnLaunchService        | Contains the core methods for generating an EngineConn and starting the process
    BmlResourceLocalizationService | Used to download BML engine-related resources and generate the localized file directory
    ECMHealthService               | Reports its own health heartbeat to AM regularly
    ECMMetricsService              | Reports its own metrics to AM regularly
    EngineConnKillService          | Provides related functions to stop the engine
    EngineConnListService          | Provides engine caching and management functions
    EngineConnCallBackService      | Provides the engine callback function
    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_plugin/index.html b/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_plugin/index.html index 2b11fd553b1..5ed0c482bd1 100644 --- a/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_plugin/index.html +++ b/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_plugin/index.html @@ -7,7 +7,7 @@ EngineConnPlugin (ECP) Design | Apache Linkis - + @@ -17,7 +17,7 @@ Other services such as Manager call the logic of the corresponding plug-in in Plugin Server through RPC requests.

    Core class                       | Core function
    EngineConnLaunchService          | Responsible for building the engine connector launch request
    EngineConnResourceFactoryService | Responsible for generating engine resources
    EngineConnResourceService        | Responsible for downloading the resource files used by the engine connector from BML

    EngineConn-Plugin-Loader Engine Connector Plugin Loader#

    The engine connector plug-in loader dynamically loads engine connector plug-ins according to the request parameters and caches the results. The loading process consists of two parts: 1) plug-in resources such as the main program package and its dependency packages are loaded locally (not opened); 2) plug-in resources are dynamically loaded from the local file system into the service process environment, for example loaded into the JVM virtual machine through a class loader (see the sketch after the table below).

    Core class                      | Core function
    EngineConnPluginsResourceLoader | Loads engine connector plug-in resources
    EngineConnPluginsLoader         | Loads the engine connector plug-in instance, or loads an existing one from the cache
    EngineConnPluginClassLoader     | Dynamically instantiates an engine connector instance from a jar
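
    A minimal sketch of the dynamic-loading step (2) above, using a plain URLClassLoader; the jar path and plug-in class name are placeholders, not Linkis's actual artifacts.

    import java.net.URL;
    import java.net.URLClassLoader;

    public class PluginLoaderDemo {
        public static void main(String[] args) throws Exception {
            URL pluginJar = new URL("file:///path/to/engineplugin-spark.jar");   // placeholder path
            try (URLClassLoader loader =
                         new URLClassLoader(new URL[]{pluginJar}, PluginLoaderDemo.class.getClassLoader())) {
                // load and instantiate the plug-in class inside the dedicated class loader
                Class<?> pluginClass = loader.loadClass("com.example.SparkEngineConnPlugin"); // placeholder
                Object plugin = pluginClass.getDeclaredConstructor().newInstance();
                System.out.println("Loaded plugin: " + plugin.getClass().getName());
            }
        }
    }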

    EngineConn-Plugin-Cache engine plug-in cache module#

    The engine connector plug-in cache is a cache service dedicated to caching loaded engine connectors, supporting read, update and remove operations. A plug-in that has been loaded into the service process is cached together with its class loader to avoid the efficiency cost of repeated loading; at the same time, the cache module periodically notifies the loader to update the plug-in resources, and if changes are found, the plug-in is reloaded and the cache refreshed automatically.

    Core class                  | Core function
    EngineConnPluginCache       | Caches loaded engine connector instances
    RefreshPluginCacheContainer | Refreshes cached engine connectors periodically

    EngineConn-Plugin-Core: Engine connector plug-in core module#

    The engine connector plug-in core module is the core module of the engine connector plug-in. Contains the implementation of the basic functions of the engine plug-in, such as the construction of the engine connector start command, the construction of the engine resource factory and the implementation of the core interface of the engine connector plug-in.

    Core class              | Core function
    EngineConnLaunchBuilder | Builds the engine connector launch request
    EngineConnFactory       | Creates the engine connector
    EngineConnPlugin        | The engine connector plug-in interface, covering resources, commands and instance construction methods
    EngineResourceFactory   | The engine resource creation factory

    EngineConn-Plugins: Engine connection plugin collection#

    The engine connector plug-in collection holds the default engine connector plug-ins implemented against the plug-in interface defined by us. It provides default engine connector implementations such as jdbc, spark, python and shell; users can refer to these implemented cases to build more engine connectors for their own needs.

    Core class          | Core function
    engineplugin-jdbc   | jdbc engine connector
    engineplugin-shell  | shell engine connector
    engineplugin-spark  | spark engine connector
    engineplugin-python | python engine connector
    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/computation_governance_services/entrance/index.html b/docs/1.0.2/architecture/computation_governance_services/entrance/index.html index 5845f097ae2..b300bc03c6a 100644 --- a/docs/1.0.2/architecture/computation_governance_services/entrance/index.html +++ b/docs/1.0.2/architecture/computation_governance_services/entrance/index.html @@ -7,7 +7,7 @@ Entrance Architecture Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    Entrance Architecture Design

    The Linkis task submission portal is used to receive, schedule and forward execution requests and to manage the life cycle of computing tasks, and it can return calculation results, logs and progress to the caller. It is split out from the native capabilities of Linkis 0.x's Entrance.

    1. Entrance architecture diagram

    Introduction to the second-level module:

    EntranceServer#

    EntranceServer computing task submission portal service is the core service of Entrance, responsible for the reception, scheduling, execution status tracking, and job life cycle management of Linkis execution tasks. It mainly realizes the conversion of task execution requests into schedulable Jobs, scheduling, applying for Executor execution, job status management, result set management, log management, etc.

    Core class              | Core function
    EntranceInterceptor     | The Entrance interceptor supplements the information of the incoming task, making its content more complete; the supplementary information includes database information, custom variable replacement, code inspection, limit restrictions, etc.
    EntranceParser          | The Entrance parser parses the request parameter Map into a Task; it can also convert a Task into a schedulable Job, or a Job into a storable Task.
    EntranceExecutorManager | Creates an Executor for the execution of an EntranceJob, maintains the relationship between Job and Executor, and supports the labeling capabilities requested by the Job
    PersistenceManager      | Responsible for job-related persistence operations, such as storing the result set path, job status changes and progress in the database
    ResultSetEngine         | Responsible for storing the result set after the job runs, saved as files to HDFS or a local storage directory
    LogManager              | Responsible for storing job logs and managing log error codes
    Scheduler               | The job scheduler is responsible for scheduling and executing all jobs, mainly through the scheduling job queue
    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/computation_governance_services/linkis-cli/index.html b/docs/1.0.2/architecture/computation_governance_services/linkis-cli/index.html index 813d86ad50f..6e64fe4003e 100644 --- a/docs/1.0.2/architecture/computation_governance_services/linkis-cli/index.html +++ b/docs/1.0.2/architecture/computation_governance_services/linkis-cli/index.html @@ -7,7 +7,7 @@ Linkis-Client Architecture Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    Linkis-Client Architecture Design

    Provide users with a lightweight client that submits tasks to Linkis for execution.

    Linkis-Client architecture diagram#

    img

    Second-level module introduction#

    Linkis-Computation-Client#

    Provides an interface for users to submit execution tasks to Linkis in the form of SDK.

    Core class | Core function
    Action     | Defines the attributes of a request and the methods and parameters it contains
    Result     | Defines the properties of the returned result and the methods and parameters it contains
    UJESClient | Responsible for request submission, execution, and obtaining status, results and related parameters
    Linkis-Cli#

    Provides a way for users to submit tasks to Linkis in the form of a shell command terminal.

    Core class  | Core function
    Common      | Defines the instruction template parent class and interface, the instruction analysis entity class, and the parent classes and interfaces of the task submission and execution links
    Core        | Responsible for parsing input, task execution and defining output methods
    Application | Calls linkis-computation-client to perform tasks, and pulls logs and final results in real time
    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/computation_governance_services/linkis_manager/app_manager/index.html b/docs/1.0.2/architecture/computation_governance_services/linkis_manager/app_manager/index.html index 7bdb93f8f49..59ef43d6672 100644 --- a/docs/1.0.2/architecture/computation_governance_services/linkis_manager/app_manager/index.html +++ b/docs/1.0.2/architecture/computation_governance_services/linkis_manager/app_manager/index.html @@ -7,7 +7,7 @@ App Manager | Apache Linkis - + @@ -29,7 +29,7 @@ Engine manager: Engine manager is responsible for managing the basic information and metadata information of all engines.

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/computation_governance_services/linkis_manager/label_manager/index.html b/docs/1.0.2/architecture/computation_governance_services/linkis_manager/label_manager/index.html index b70a5880f79..3357fa7dc62 100644 --- a/docs/1.0.2/architecture/computation_governance_services/linkis_manager/label_manager/index.html +++ b/docs/1.0.2/architecture/computation_governance_services/linkis_manager/label_manager/index.html @@ -7,7 +7,7 @@ Label Manager | Apache Linkis - + @@ -22,7 +22,7 @@ We set that the higher the proportion of candidate nodes associated with irrelevant labels in the total associated nodes, the more significant the impact on the score, which can further accumulate the initial score of the node obtained in the first step.
  • Normalize the standard deviation of the scores of the candidate nodes and sort them.
  • - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/computation_governance_services/linkis_manager/overview/index.html b/docs/1.0.2/architecture/computation_governance_services/linkis_manager/overview/index.html index 5f895c751ff..af461c250b1 100644 --- a/docs/1.0.2/architecture/computation_governance_services/linkis_manager/overview/index.html +++ b/docs/1.0.2/architecture/computation_governance_services/linkis_manager/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -17,7 +17,7 @@ ResourceManager

    4. Monitoring module linkis-manager-monitor#

            Monitor provides the function of node status monitoring.

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/computation_governance_services/linkis_manager/resource_manager/index.html b/docs/1.0.2/architecture/computation_governance_services/linkis_manager/resource_manager/index.html index dd6e232abf0..1f11f0ead1f 100644 --- a/docs/1.0.2/architecture/computation_governance_services/linkis_manager/resource_manager/index.html +++ b/docs/1.0.2/architecture/computation_governance_services/linkis_manager/resource_manager/index.html @@ -7,7 +7,7 @@ Resource Manager | Apache Linkis - + @@ -25,7 +25,7 @@ url, Hadoop version and other information) are maintained in the linkis_external_resource_provider table.

  • For each resource type, there is an implementation of the ExternalResourceProviderParser interface, which parses the attributes of external resources, converts the information that can be matched to the Label into the corresponding Label, and converts the information that can be used as a parameter to request the resource interface into params . Finally, an ExternalResourceProvider instance that can be used as a basis for querying external resource information is constructed.

  • According to the resource type and label information in the parameters of the ExternalResourceService method, find the matching ExternalResourceProvider, generate an ExternalResourceRequest based on the information in it, and formally call the API provided by the external resource to initiate a resource information request.

  • - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/computation_governance_services/overview/index.html b/docs/1.0.2/architecture/computation_governance_services/overview/index.html index ff741efed0e..7d4e6808dfb 100644 --- a/docs/1.0.2/architecture/computation_governance_services/overview/index.html +++ b/docs/1.0.2/architecture/computation_governance_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -21,7 +21,7 @@ Enter EngineConn Architecture Design

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/difference_between_1.0_and_0.x/index.html b/docs/1.0.2/architecture/difference_between_1.0_and_0.x/index.html index b30b3611f95..2f0f0860c68 100644 --- a/docs/1.0.2/architecture/difference_between_1.0_and_0.x/index.html +++ b/docs/1.0.2/architecture/difference_between_1.0_and_0.x/index.html @@ -7,7 +7,7 @@ Difference Between 1.0 And 0.x | Apache Linkis - + @@ -34,7 +34,7 @@ Linkis EngineConn Architecture diagram

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/job_submission_preparation_and_execution_process/index.html b/docs/1.0.2/architecture/job_submission_preparation_and_execution_process/index.html index e91e4b37fcd..81337fa0db8 100644 --- a/docs/1.0.2/architecture/job_submission_preparation_and_execution_process/index.html +++ b/docs/1.0.2/architecture/job_submission_preparation_and_execution_process/index.html @@ -7,7 +7,7 @@ Job Submission | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    Job submission, preparation and execution process

    The submission and execution of computing tasks (Jobs) is the core capability provided by Linkis. It interacts with almost all modules in the Linkis computing governance architecture and occupies a core position in Linkis.

    The whole process, from the submission of a user's computing task at the client to the return of the final result, is divided into three stages: submission -> preparation -> execution. The details are shown in the following figure.

    The overall flow chart of computing tasks

    Among them:

    • Entrance, as the entrance to the submission stage, provides task reception, scheduling and job information forwarding capabilities. It is the unified entrance for all computing tasks. It will forward computing tasks to Orchestrator for scheduling and execution.

    • Orchestrator, as the entrance to the preparation phase, mainly provides job analysis, orchestration and execution capabilities.

    • Linkis Manager: The management center of computing governance capabilities. Its main responsibilities are as follows:

      1. ResourceManager: not only has the resource management capabilities of Yarn and Linkis EngineConnManager, but also provides tag-based multi-level resource allocation and recovery capabilities, giving ResourceManager full resource management capabilities across clusters and across computing resource types;
      2. AppManager: coordinates and manages all EngineConnManagers and EngineConns; the life cycle of an EngineConn (application, reuse, creation, switching, destruction) is handed over to AppManager for management;
      3. LabelManager: based on multi-level combined labels, provides label support for the routing and management of EngineConn and EngineConnManager across IDCs and across clusters;
      4. EngineConnPluginServer: externally provides the capability to generate the resources required to start an EngineConn and to generate the EngineConn startup commands.
    • EngineConnManager: It is the manager of EngineConn, which provides engine life-cycle management, and at the same time reports load information and its own health status to RM.

    • EngineConn: It is the actual connector between Linkis and the underlying computing storage engines. All user computing and storage tasks will eventually be submitted to the underlying computing storage engine by EngineConn. According to different user scenarios, EngineConn provides full-stack computing capability framework support for interactive computing, streaming computing, off-line computing, and data storage tasks.

    1. Submission Stage#

    The submission phase is mainly the interaction of Client -> Linkis Gateway -> Entrance, and the process is as follows:

    Flow chart of submission phase

    1. First, the Client (such as the frontend or a client SDK) initiates a Job request. The job request information, simplified, is as follows (for the specific usage of Linkis, please refer to How to use Linkis):
    POST /api/rest_j/v1/entrance/submit
    {
        "executionContent": {"code": "show tables", "runType": "sql"},
        "params": {"variable": {}, "configuration": {}}, // not required
        "source": {"scriptPath": "file:///1.hql"}, // not required, only used to record the code source
        "labels": {
            "engineType": "spark-2.4.3", // specify the engine
            "userCreator": "username-IDE" // specify the submitting user and submitting system
        }
    }
    2. After Linkis-Gateway receives the request, it determines the target microservice for routing and forwarding from the serviceName in the URI /api/rest_j/v1/${serviceName}/.+. Here Linkis-Gateway parses out the name entrance, and the Job is forwarded to the Entrance microservice. Note that if the user specifies a routing label, the Entrance microservice instance with the corresponding label will be selected for forwarding according to the routing label, instead of random forwarding.
    3. After Entrance receives the Job request, it first performs a simple validity check on the request, then uses RPC to call JobHistory to persist the job information, encapsulates the Job request as a computing task, puts it into the scheduling queue, and waits for it to be consumed by a consumer thread.
    4. The scheduling queue opens up a consumption queue and a consumer thread for each group. The consumption queue stores the preliminarily encapsulated user computing tasks, and the consumer thread continuously takes computing tasks from the queue for consumption in a FIFO manner. The current default grouping rule is Creator + User (that is, submission system + user). Therefore, even for the same user, computing tasks submitted from different systems use completely different consumption queues and consumer threads, fully isolated from each other (a minimal sketch of this consumer model follows this list). (Reminder: users can modify the grouping algorithm as needed.)
    5. After the consumer thread takes out a computing task, it submits the task to Orchestrator, at which point the task officially enters the preparation phase.
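A minimal, self-contained Java sketch of the per-group consumer model described above; the class, the grouping-key format, and the task type are illustrative assumptions, not the actual Linkis scheduler API:

```java
import java.util.Map;
import java.util.concurrent.*;

class GroupedScheduler {
    private final Map<String, BlockingQueue<Runnable>> queues = new ConcurrentHashMap<>();
    private final ExecutorService consumers = Executors.newCachedThreadPool();

    // Default grouping rule from the text: Creator + User (submission system + user).
    private static String groupKey(String creator, String user) {
        return creator + "_" + user;
    }

    void submit(String creator, String user, Runnable task) {
        queues.computeIfAbsent(groupKey(creator, user), key -> {
            BlockingQueue<Runnable> q = new LinkedBlockingQueue<>();
            consumers.submit(() -> consume(q)); // one consumer thread per group
            return q;
        }).offer(task);
    }

    private void consume(BlockingQueue<Runnable> q) {
        try {
            while (true) {
                q.take().run(); // FIFO: take the oldest task and hand it to Orchestrator
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
}
```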

    2. Preparation Stage#

    There are two main processes in the preparation stage. One is to apply to LinkisManager for an available EngineConn to which the computing task can be submitted for execution. The other is for Orchestrator to orchestrate the computing task submitted by Entrance: it converts the user's computing request into a physical execution tree, which is handed over to the execution stage, where the computing task is actually executed.

    2.1 Apply to LinkisManager for available EngineConn#

    If the user has a reusable EngineConn in LinkisManager, the EngineConn is directly locked and returned to Orchestrator, and the entire application process ends.

    What counts as a reusable EngineConn? One that matches all the label requirements of the computing task and whose own health status is Healthy (the load is low and the actual status is Idle). All EngineConns that meet these conditions are then sorted and selected according to the rules, and the best one is finally locked.
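A minimal Java sketch of this reuse selection, assuming candidates expose their labels, health status, and load; all names here are illustrative, not the actual LinkisManager API:

```java
import java.util.*;

class EngineConnSelector {
    // Hypothetical candidate shape for the sketch.
    record Candidate(Set<String> labels, boolean healthyIdle, double load) {}

    static Optional<Candidate> pickReusable(List<Candidate> candidates, Set<String> requiredLabels) {
        return candidates.stream()
                .filter(c -> c.labels().containsAll(requiredLabels)) // must match all label requirements
                .filter(Candidate::healthyIdle)                      // health status Healthy and actually Idle
                .min(Comparator.comparingDouble(Candidate::load));   // sorting rule sketched as "lowest load wins"
    }
}
```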

    If the user does not have a reusable EngineConn, a process to request a new EngineConn will be triggered at this time. Regarding the process, please refer to: How to add an EngineConn.

    2.2 Orchestrate a computing task#

    Orchestrator is mainly responsible for arranging a computing task (JobReq) into a physical execution tree (PhysicalTree) that can be actually executed, and providing the execution capabilities of the Physical tree.

    Here we first focus on Orchestrator's computing task scheduling capabilities. A flow chart is shown below:

    Orchestration flow chart

    The main process is as follows:

    • Converter: Complete the conversion of the JobReq (task request) submitted by the user to Orchestrator's ASTJob. This step will perform parameter check and information supplementation on the calculation task submitted by the user, such as variable replacement, etc.
    • Parser: Complete the analysis of ASTJob. Split ASTJob into an AST tree composed of ASTJob and ASTStage.
    • Validator: Complete the inspection and information supplement of ASTJob and ASTStage, such as code inspection, necessary Label information supplement, etc.
    • Planner: Convert an AST tree into a Logical tree. The Logical tree at this time has been composed of LogicalTask, which contains all the execution logic of the entire computing task.
    • Optimizer: Convert the Logical tree into a Physical tree and optimize the Physical tree.

    In a Physical tree, the majority of nodes embody computing-strategy logic; only the ExecTask in the middle truly encapsulates the execution logic, which is further submitted to and executed on EngineConn. As shown below:

    Physical Tree

    The execution logic encapsulated by JobExecTask and StageExecTask in the Physical tree depends on the specific computing strategy.

    For example, under the multi-active computing strategy, for one user-submitted computing task, the execution logic submitted to the EngineConns of different clusters is encapsulated in two ExecTasks, and the related strategy logic is reflected in their parent node, StageExecTask(End).

    Here, we take the multi-reading scenario under the multi-active computing strategy as an example.

    In the multi-reading scenario, only one ExecTask needs to return a result; once a result is returned, the Physical tree can be marked as successful. However, the Physical tree only has the ability to execute sequentially according to dependencies and cannot terminate the execution of individual nodes: once a node is canceled or fails to execute, the entire Physical tree would be marked as failed. StageExecTask(End) is therefore needed to ensure that the Physical tree can both cancel the ExecTask that failed to execute and continue to upload the result set generated by the successful ExecTask, letting the Physical tree continue executing. This is the computing-strategy execution logic represented by StageExecTask.

    The orchestration process of Linkis Orchestrator is similar to that of many SQL parsing engines (such as the SQL parsers of Spark and Hive). But in fact, the orchestration capability of Linkis Orchestrator is built for the computing governance field, to serve users' varied computing governance needs, whereas a SQL parsing engine is a parsing and orchestration engine oriented to the SQL language. Here is a simple distinction:

    1. What Linkis Orchestrator mainly solves are the orchestration requirements that different computing strategies impose on computing tasks. For example, to achieve multi-active execution, Orchestrator compiles a Physical tree for a user's computing task according to the requirements of the "multi-active" computing strategy, so that the task is submitted to multiple clusters for execution. In constructing the Physical tree, all possible abnormal scenarios are fully considered and reflected in the tree.
    2. The orchestration ability of Linkis Orchestrator has nothing to do with the programming language. In theory, as long as an engine has been adapted to Linkis, all the programming languages it supports can be orchestrated. A SQL parsing engine, by contrast, only cares about the analysis and execution of SQL: it is only responsible for parsing a piece of SQL into an executable Physical tree and finally computing the result.
    3. Linkis Orchestrator also has the ability to parse SQL, but SQL parsing is just one of Orchestrator Parser's parsing implementations for the SQL programming language. The Parser of Linkis Orchestrator also considers introducing Apache Calcite to parse SQL: this would support splitting a user's SQL that spans multiple computing engines (each must be an engine already integrated with Linkis) into multiple sub-SQLs, submitting them to the corresponding engines during the execution stage, and finally selecting a suitable computing engine for the summary calculation.

    After the analysis and orchestration by Linkis Orchestrator, the computing task has been transformed into an executable Physical tree. Orchestrator submits this Physical tree to its Execution module, entering the final execution stage.

    3. Execution Stage#

    The execution stage is mainly divided into the following two steps, which are the last two capability phases provided by Linkis Orchestrator:

    Flow chart of the execution stage

    The main process is as follows:

    • Execution: Analyze the dependencies of the Physical tree, and execute them sequentially from the leaf nodes according to the dependencies.
    • Reheater: Once the execution of a node in the Physical tree completes, a reheat is triggered. Reheating allows the Physical tree to be dynamically adjusted according to real-time execution. For example, if a leaf node is detected to have failed and it supports retry (the failure was caused by a thrown ReTryException), the Physical tree is automatically adjusted and a retry parent node with exactly the same content is added above that leaf node.

    Let us go back to the Execution stage, where we focus on the execution logic of the ExecTask node that encapsulates the user computing task submitted to EngineConn.

    1. As mentioned earlier, the first step in the preparation phase is to obtain a usable EngineConn from LinkisManager. After ExecTask gets this EngineConn, it will submit the user's computing task to EngineConn through an RPC request.
    2. After EngineConn receives the computing task, it will asynchronously submit it to the underlying computing storage engine through the thread pool, and then immediately return an execution ID.
    3. After ExecTask gets this execution ID, it can then use this ID to asynchronously pull the execution information of the computing task (such as status, progress, logs, result sets, etc.).
    4. At the same time, EngineConn will monitor the execution of the underlying computing storage engine in real time through multiple registered Listeners. If the computing storage engine does not support registering Listeners, EngineConn will start a daemon thread for the computing task and periodically pull the execution status from the computing storage engine.
    5. EngineConn pulls the execution status back in real time, via RPC requests, to the microservice where Orchestrator is located.
    6. After the Receiver of the microservice receives the execution status, it will broadcast it through the ListenerBus, and the Orchestrator Execution will consume the event and dynamically update the execution status of the Physical tree.
    7. The result set generated by the computing task is written on the EngineConn side to storage media such as HDFS; EngineConn returns only the result set path through RPC. Execution consumes the event and broadcasts the obtained result set path through the ListenerBus, so that the Listener registered by Entrance with Orchestrator can consume the path and persist it to JobHistory.
    8. After the execution of the computing task on the EngineConn side is completed, through the same logic, the Execution will be triggered to update the state of the ExecTask node of the Physical tree, so that the Physical tree will continue to execute until the entire tree is completely executed. At this time, Execution will broadcast the completion status of the calculation task through ListenerBus.
    9. After the Listener that Entrance registered with Orchestrator consumes the state event, it updates the Job state in JobHistory, and the entire task execution is complete.

    Finally, let's take a look at how the client side knows the state of the calculation task and obtains the calculation result in time, as shown in the following figure:

    Results acquisition process

    The specific process is as follows:

    1. The client periodically polls to request Entrance to obtain the status of the computing task.
    2. Once the status flips to success, the client requests the job information from JobHistory and obtains all the result set paths.
    3. Using a result set path, the client initiates a file-content query to PublicService and obtains the content of the result set.
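A minimal client-side polling sketch in Java (java.net.http, Java 11+). Only the submit path given earlier comes from this document; the gateway address and the status endpoint path below are assumptions for illustration:

```java
import java.net.URI;
import java.net.http.*;

public class JobPoller {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        String gateway = "http://127.0.0.1:9001"; // assumed gateway address
        String execId = args[0];                  // execution ID returned by the submit request

        while (true) {
            HttpRequest statusReq = HttpRequest.newBuilder(
                    URI.create(gateway + "/api/rest_j/v1/entrance/" + execId + "/status")) // assumed path
                    .GET().build();
            String body = client.send(statusReq, HttpResponse.BodyHandlers.ofString()).body();
            if (body.contains("Succeed")) { // step 2: status flipped to success
                // Step 3 would fetch job info (result set paths) from JobHistory and
                // then read the result set content from PublicService (paths assumed).
                break;
            }
            Thread.sleep(2000); // step 1: poll periodically
        }
    }
}
```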

    At this point, the entire process of job submission -> preparation -> execution is complete.

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/microservice_governance_services/gateway/index.html b/docs/1.0.2/architecture/microservice_governance_services/gateway/index.html index e40c5ac6bdc..d3e26241678 100644 --- a/docs/1.0.2/architecture/microservice_governance_services/gateway/index.html +++ b/docs/1.0.2/architecture/microservice_governance_services/gateway/index.html @@ -7,7 +7,7 @@ Gateway Design | Apache Linkis - + @@ -26,7 +26,7 @@ Gateway WebSocket Forwarding

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/microservice_governance_services/overview/index.html b/docs/1.0.2/architecture/microservice_governance_services/overview/index.html index 15a9260657d..d8035a1c28d 100644 --- a/docs/1.0.2/architecture/microservice_governance_services/overview/index.html +++ b/docs/1.0.2/architecture/microservice_governance_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -30,7 +30,7 @@ As the request receiver, the Receiver will be provided to process the request sent by the Sender in order to complete the synchronous response or asynchronous response.

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/overview/index.html b/docs/1.0.2/architecture/overview/index.html index ed2496a29c2..8ba11392d6b 100644 --- a/docs/1.0.2/architecture/overview/index.html +++ b/docs/1.0.2/architecture/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    Overview

    Linkis 1.0 divides all microservices into three categories: public enhancement services, computing governance services, and microservice governance services. The following figure shows the architecture of Linkis 1.0.

    Linkis1.0 Architecture Figure

    The specific responsibilities of each category are as follows:

    1. Public enhancement services are the material library services, context services, data source services and public services that Linkis 0.X has provided.
    2. The microservice governance services are Spring Cloud Gateway, Eureka and Open Feign, already provided by Linkis 0.X; Linkis 1.0 will also provide support for Nacos.
    3. Computing governance services are the core focus of Linkis 1.0: across the three stages of submission, preparation and execution, they comprehensively upgrade Linkis's ability to control user tasks.

    The following is a directory listing of Linkis1.0 architecture documents:

    1. For the characteristics of Linkis1.0's architecture, please read The difference between Linkis1.0 and Linkis0.x.
    2. For documents related to Linkis1.0's public enhancement services, please read Public Enhancement Service.
    3. For documents related to Linkis1.0's microservice governance, please read Microservice Governance.
    4. For documents related to Linkis1.0's computing governance services, please read Computation Governance Service.
    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/bml/index.html b/docs/1.0.2/architecture/public_enhancement_services/bml/index.html index 56b5861789d..23a216a5317 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/bml/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/bml/index.html @@ -7,7 +7,7 @@ BML | Apache Linkis - + @@ -18,7 +18,7 @@ The number of bytes. After the reading is successful, the stream information is returned to the user.

  • Insert a successful download record in resource_download_history

  • Database Design#

    1. Resource information table (resource)

    Field name | Function | Remarks
    resource_id | A string that uniquely identifies a resource globally | UUID can be used for identification
    resource_location | The location where resources are stored | For example, hdfs:///tmp/bdp/${USERNAME}/
    owner | The owner of the resource | e.g. zhangsan
    create_time | Record creation time |
    is_share | Whether to share | 0 means not to share, 1 means to share
    update_time | Last update time of the resource |
    is_expire | Whether the resource record expires |
    expire_time | Record resource expiration time |

    2. Resource version information table (resource_version)

    Field name | Function | Remarks
    resource_id | Uniquely identifies the resource | Joint primary key
    version | The version of the resource file |
    start_byte | Start byte of the resource file |
    end_byte | End byte of the resource file |
    size | Resource file size |
    resource_location | Resource file storage location |
    start_time | Record upload start time |
    end_time | Record upload end time |
    updater | Record update user |

    3. Resource download history table (resource_download_history)

    Field | Function | Remarks
    resource_id | Record the resource_id of the downloaded resource |
    version | Record the version of the downloaded resource |
    downloader | Record the downloading user |
    start_time | Record download start time |
    end_time | Record download end time |
    status | Whether the download succeeded | 0 means success, 1 means failure
    err_msg | Log failure reason | null means success, otherwise logs the failure reason
    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service/index.html b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service/index.html index 90b8a106246..1d071e55cdd 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service/index.html @@ -7,7 +7,7 @@ CS Architecture | Apache Linkis - + @@ -17,7 +17,7 @@

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_cache/index.html b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_cache/index.html index f0b59990462..3a1fef196e6 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_cache/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_cache/index.html @@ -7,7 +7,7 @@ CS Cache Architecture | Apache Linkis - + @@ -16,7 +16,7 @@

    Note: The ContextIDValueGenerator will go to the persistence layer to pull the Array[ContextKeyValue] of the ContextID, and parse the ContextKeyValue key storage index and content through ContextKeyValueParser.

    The other interface processes provided by ContextCacheService are similar, so I won't repeat them here.

    KeyWord parsing logic#

    The specific entity bean of ContextValue needs to use the annotation @KeywordMethod on each get method that can serve as a keyword source. For example, the getTableName method of Table must be annotated with @KeywordMethod.

    When ContextKeyValueParser parses a ContextKeyValue, it scans all methods of the passed-in object that are annotated with KeywordMethod, calls each get method, obtains the returned object's toString value, parses it through user-selectable rules, and stores the results in the keyword collection. The rules support separators and regular expressions.

    Precautions:

    1. The annotation is defined in the core module of cs

    2. The annotated get method cannot take parameters

    3. The toString method of the object returned by the get method must return the keyword
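A minimal Java sketch of the annotation usage described above. The annotation name follows the text; its package, retention, and target here are assumptions, since the real annotation is defined in the cs core module:

```java
import java.lang.annotation.*;

// Hypothetical declaration of the keyword annotation (assumed retention/target).
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
@interface KeywordMethod {}

class Table {
    private final String tableName;

    Table(String tableName) { this.tableName = tableName; }

    @KeywordMethod                 // marks this getter as a keyword source for the parser
    public String getTableName() { // precaution 2: the annotated get method takes no parameters
        return tableName;          // precaution 3: toString() of the returned object is the keyword
    }
}
```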

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_client/index.html b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_client/index.html index 9fcef9ed3cb..6e2d0546661 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_client/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_client/index.html @@ -7,7 +7,7 @@ CS Client Design | Apache Linkis - + @@ -17,7 +17,7 @@ The second case is that the content of the ContextID is carried. We need to parse the csid. The way of parsing is to obtain the information of each instance through the method of string cutting, and then use eureka to determine whether this micro-channel still exists through the instance information. Service, if it exists, send it to this microservice instance

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html index 697084f26ea..036a724404a 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html @@ -7,7 +7,7 @@ CS HA Design | Apache Linkis - + @@ -18,7 +18,7 @@ The client sends a request, and the Gateway forwards it to any server. The HA module generates the HAID, including the main instance, the backup instance and the CSID, and completes the binding of the workflow and the HAID.

    When the client sends a change request, Gateway determines that the main Instance is invalid and forwards the request to the standby Instance for processing. After the HA module on the standby Instance verifies that the HAID is valid, it loads the context instance and processes the request.

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_listener/index.html b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_listener/index.html index 05319b12bea..91ca21e44f5 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_listener/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_listener/index.html @@ -7,7 +7,7 @@ CS Listener Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    CS Listener Architecture

    Listener Architecture#

    In DSS, when a node changes its metadata information, the context information of the entire workflow changes. We expect all nodes to perceive the change and update their metadata automatically. This is achieved with a listener pattern, plus a heartbeat mechanism that polls to maintain metadata consistency of the context information.

    Client registration itself, CSKey registration and CSKey update process#

    The main process is as follows:

    1. Registration operation: the clients client1, client2, client3, and client4 register themselves and the CSKeys they want to monitor with the CSServer through HTTP requests. The Service obtains the callback engine instance through the external interface and registers the clients and their corresponding CSKeys.

    2. Update operation: If the ClientX node updates the CSKey content, the Service service updates the CSKey cached by the ContextCache, and the ContextCache delivers the update operation to the ListenerBus. The ListenerBus notifies the specific listener to consume (that is, the ContextKeyCallbackEngine updates the CSKeys corresponding to the Client). The consumed event will be automatically removed.

    3. Heartbeat mechanism:

    All clients use heartbeat information to detect whether the value of CSKeys in ContextKeyCallbackEngine has changed.

    ContextKeyCallbackEngine returns the updated CSKeys value to all registered clients through the heartbeat mechanism. If there is a client's heartbeat timeout, remove the client.
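An illustrative Java sketch of the heartbeat bookkeeping described above; the class name and the timeout value are assumptions, not the actual ContextKeyCallbackEngine API:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class HeartbeatRegistry {
    private static final long TIMEOUT_MS = 30_000; // assumed heartbeat timeout
    private final Map<String, Long> lastBeat = new ConcurrentHashMap<>();

    // Called on every client heartbeat; the updated CSKey values would be returned here.
    void onHeartbeat(String clientId) {
        lastBeat.put(clientId, System.currentTimeMillis());
    }

    // Periodically invoked: remove any client whose heartbeat has timed out.
    void evictTimedOut() {
        long now = System.currentTimeMillis();
        lastBeat.entrySet().removeIf(e -> now - e.getValue() > TIMEOUT_MS);
    }
}
```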

    Listener UML class diagram#

    Interface: ListenerManager

    Externally: provides a ListenerBus for event delivery.

    Internally: provides a callback engine for specific event registration, access, update, and heartbeat processing logic

    Listener callbackengine timing diagram#

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_persistence/index.html b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_persistence/index.html index 05573ba8296..deb82152296 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_persistence/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_persistence/index.html @@ -7,7 +7,7 @@ CS Persistence Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_search/index.html b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_search/index.html index 2c9e9002431..2958ecce7b0 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_search/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_search/index.html @@ -7,7 +7,7 @@ CS Search Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    CS Search Architecture

    CSSearch Architecture#

    Overall architecture#

    As shown below:

    1. ContextSearch: the query entry. It accepts query conditions defined in Map form and returns the corresponding results according to the conditions.

    2. Building module: Each condition type corresponds to a Parser, which is responsible for converting the condition in the form of Map into a Condition object, which is implemented by calling the logic of ConditionBuilder. Conditions with complex logical relationships will use ConditionOptimizer to optimize query plans based on cost-based algorithms.

    3. Execution module: filters out the results that match the conditions from the Cache. According to the query target, there are three execution modes: Ruler, Fetcher, and Matcher. The specific logic is described later.

    4. Evaluation module: Responsible for calculation of conditional execution cost and statistics of historical execution status.

    Query Condition Definition (ContextSearchCondition)#

    A query condition specifies how to filter out the part that meets the condition from a ContextKeyValue collection. The query conditions can be used to form more complex query conditions through logical operations.

    1. Support ContextType, ContextScope, KeyWord matching

      1. Corresponding to a Condition type

      2. In Cache, these should have corresponding indexes

    2. Support contains/regex matching mode for key

      1. ContainsContextSearchCondition: contains a string

      2. RegexContextSearchCondition: match a regular expression

    3. Support logical operations of or, and, and not

      1. Unary operation UnaryContextSearchCondition: supports logical operations on a single parameter, such as NotContextSearchCondition

      2. Binary operation BinaryContextSearchCondition: supports logical operations on two parameters, defined as LeftCondition and RightCondition, such as OrContextSearchCondition and AndContextSearchCondition

      3. Each logical operation corresponds to an implementation class of the above subclasses

      4. The UML class diagram of this part is as follows:

    Construction of query conditions#

    1. Support construction through ContextSearchConditionBuilder: When constructing, if multiple ContextType, ContextScope, KeyWord, contains/regex matches are declared at the same time, they will be automatically connected by And logical operation

    2. Support logical operations between Conditions that return new Conditions: And, Or and Not (considering the condition1.or(condition2) form, the top-level Condition interface needs to define the logical operation methods; see the sketch after this list)

    3. Support building from a Map through the ContextSearchParser corresponding to each underlying implementation class
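A minimal sketch of what the top-level Condition interface implied by item 2 could look like in Java; only the And/Or/Not operations and the condition1.or(condition2) form come from the text, everything else is illustrative:

```java
interface ContextSearchCondition {
    ContextSearchCondition and(ContextSearchCondition other); // yields an AndContextSearchCondition
    ContextSearchCondition or(ContextSearchCondition other);  // yields an OrContextSearchCondition
    ContextSearchCondition not();                             // yields a NotContextSearchCondition
}
```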

    Execution of query conditions#

    1. Three function modes of query conditions:

      1. Ruler: Filter out eligible ContextKeyValue sub-Arrays from an Array

      2. Matcher: Determine whether a single ContextKeyValue meets the conditions

      3. Fetcher: Filter out an Array of eligible ContextKeyValue from ContextCache

    2. Each bottom-level Condition has a corresponding Execution, responsible for maintaining the corresponding Ruler, Matcher, and Fetcher.

    Query entry ContextSearch#

    Provide a search interface, receive Map as a parameter, and filter out the corresponding data from the Cache.

    1. Use Parser to convert the condition in the form of Map into a Condition object

    2. Obtain cost information through Optimizer, and determine the order of query according to the cost information

    3. After executing the corresponding Ruler/Fetcher/Matcher logic through the corresponding Execution, the search result is obtained

    Query Optimization#

    1. OptimizedContextSearchCondition maintains the Cost and Statistics information of the condition:

      1. Cost information: CostCalculator is responsible for judging whether a certain Condition can calculate Cost, and if it can be calculated, it returns the corresponding Cost object

      2. Statistics information: start/end/execution time, number of input lines, number of output lines

    2. Implement a CostContextSearchOptimizer, whose optimize method optimizes a Condition based on its cost and converts it into an OptimizedContextSearchCondition object. The specific logic is described as follows:

      1. Disassemble a complex Condition into a tree structure based on the combination of logical operations. Each leaf node is a basic simple Condition; each non-leaf node is a logical operation.

    Tree A as shown in the figure below is a complex condition composed of five simple conditions of ABCDE through various logical operations.

    (Tree A)
    2. The execution of these Conditions is actually depth-first, traversing the tree from left to right. Moreover, because logical operations are commutative, the left-right order of a node's children in the Condition tree can be exchanged, so all possible trees in all possible execution orders can be enumerated.

    Tree B as shown in the figure below is another possible sequence of tree A above, which is exactly the same as the execution result of tree A, except that the execution order of each part has been adjusted.

    (Tree B)
    3. For each tree, the cost is calculated from the leaf nodes and aggregated up to the root node, giving the final cost of the tree; the tree with the smallest cost is then taken as the optimal execution order.

    The rules for calculating node cost are as follows:

    1. For leaf nodes, each node has two attributes: Cost and Weight. Cost is the cost calculated by CostCalculator. Weight is assigned according to the execution order of the nodes; the current default is 1 for the left child and 0.5 for the right child (how to adjust this will be considered later). The reason for assigning weights is that in some cases the left-hand condition alone can already determine whether the whole combination matches, so the right-hand condition does not have to be executed in all cases and its actual cost should be reduced by a certain percentage.

    2. For non-leaf nodes, Cost = the sum of Cost×Weight of all child nodes; the weight assignment logic is consistent with that of leaf nodes.
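A minimal Java sketch of these two rules, assuming a binary Condition tree with the default weights of 1 (left child) and 0.5 (right child); the class is illustrative, not the actual optimizer code:

```java
class CostNode {
    final double cost;          // for leaves: the cost from CostCalculator
    final CostNode left, right; // null for leaf nodes

    CostNode(double leafCost) { // leaf: cost comes directly from CostCalculator
        this.cost = leafCost;
        this.left = null;
        this.right = null;
    }

    CostNode(CostNode left, CostNode right) { // non-leaf: sum of child cost × weight
        this.left = left;
        this.right = right;
        this.cost = left.cost * 1.0 + right.cost * 0.5;
    }
}
```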

    Taking tree A and tree B as examples, calculate the costs of the two trees respectively, as shown in the figure below (the numbers in each node are Cost|Weight), assuming the costs of the five simple conditions A, B, C, D, and E are 10, 100, 50, 10, and 100. It can be concluded that the cost of tree B is less than that of tree A, making it the better plan.

    1. Use CostCalculator to measure the cost of simple conditions:

      1. Conditions acting on an index: the cost is determined by the distribution of the index values. For example, if the Array obtained from the Cache by condition A has length 100 and that of condition B has length 200, then the cost of condition A is less than that of condition B.

      2. Conditions that need to be traversed:

        1. According to the matching mode of the condition itself, an initial Cost is assigned: for example, Regex is 100 and Contains is 10 (the specific values will be adjusted as needed during implementation)

        2. Based on historical query efficiency (e.g., throughput per unit time), the real-time Cost is obtained by continuously adjusting the initial Cost.

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/context_service/overview/index.html b/docs/1.0.2/architecture/public_enhancement_services/context_service/overview/index.html index e6edc2cacd9..eb40c94f800 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/context_service/overview/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/context_service/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -22,7 +22,7 @@ Enter Persistence architecture design

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/overview/index.html b/docs/1.0.2/architecture/public_enhancement_services/overview/index.html index 95766159245..61a411299ff 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/overview/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    PublicEnhancementService (PS) architecture design

    PublicEnhancementService (PS): public enhancement service, a module that provides functions such as unified configuration management, context service, material library, data source management, microservice management, and historical task query for other microservice modules.

    Introduction to the second-level module:

    BML material library#

    It is the linkis material management system, which is mainly used to store various file data of users, including user scripts, resource files, third-party Jar packages, etc., and can also store class libraries that need to be used when the engine runs.

    Core Class | Core Function
    UploadService | Provides resource upload service
    DownloadService | Provides resource download service
    ResourceManager | Provides a unified management entry for uploading and downloading resources
    VersionManager | Provides resource version marking and version management functions
    ProjectManager | Provides project-level resource management and control capabilities

    Unified configuration management#

    Configuration provides a "user-engine-application" three-level configuration management solution, which provides users with the function of configuring custom engine parameters under various access applications.

    Core Class | Core Function
    CategoryService | Provides management services for application and engine catalogs
    ConfigurationService | Provides a unified management service for user configuration

    ContextService context service#

    ContextService is used to solve the problem of data and information sharing across multiple systems in a data application development process.

    Core Class | Core Function
    ContextCacheService | Provides a cache service for context information
    ContextClient | Provides the ability for other microservices to interact with the CSServer group
    ContextHAManager | Provides high-availability capabilities for ContextService
    ListenerManager | Provides a message-bus capability
    ContextSearch | Provides the query entry
    ContextService | Implements the overall execution logic of the context service

    Datasource data source management#

    Datasource provides the ability to connect to different data sources for other microservices.

    Core Class | Core Function
    datasource-server | Provides the ability to connect to different data sources

    InstanceLabel microservice management#

    InstanceLabel provides registration and labeling functions for other microservices connected to linkis.

    Core Class | Core Function
    InsLabelService | Provides microservice registration and label management functions

    Jobhistory historical task management#

    Jobhistory provides users with query, progress, and log display functions for Linkis historical tasks, and provides a unified historical task view for administrators.

    Core Class | Core Function
    JobHistoryQueryService | Provides historical task query service

    Variable user-defined variable management#

    Variable provides users with functions related to the storage and use of custom variables.

    Core Class | Core Function
    VariableService | Provides functions related to the storage and use of custom variables

    UDF user-defined function management#

    UDF provides users with the function of custom functions, which can be introduced by users when writing code.

    Core Class | Core Function
    UDFService | Provides user-defined function service
    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/public_service/index.html b/docs/1.0.2/architecture/public_enhancement_services/public_service/index.html index 4633775d745..d97dd5d3016 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/public_service/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/public_service/index.html @@ -7,7 +7,7 @@ Public Service | Apache Linkis - + @@ -20,7 +20,7 @@ The main functions are as follows:

    • Provides resource management capabilities for some specific labels to assist RM in more refined resource management.

    • Provides labeling capabilities for users. The user label will be automatically added for judgment when applying for the engine.

    • Provides the label analysis module, which can parse a user's request into a set of labels.

    • Provides node label management capabilities, mainly used to offer CRUD operations for node labels and label resource management, recording the maximum, minimum, and used resources of a Label.

    - + \ No newline at end of file diff --git a/docs/1.0.2/contact/index.html b/docs/1.0.2/contact/index.html index b53ef2463be..ec591b4a1d3 100644 --- a/docs/1.0.2/contact/index.html +++ b/docs/1.0.2/contact/index.html @@ -7,7 +7,7 @@ Contact Us | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.0.2/deployment/cluster_deployment/index.html b/docs/1.0.2/deployment/cluster_deployment/index.html index 69b9d41b30f..e904172b9aa 100644 --- a/docs/1.0.2/deployment/cluster_deployment/index.html +++ b/docs/1.0.2/deployment/cluster_deployment/index.html @@ -7,7 +7,7 @@ Cluster Deployment | Apache Linkis - + @@ -21,7 +21,7 @@ Replicas will also display the replica nodes adjacent to the cluster.

    - + \ No newline at end of file diff --git a/docs/1.0.2/deployment/engine_conn_plugin_installation/index.html b/docs/1.0.2/deployment/engine_conn_plugin_installation/index.html index 31226469c8e..d12ed54837f 100644 --- a/docs/1.0.2/deployment/engine_conn_plugin_installation/index.html +++ b/docs/1.0.2/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ EngineConnPlugin Installation | Apache Linkis - + @@ -17,7 +17,7 @@ wds.linkis.engineconn.plugin.loader.store.path, which is used by EngineConnPluginServer to read the actual implementation Jar of the engine.

    It is highly recommended to set wds.linkis.engineconn.home and wds.linkis.engineconn.plugin.loader.store.path to the same directory, so that the engine ZIP package exported by Maven can be unzipped directly into this directory, for example into ${LINKIS_HOME}/lib/linkis-engineconn-plugins:

    ${LINKIS_HOME}/lib/linkis-engineconn-plugins:
    └── hive
        └── dist
        └── plugin
    └── spark
        └── dist
        └── plugin

    If the two parameters do not point to the same directory, you need to place the dist and plugin directories separately, as shown in the following example:

    ## dist directory
    ${LINKIS_HOME}/lib/linkis-engineconn-plugins/dist:
    └── hive
        └── dist
    └── spark
        └── dist

    ## plugin directory
    ${LINKIS_HOME}/lib/linkis-engineconn-plugins/plugin:
    └── hive
        └── plugin
    └── spark
        └── plugin

    2.2 Configuration modification of management console (optional)#

    The configuration of the Linkis 1.0 management console is managed by engine label. If the new engine has configuration parameters, you need to insert the corresponding configuration parameters into the Configuration module, which involves four tables:

    linkis_configuration_config_key: insert the keys and default values of the engine's configuration parameters
    linkis_manager_label: insert the engine label, such as hive-1.2.1
    linkis_configuration_category: insert the catalog relationship of the engine
    linkis_configuration_config_value: insert the configuration that the engine needs to display

    If a new version is added to an existing engine, you can modify the version of the corresponding engine in the linkis_configuration_dml.sql file and then execute it.

    2.3 Engine refresh#

    1. The engine supports real-time refresh. After the engine is placed in the corresponding directory, Linkis 1.0 provides a way to load the engine without restarting the server: simply send a request to the linkis-engineconn-plugin-server service through the RESTful interface, i.e., the actual IP and port where the service is deployed. The request interface is http://ip:port/api/rest_j/v1/rpc/receiveAndReply, the request method is POST, and the request body is {"method":"/enginePlugin/engineConn/refreshAll"} (see the Java sketch after this list).

    2. Restart refresh: the engine directory can be forcibly refreshed by restarting the service

    ### cd to the sbin directory, restart linkis-engineconn-plugin-server
    cd /Linkis1.0.0/sbin
    ## Execute linkis-daemon script
    sh linkis-daemon.sh restart linkis-engine-plugin-server

    3. Check whether the engine refresh succeeded: if you encounter problems during the refresh process and need to confirm whether it succeeded, check whether the last_update_time of the linkis_engine_conn_plugin_bml_resources table in the database is the time at which the refresh was triggered.
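A minimal Java sketch of the real-time refresh request from step 1, using java.net.http (Java 11+). The URL, method, and request body are taken from this document; replace ip:port with the actual linkis-engineconn-plugin-server deployment address:

```java
import java.net.URI;
import java.net.http.*;

public class EnginePluginRefresh {
    public static void main(String[] args) throws Exception {
        // POST the refreshAll method to the RPC endpoint described in step 1.
        HttpRequest request = HttpRequest.newBuilder(
                URI.create("http://ip:port/api/rest_j/v1/rpc/receiveAndReply"))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(
                        "{\"method\":\"/enginePlugin/engineConn/refreshAll\"}"))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}
```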

    - + \ No newline at end of file diff --git a/docs/1.0.2/deployment/installation_hierarchical_structure/index.html b/docs/1.0.2/deployment/installation_hierarchical_structure/index.html index 4dd5a7dc9be..696a8b0bc03 100644 --- a/docs/1.0.2/deployment/installation_hierarchical_structure/index.html +++ b/docs/1.0.2/deployment/installation_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Installation Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    Installation directory structure

    The directory structure of Linkis 1.0 is very different from that of the 0.x versions. In 0.x, each microservice had its own independent root directory. The main advantage of that structure is that it makes microservices easy to distinguish and manage individually, but it has some obvious problems:

    1. There are too many microservice directories, making directory switching and management inconvenient
    2. There is no unified startup script, which makes it more troublesome to start and stop microservices
    3. There are a large number of duplicate service configurations, and the same configuration often needs to be modified in many places
    4. There are a large number of repeated Lib dependencies, which increases the size of the installation package and the risk of dependency conflicts

    Therefore, in Linkis 1.0, we have greatly optimized and adjusted the installation directory structure, reducing the number of microservice directories, reducing the jar packages that are repeatedly dependent, and reusing configuration files and microservice management scripts as much as possible. Mainly reflected in the following aspects:

    1. The bin folder is no longer provided for each microservice; it is now shared by all microservices.

    The bin folder has become the installation directory, mainly used for installing Linkis 1.0 and checking the environment status. The new sbin directory provides one-click start and stop of Linkis, and independent start and stop of each microservice by changing parameters.

    2. A separate conf directory is no longer provided for each microservice; it is now shared by all microservices.

    The conf folder contains two kinds of content. On the one hand, there is configuration information shared by all microservices, which users can customize according to their own environment; on the other hand, there is configuration specific to each microservice, which users normally do not need to change.

    3. The lib folder is no longer provided for each microservice; it is now shared by all microservices.

    The lib folder likewise contains two kinds of content: the common dependencies required by all microservices, and the special dependencies required by each microservice.

    4. The log directory is no longer provided for each microservice; it is now shared by all microservices.

    The log directory contains the log files of all microservices.

    The simplified directory structure of Linkis1.0 is as follows.

    ├── bin ──installation directory
    │ ├── checkEnv.sh ── Environment variable detection
    │ ├── checkServices.sh ── Microservice status check
    │ ├── common.sh ── Some public shell functions
    │ ├── install-io.sh ── Used for dependency replacement during installation
    │ └── install.sh ── Main script of Linkis installation
    ├── conf ──configuration directory
    │ ├── application-eureka.yml
    │ ├── application-linkis.yml ──Microservice general yml
    │ ├── linkis-cg-engineconnmanager-io.properties
    │ ├── linkis-cg-engineconnmanager.properties
    │ ├── linkis-cg-engineplugin.properties
    │ ├── linkis-cg-entrance.properties
    │ ├── linkis-cg-linkismanager.properties
    │ ├── linkis-computation-governance
    │ │   └── linkis-client
    │ │       └── linkis-cli
    │ │           ├── linkis-cli.properties
    │ │           └── log4j2.xml
    │ ├── linkis-env.sh ──linkis environment properties
    │ ├── linkis-et-validator.properties
    │ ├── linkis-mg-gateway.properties
    │ ├── linkis.properties ──linkis global properties
    │ ├── linkis-ps-bml.properties
    │ ├── linkis-ps-cs.properties
    │ ├── linkis-ps-datasource.properties
    │ ├── linkis-ps-publicservice.properties
    │ ├── log4j2.xml
    │ ├── proxy.properties (optional)
    │ └── token.properties (optional)
    ├── db ──database DML and DDL file directory
    │ ├── linkis_ddl.sql ──Database table definition SQL
    │ ├── linkis_dml.sql ──Database table initialization SQL
    │ └── module ──Contains DML and DDL files of each microservice
    ├── lib ──lib directory
    │ ├── linkis-commons ──Common dependency package
    │ ├── linkis-computation-governance ──The lib directory of the computing governance module
    │ ├── linkis-engineconn-plugins ──lib directory of all EngineConnPlugins
    │ ├── linkis-public-enhancements ──lib directory of public enhancement services
    │ └── linkis-spring-cloud-services ──SpringCloud lib directory
    ├── logs ──log directory
    │ ├── linkis-cg-engineconnmanager-gc.log
    │ ├── linkis-cg-engineconnmanager.log
    │ ├── linkis-cg-engineconnmanager.out
    │ ├── linkis-cg-engineplugin-gc.log
    │ ├── linkis-cg-engineplugin.log
    │ ├── linkis-cg-engineplugin.out
    │ ├── linkis-cg-entrance-gc.log
    │ ├── linkis-cg-entrance.log
    │ ├── linkis-cg-entrance.out
    │ ├── linkis-cg-linkismanager-gc.log
    │ ├── linkis-cg-linkismanager.log
    │ ├── linkis-cg-linkismanager.out
    │ ├── linkis-et-validator-gc.log
    │ ├── linkis-et-validator.log
    │ ├── linkis-et-validator.out
    │ ├── linkis-mg-eureka-gc.log
    │ ├── linkis-mg-eureka.log
    │ ├── linkis-mg-eureka.out
    │ ├── linkis-mg-gateway-gc.log
    │ ├── linkis-mg-gateway.log
    │ ├── linkis-mg-gateway.out
    │ ├── linkis-ps-bml-gc.log
    │ ├── linkis-ps-bml.log
    │ ├── linkis-ps-bml.out
    │ ├── linkis-ps-cs-gc.log
    │ ├── linkis-ps-cs.log
    │ ├── linkis-ps-cs.out
    │ ├── linkis-ps-datasource-gc.log
    │ ├── linkis-ps-datasource.log
    │ ├── linkis-ps-datasource.out
    │ ├── linkis-ps-publicservice-gc.log
    │ ├── linkis-ps-publicservice.log
    │ └── linkis-ps-publicservice.out
    ├── pid ──Process IDs of all microservices
    │ ├── linkis_cg-engineconnmanager.pid ──EngineConnManager microservice
    │ ├── linkis_cg-engineconnplugin.pid ──EngineConnPlugin microservice
    │ ├── linkis_cg-entrance.pid ──Engine entrance microservice
    │ ├── linkis_cg-linkismanager.pid ──linkis manager microservice
    │ ├── linkis_mg-eureka.pid ──eureka microservice
    │ ├── linkis_mg-gateway.pid ──gateway microservice
    │ ├── linkis_ps-bml.pid ──material library microservice
    │ ├── linkis_ps-cs.pid ──Context microservice
    │ ├── linkis_ps-datasource.pid ──Data source microservice
    │ └── linkis_ps-publicservice.pid ──public microservice
    └── sbin ──microservice start and stop script directory
        ├── ext ──Start and stop script directory of each microservice
        ├── linkis-daemon.sh ── Quickly start, stop, or restart a single microservice
        ├── linkis-start-all.sh ── Start all microservices with one click
        └── linkis-stop-all.sh ── Stop all microservices with one click

    Configuration item modification

    After executing install.sh in the bin directory to complete the Linkis installation, you need to modify the configuration items. All configuration items are located in the conf directory. Normally you need to modify the three configuration files db.sh, linkis.properties, and linkis-env.sh. For project installation and configuration, please refer to the article "Linkis1.0 Installation".

    Microservice start and stop

    After modifying the configuration items, you can start the microservice in the sbin directory. The names of all microservices are as follows:

    ├── linkis-cg-engineconnmanager ──engine management service
    ├── linkis-cg-engineplugin ──EngineConnPlugin management service
    ├── linkis-cg-entrance ──computing governance entrance service
    ├── linkis-cg-linkismanager ──computing governance management service
    ├── linkis-mg-eureka ──microservice registry service
    ├── linkis-mg-gateway ──Linkis gateway service
    ├── linkis-ps-bml ──material library service
    ├── linkis-ps-cs ──context service
    ├── linkis-ps-datasource ──data source service
    └── linkis-ps-publicservice ──public service

    Microservice abbreviation:

    Abbreviation | Full English Name | Full Chinese Name
    cg | Computation Governance | Computing Governance
    mg | Microservice Governance | Microservice Governance
    ps | Public Enhancement Service | Public Enhancement Service

    In the past, starting and stopping a single microservice required entering that microservice's bin directory and executing its start/stop script. With many microservices, starting and stopping became troublesome and added a lot of directory-switching operations. Linkis 1.0 places all microservice start-and-stop scripts in the sbin directory, so only a single entry script needs to be executed.

    Under the Linkis/sbin directory:

    1.Start all microservices at once:

    sh linkis-start-all.sh

    2.Shut down all microservices at once

    sh linkis-stop-all.sh

    3. Start a single microservice (drop the linkis prefix from the service name, e.g. mg-eureka)

    sh linkis-daemon.sh start service-name

    For example:

    sh linkis-daemon.sh start mg-eureka

    4.Shut down a single microservice

    sh linkis-daemon.sh stop service-name

    For example:

    sh linkis-daemon.sh stop mg-eureka

    5.Restart a single microservice

    sh linkis-daemon.sh restart service-name

    For example:

    sh linkis-daemon.sh restart mg-eureka

    6.View the status of a single microservice

    sh linkis-daemon.sh status service-name

    For example:

    sh linkis-daemon.sh status mg-eureka
    - + \ No newline at end of file diff --git a/docs/1.0.2/deployment/quick_deploy/index.html b/docs/1.0.2/deployment/quick_deploy/index.html index f42dc8804b4..e323bc9cb82 100644 --- a/docs/1.0.2/deployment/quick_deploy/index.html +++ b/docs/1.0.2/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ Quick Deployment | Apache Linkis - + @@ -23,7 +23,7 @@ # set the connection information of the database # including ip address, database's name, username and port # Mainly used to store user's customized variables, configuration parameters, UDFs, and samll functions, and to provide underlying storage of the JobHistory. MYSQL_HOST= MYSQL_PORT= MYSQL_DB= MYSQL_USER= MYSQL_PASSWORD=

    3. Installation and Startup#

    1. Execute the installation script:#

        sh bin/install.sh

    2. Installation steps#

    • The install.sh script will ask you whether to initialize the database and import the metadata.

    Running install.sh repeatedly could clear all data in the databases. Therefore, each time install.sh is executed, the user is asked whether the database needs to be initialized and the metadata imported.

    Please select yes on the first installation.

    Please note: If you are upgrading the existing environment of Linkis from 0.X to 1.0, please do not choose yes directly, refer to Linkis1.0 Upgrade Guide first.

    3. Check whether the installation was successful#

    You can check whether the installation is successful or not by viewing the logs printed on the console.

    If there is an error message, check the specific reason for that error or refer to FAQ for help.

    4. Linkis quick startup#

    (1). Start services

    Run the following commands on the installation directory to start all services.

      sh sbin/linkis-start-all.sh

    (2). Check if start successfully

    You can check the startup status of the services on Eureka. Here is how:

    Open http://${EUREKA_INSTALL_IP}:${EUREKA_PORT} on the browser and check if services have registered successfully.

    If you have not specified EUREKA_INSTALL_IP and EUREKA_PORT in config.sh, the HTTP address is http://127.0.0.1:20303
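
    You can also check from the command line. A minimal sketch, assuming the default address 127.0.0.1:20303 and Eureka's standard REST endpoint /eureka/apps:

        # list the names of all microservices currently registered in Eureka
        curl -s http://127.0.0.1:20303/eureka/apps | grep "<name>"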

    As shown in the figure below, if all of the following microservices are registered in Eureka, it means they have started successfully and are able to work.

    [Figure: Linkis1.0 Eureka registration page]

    - + \ No newline at end of file diff --git a/docs/1.0.2/deployment/sourcecode_hierarchical_structure/index.html b/docs/1.0.2/deployment/sourcecode_hierarchical_structure/index.html index 3a4fe2c4cca..b4f2f3ee11b 100644 --- a/docs/1.0.2/deployment/sourcecode_hierarchical_structure/index.html +++ b/docs/1.0.2/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Source Code Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    Source Code Directory Structure

    Description of the Linkis source code hierarchical directory structure. If you want to learn more about the Linkis modules, please check the related Linkis architecture design documents.

    |-- assembly-combined-package    // Module that compiles the entire project
    |        |-- assembly-combined
    |        |-- bin
    |        |-- config
    |        |-- src
    |-- linkis-commons    // Core abstraction, contains all the common modules
    |        |-- linkis-common    // Common module, with many built-in common tools
    |        |-- linkis-hadoop-common
    |        |-- linkis-httpclient    // Java SDK top-level interface
    |        |-- linkis-message-scheduler
    |        |-- linkis-module
    |        |-- linkis-mybatis    // SpringCloud's Mybatis module
    |        |-- linkis-protocol
    |        |-- linkis-rpc    // RPC module, complex two-way communication based on Feign
    |        |-- linkis-scheduler    // General scheduling module
    |        |-- linkis-storage
    |-- linkis-computation-governance    // Computation governance services
    |        |-- linkis-client    // Java SDK, users can directly access Linkis through the Client
    |        |-- linkis-computation-governance-common
    |        |-- linkis-engineconn
    |        |-- linkis-engineconn-manager
    |        |-- linkis-entrance    // General low-level entrance module
    |        |-- linkis-entrance-client
    |        |-- linkis-jdbc-driver
    |        |-- linkis-manager
    |-- linkis-engineconn-plugins
    |        |-- engineconn-plugins
    |        |-- linkis-engineconn-plugin-framework
    |-- linkis-extensions
    |        |-- linkis-io-file-client
    |-- linkis-orchestrator
    |        |-- linkis-code-orchestrator
    |        |-- linkis-computation-orchestrator
    |        |-- linkis-orchestrator-core
    |        |-- plugin
    |-- linkis-public-enhancements    // Public enhancement services
    |        |-- linkis-bml    // Material library
    |        |-- linkis-context-service    // Unified context
    |        |-- linkis-datasource    // Data source service
    |        |-- linkis-publicservice    // Public service
    |-- linkis-spring-cloud-services    // Microservice governance
    |        |-- linkis-service-discovery
    |        |-- linkis-service-gateway    // Gateway
    |-- db    // Database information
    |-- web    // Linkis management console code
    - + \ No newline at end of file diff --git a/docs/1.0.2/deployment/web_install/index.html b/docs/1.0.2/deployment/web_install/index.html index 05111922178..e95aa3d1806 100644 --- a/docs/1.0.2/deployment/web_install/index.html +++ b/docs/1.0.2/deployment/web_install/index.html @@ -7,7 +7,7 @@ Linkis Console Deployment | Apache Linkis - + @@ -20,7 +20,7 @@
    1. Copy the front-end package to the corresponding directory: /appcom/Install/linkis/dist; # The directory where the front-end package is decompressed

    2. Start the service sudo systemctl restart nginx

    3. After execution, you can access the console directly in Google Chrome: http://nginx_ip:nginx_port

    3. Common problems#

    (1) Upload file size limit

    sudo vi /etc/nginx/nginx.conf

    Change upload size

    client_max_body_size 200m;

    (2) Interface timeout

    sudo vi /etc/nginx/conf.d/linkis.conf

    Change interface timeout

    proxy_read_timeout 600s;
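
    After changing either configuration file, the new values only take effect once Nginx reloads them:

        sudo nginx -t         # check that the edited configuration is valid
        sudo nginx -s reload  # reload the configuration without dropping connections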
    - + \ No newline at end of file diff --git a/docs/1.0.2/development/linkis_compile_and_package/index.html b/docs/1.0.2/development/linkis_compile_and_package/index.html index 4eb734cae3f..0d76a81757f 100644 --- a/docs/1.0.2/development/linkis_compile_and_package/index.html +++ b/docs/1.0.2/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Compile And Package | Apache Linkis - + @@ -20,7 +20,7 @@ Modify the dependency hadoop-hdfs to hadoop-hdfs-client:

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>   <!-- Just replace this line with <artifactId>hadoop-hdfs-client</artifactId> -->
        <version>${hadoop.version}</version>
    </dependency>

    Modify hadoop-hdfs to:

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs-client</artifactId>
        <version>${hadoop.version}</version>
    </dependency>

    5.2 How to modify the Spark and Hive versions that Linkis depends on#

    Here's an example of changing the version of Spark. Go to the directory where the Spark engine is located and manually modify the Spark version information of the pom.xml file as follows:

    cd incubator-linkis-x.x.x/linkis-engineconn-plugins/engineconn-plugins/spark
    vim pom.xml

    <properties>
        <spark.version>2.4.3</spark.version>   <!-- Modify the Spark version number here -->
    </properties>

    Modifying the version of other engines is similar to modifying the Spark version. First, enter the directory where the relevant engine is located, and manually modify the engine version information in the pom.xml file.

    Then please refer to 4. Compile an engine

    - + \ No newline at end of file diff --git a/docs/1.0.2/development/linkis_debug/index.html b/docs/1.0.2/development/linkis_debug/index.html index 2312a5e9fb5..fa8684ac4dd 100644 --- a/docs/1.0.2/development/linkis_debug/index.html +++ b/docs/1.0.2/development/linkis_debug/index.html @@ -7,7 +7,7 @@ Linkis Debug | Apache Linkis - + @@ -44,7 +44,7 @@ [linkis-cg-engineplugin]nohup java -DserviceName=linkis-cg-engineplugin -Xmx512M -XX:+UseG1GC -Xloggc:/data/LinkisInstallDir/logs/linkis-cg-engineplugin-gc.log -cp /data/LinkisInstallDir/conf/:/data/LinkisInstallDir /lib/linkis-commons/public-module/*:/data/LinkisInstallDir/lib/linkis-computation-governance/linkis-cg-engineplugin/* org.apache.linkis.engineplugin.server.LinkisEngineConnPluginServer 2>&1> /data /LinkisInstallDir/logs/linkis-cg-engineplugin.out &

    Remote debugging service steps#

    todo

    - + \ No newline at end of file diff --git a/docs/1.0.2/development/new_engine_conn/index.html b/docs/1.0.2/development/new_engine_conn/index.html index 26f7c56dde8..4724d340d05 100644 --- a/docs/1.0.2/development/new_engine_conn/index.html +++ b/docs/1.0.2/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ How To Quickly Implement A New Engine | Apache Linkis - + @@ -17,7 +17,7 @@ b) createExecutor: creates a "HiveEngineConnExecutor" executor object based on the current engine connection information.

    The Hive engine is an ordinary Java process, so its "EngineConnLaunchBuilder" directly inherits "JavaProcessEngineConnLaunchBuilder". Memory size, Java parameters and the classPath can all be adjusted through configuration; please refer to the "EnvConfiguration" class for details.

    The Hive engine uses "LoadInstanceResource" resources, so there is no need to implement "EngineResourceFactory"; the default "GenericEngineResourceFactory" is used directly, and the amount of resources is adjusted through configuration. Refer to the "EngineConnPluginConf" class for details.

    Implement "HiveEngineConnPlugin" and provide methods for creating the above implementation classes.

    - + \ No newline at end of file diff --git a/docs/1.0.2/development/web_build/index.html b/docs/1.0.2/development/web_build/index.html index 20ba7a71582..f09c825cc3e 100644 --- a/docs/1.0.2/development/web_build/index.html +++ b/docs/1.0.2/development/web_build/index.html @@ -7,7 +7,7 @@ Linkis Console Compile | Apache Linkis - + @@ -17,7 +17,7 @@ When you run the project in this way, the effect of your code changes will be dynamically reflected in the browser.

    Note: Because the front end and back end of the project are developed separately, when running in a local browser you need to allow cross-domain access so the browser can reach the back-end interfaces. For specific settings, please refer to solving the Chrome cross-domain problem.

    6. Common problem#

    6.1 npm install cannot succeed#

    If you encounter this problem, you can use the Taobao npm mirror in China:

    npm install -g cnpm --registry=https://registry.npm.taobao.org

    Then, replace the npm install command by executing the following command

    cnpm install

    Note that you can still use the npm run build and npm run serve commands to package and start the project.

    - + \ No newline at end of file diff --git a/docs/1.0.2/engine_usage/hive/index.html b/docs/1.0.2/engine_usage/hive/index.html index 59700127665..e41b945155e 100644 --- a/docs/1.0.2/engine_usage/hive/index.html +++ b/docs/1.0.2/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive Engine Usage | Apache Linkis - + @@ -26,7 +26,7 @@ </loggers></configuration>
    - + \ No newline at end of file diff --git a/docs/1.0.2/engine_usage/jdbc/index.html b/docs/1.0.2/engine_usage/jdbc/index.html index 98f2c6f9da7..53e19acf8a6 100644 --- a/docs/1.0.2/engine_usage/jdbc/index.html +++ b/docs/1.0.2/engine_usage/jdbc/index.html @@ -7,7 +7,7 @@ JDBC Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "jdbc-4");            // required engineType label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");  // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "jdbc");                // required codeType

    3.2 How to use Linkis-cli#

    Starting with Linkis 1.0, tasks can be submitted through the Linkis-cli. You only need to specify the corresponding EngineConn and CodeType tag types. JDBC usage is as follows:

    sh ./bin/linkis-cli -engineType jdbc-4 -codeType jdbc -code "show tables"  -submitUser hadoop -proxyUser hadoop

    The specific usage can refer to Linkis CLI Manual.

    3.3 How to use Scriptis#

    Using Scriptis is the simplest way: go directly to Scriptis, right-click a directory, create a new JDBC script, write the JDBC code and click Execute.

    The execution principle of JDBC is to load the JDBC driver, submit the SQL to the SQL server for execution, obtain the result set, and return it.

    Figure 3-2 Screenshot of the execution effect of JDBC

    4. JDBC EngineConn user settings#

    JDBC user settings are mainly the JDBC connection information; it is recommended that users encrypt and manage the password and other sensitive information.

    - + \ No newline at end of file diff --git a/docs/1.0.2/engine_usage/overview/index.html b/docs/1.0.2/engine_usage/overview/index.html index 692bf4a55f1..60f183b6ca3 100644 --- a/docs/1.0.2/engine_usage/overview/index.html +++ b/docs/1.0.2/engine_usage/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -16,7 +16,7 @@         The engine is a component that provides users with data processing and analysis capabilities. Currently, it has been connected to Linkis's engine, including mainstream big data computing engines Spark, Hive, Presto, etc. , There are also engines with the ability to process data in scripts such as python and Shell. DataSphereStudio is a one-stop data operation platform docked with Linkis. Users can conveniently use the engine supported by Linkis in DataSphereStudio to complete interactive data analysis tasks and workflow tasks.

    Engine | Whether to support Scriptis | Whether to support workflow
    Spark | Support | Support
    Hive | Support | Support
    Presto | Support | Support
    ElasticSearch | Support | Support
    Python | Support | Support
    Shell | Support | Support
    JDBC | Support | Support
    MySQL | Support | Support

    2. Document structure#

    You can refer to the following documents for the related documents of the engines that have been accessed.

    - + \ No newline at end of file diff --git a/docs/1.0.2/engine_usage/python/index.html b/docs/1.0.2/engine_usage/python/index.html index cfa691659ec..0e73a8c7849 100644 --- a/docs/1.0.2/engine_usage/python/index.html +++ b/docs/1.0.2/engine_usage/python/index.html @@ -7,7 +7,7 @@ Python Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Gateway, and then the Python EngineConn submits the code to the python executor for execution.

    Figure 3-1 Screenshot of the execution effect of python

    4. Python EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the version of python and some modules that python needs to load.

    Figure 4-1 User-defined configuration management console of python

    - + \ No newline at end of file diff --git a/docs/1.0.2/engine_usage/shell/index.html b/docs/1.0.2/engine_usage/shell/index.html index dc1f9390d24..9f634912425 100644 --- a/docs/1.0.2/engine_usage/shell/index.html +++ b/docs/1.0.2/engine_usage/shell/index.html @@ -7,7 +7,7 @@ Shell Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "shell-1");           // required engineType label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");  // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "shell");               // required codeType

    3.2 How to use Linkis-cli#

    Starting with Linkis 1.0, tasks can be submitted through the Linkis-cli. You only need to specify the corresponding EngineConn and CodeType tag types. Shell usage is as follows:

    sh ./bin/linkis-cli -engineType shell-1 -codeType shell -code "echo \"hello\" "  -submitUser hadoop -proxyUser hadoop

    The specific usage can refer to Linkis CLI Manual.

    3.3 How to use Scriptis#

    Using Scriptis is the simplest way: directly enter Scriptis, right-click a directory, create a new shell script, write the shell code and click Execute.

    The execution principle of the shell engine is that the shell EngineConn starts a system process via Java's built-in ProcessBuilder, redirects the process output to the EngineConn, and writes it to the log.

    Figure 3-1 Screenshot of shell execution effect

    4. Shell EngineConn user settings#

    The shell EngineConn can generally set the maximum memory of the EngineConn JVM.

    - + \ No newline at end of file diff --git a/docs/1.0.2/engine_usage/spark/index.html b/docs/1.0.2/engine_usage/spark/index.html index 920038a6570..2a6eb80f9aa 100644 --- a/docs/1.0.2/engine_usage/spark/index.html +++ b/docs/1.0.2/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Figure 3-4 pyspark execution mode

    4. Spark EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the number of Spark session executors and the executor memory. These parameters let users set their own Spark parameters more freely, and other Spark parameters can also be modified, such as the Python version used by pyspark.

    Figure 4-1 Spark user-defined configuration management console

    - + \ No newline at end of file diff --git a/docs/1.0.2/introduction/index.html b/docs/1.0.2/introduction/index.html index 0a6ba23c4e8..47e428a865e 100644 --- a/docs/1.0.2/introduction/index.html +++ b/docs/1.0.2/introduction/index.html @@ -7,7 +7,7 @@ Introduction | Apache Linkis - + @@ -23,7 +23,7 @@ Since the first release of Linkis in 2019, it has accumulated more than 700 trial companies and 1000+ sandbox trial users, which involving diverse industries, from finance, banking, tele-communication, to manufactory, internet companies and so on.

    - + \ No newline at end of file diff --git a/docs/1.0.2/tags/index.html b/docs/1.0.2/tags/index.html index 2ccd4e5fddf..f3e3d235856 100644 --- a/docs/1.0.2/tags/index.html +++ b/docs/1.0.2/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -15,7 +15,7 @@

    Tags

    - + \ No newline at end of file diff --git a/docs/1.0.2/tuning_and_troubleshooting/configuration/index.html b/docs/1.0.2/tuning_and_troubleshooting/configuration/index.html index de5eb5f6744..c4668f826ff 100644 --- a/docs/1.0.2/tuning_and_troubleshooting/configuration/index.html +++ b/docs/1.0.2/tuning_and_troubleshooting/configuration/index.html @@ -7,7 +7,7 @@ Configurations | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    Linkis1.0 Configurations

    The configuration of Linkis1.0 is simplified compared with Linkis0.x. A public configuration file linkis.properties is provided in the conf directory to avoid having to configure common parameters in multiple microservices at the same time. This document lists the parameters of Linkis1.0 by module.

            Please note: this article only lists the Linkis configuration parameters that affect operating performance or depend on the environment. Many parameters that users do not need to care about have been omitted; interested users can browse the source code.

    1. General configuration#

            The general configuration can be set in the global linkis.properties: set it once, and it takes effect for every microservice.
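
    For example, a minimal sketch of setting two of the global parameters listed below from the shell (run from the Linkis installation directory, then restart the services so the new values are picked up):

        # append to the public configuration file shared by all microservices
        echo "wds.linkis.test.mode=true"   >> conf/linkis.properties
        echo "wds.linkis.test.user=hadoop" >> conf/linkis.properties
        sh sbin/linkis-stop-all.sh && sh sbin/linkis-start-all.sh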

    1.1 Global configurations#

    Parameter name | Default value | Description
    wds.linkis.encoding | utf-8 | Linkis default encoding format
    wds.linkis.date.pattern | yyyy-MM-dd'T'HH:mm:ssZ | Default date format
    wds.linkis.test.mode | false | Whether to enable debugging mode; if set to true, all microservices support password-free login and all EngineConns open remote debugging ports
    wds.linkis.test.user | None | When wds.linkis.test.mode=true, the default login user for password-free login
    wds.linkis.home | /appcom/Install/LinkisInstall | Linkis installation directory; if it does not exist, the value of LINKIS_HOME is used automatically
    wds.linkis.httpclient.default.connect.timeOut | 50000 | Linkis HttpClient default connection timeout

    1.2 LDAP configurations#

    Parameter name | Default value | Description
    wds.linkis.ldap.proxy.url | None | LDAP URL address
    wds.linkis.ldap.proxy.baseDN | None | LDAP baseDN address
    wds.linkis.ldap.proxy.userNameFormat | None | -

    1.3 Hadoop configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.hadoop.root.user | hadoop | HDFS super user
    wds.linkis.filesystem.hdfs.root.path | None | User's HDFS default root path
    wds.linkis.keytab.enable | false | Whether to enable Kerberos
    wds.linkis.keytab.file | /appcom/keytab | Kerberos keytab path, effective only when wds.linkis.keytab.enable=true
    wds.linkis.keytab.host.enabled | false | -
    wds.linkis.keytab.host | 127.0.0.1 | -
    hadoop.config.dir | None | If not configured, it will be read from the environment variable HADOOP_CONF_DIR
    wds.linkis.hadoop.external.conf.dir.prefix | /appcom/config/external-conf/hadoop | Hadoop additional configuration directory

    1.4 Linkis RPC configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.rpc.broadcast.thread.num | 10 | Number of Linkis RPC broadcast threads (recommended default)
    wds.linkis.ms.rpc.sync.timeout | 60000 | Default processing timeout of the Linkis RPC Receiver
    wds.linkis.rpc.eureka.client.refresh.interval | 1s | Refresh interval of the Eureka client's microservice list (recommended default)
    wds.linkis.rpc.eureka.client.refresh.wait.time.max | 1m | Maximum waiting time for a refresh (recommended default)
    wds.linkis.rpc.receiver.asyn.consumer.thread.max | 10 | Maximum number of Receiver Consumer threads (if there are many online users, it is recommended to increase this appropriately)
    wds.linkis.rpc.receiver.asyn.consumer.freeTime.max | 2m | Receiver Consumer maximum idle time
    wds.linkis.rpc.receiver.asyn.queue.size.max | 1000 | Maximum buffer size of the receiver consumption queue (if there are many online users, it is recommended to increase this appropriately)
    wds.linkis.rpc.sender.asyn.consumer.thread.max | 5 | Maximum number of Sender Consumer threads
    wds.linkis.rpc.sender.asyn.consumer.freeTime.max | 2m | Sender Consumer maximum idle time
    wds.linkis.rpc.sender.asyn.queue.size.max | 300 | Maximum buffer size of the sender consumption queue

    2. Computation governance configuration parameters#

    2.1 Entrance configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.spark.engine.version | 2.4.3 | Default Spark version used when the user submits a script without specifying a version
    wds.linkis.hive.engine.version | 1.2.1 | Default Hive version used when the user submits a script without specifying a version
    wds.linkis.python.engine.version | python2 | Default Python version used when the user submits a script without specifying a version
    wds.linkis.jdbc.engine.version | 4 | Default JDBC version used when the user submits a script without specifying a version
    wds.linkis.shell.engine.version | 1 | Default shell version used when the user submits a script without specifying a version
    wds.linkis.appconn.engine.version | v1 | Default AppConn version used when the user submits a script without specifying a version
    wds.linkis.entrance.scheduler.maxParallelismUsers | 1000 | Maximum number of concurrent users supported by Entrance
    wds.linkis.entrance.job.persist.wait.max | 5m | Maximum time Entrance waits for JobHistory to persist a Job
    wds.linkis.entrance.config.log.path | None | If not configured, the value of wds.linkis.filesystem.hdfs.root.path is used by default
    wds.linkis.default.requestApplication.name | IDE | Default submission system when none is specified
    wds.linkis.default.runType | sql | Default script type when none is specified
    wds.linkis.warn.log.exclude | org.apache,hive.ql,hive.metastore,com.netflix,com.webank.wedatasphere | Real-time WARN-level logs not pushed to the client by default
    wds.linkis.log.exclude | org.apache,hive.ql,hive.metastore,com.netflix,com.webank.wedatasphere,com.webank | Real-time INFO-level logs not pushed to the client by default
    wds.linkis.instance | 3 | Default number of concurrent jobs per user per engine
    wds.linkis.max.ask.executor.time | 5m | Maximum time to apply to LinkisManager for an available EngineConn
    wds.linkis.hive.special.log.include | org.apache.hadoop.hive.ql.exec.Task | Logs not filtered by default when pushing Hive logs to the client
    wds.linkis.spark.special.log.include | com.webank.wedatasphere.linkis.engine.spark.utils.JobProgressUtil | Logs not filtered by default when pushing Spark logs to the client
    wds.linkis.entrance.shell.danger.check.enabled | false | Whether to check and block dangerous shell syntax
    wds.linkis.shell.danger.usage | rm,sh,find,kill,python,for,source,hdfs,hadoop,spark-sql,spark-submit,pyspark,spark-shell,hive,yarn | Default dangerous shell syntax
    wds.linkis.shell.white.usage | cd,ls | Shell whitelist syntax
    wds.linkis.sql.default.limit | 5000 | Default maximum number of rows returned in a SQL result set

    2.2 EngineConn configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.engineconn.resultSet.default.store.path | hdfs:///tmp | Default storage path of job result sets
    wds.linkis.engine.resultSet.cache.max | 0k | If the result set is smaller than this size, EngineConn returns it directly to Entrance without writing it to disk
    wds.linkis.engine.default.limit | 5000 | -
    wds.linkis.engine.lock.expire.time | 120000 | Maximum idle time of the engine lock, i.e. how long after Entrance acquires the lock without submitting code to EngineConn before the lock is released
    wds.linkis.engineconn.ignore.words | org.apache.spark.deploy.yarn.Client | Logs ignored by default when the engine pushes logs to Entrance
    wds.linkis.engineconn.pass.words | org.apache.hadoop.hive.ql.exec.Task | Logs that must be pushed by default when the engine pushes logs to Entrance
    wds.linkis.engineconn.heartbeat.time | 3m | Default heartbeat interval from EngineConn to LinkisManager
    wds.linkis.engineconn.max.free.time | 1h | EngineConn's maximum idle time

    2.3 EngineConnManager configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.ecm.memory.max | 80g | Maximum total memory ECM may use to start EngineConns
    wds.linkis.ecm.cores.max | 50 | Maximum number of CPU cores ECM may use to start EngineConns
    wds.linkis.ecm.engineconn.instances.max | 50 | Maximum number of EngineConns that can be started; generally recommended to be the same as wds.linkis.ecm.cores.max
    wds.linkis.ecm.protected.memory | 4g | ECM protected memory; the memory ECM uses to start EngineConns cannot exceed wds.linkis.ecm.memory.max minus wds.linkis.ecm.protected.memory
    wds.linkis.ecm.protected.cores.max | 2 | Number of protected CPU cores of ECM; the meaning is analogous to wds.linkis.ecm.protected.memory
    wds.linkis.ecm.protected.engine.instances | 2 | Number of protected EngineConn instances of ECM
    wds.linkis.engineconn.wait.callback.pid | 3s | Waiting time for EngineConn to return its pid

    2.4 LinkisManager configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.manager.am.engine.start.max.time | 10m | Maximum time for LinkisManager to start a new EngineConn
    wds.linkis.manager.am.engine.reuse.max.time | 5m | Maximum selection time when LinkisManager reuses an existing EngineConn
    wds.linkis.manager.am.engine.reuse.count.limit | 10 | Maximum number of polling attempts when LinkisManager reuses an existing EngineConn
    wds.linkis.multi.user.engine.types | jdbc,es,presto | Engine types for which the user is not part of the reuse rule when LinkisManager reuses an existing EngineConn
    wds.linkis.rm.instance | 10 | Default maximum number of instances per user per engine
    wds.linkis.rm.yarnqueue.cores.max | 150 | Maximum number of cores per user in each engine's Yarn queue
    wds.linkis.rm.yarnqueue.memory.max | 450g | Maximum memory per user in each engine's Yarn queue
    wds.linkis.rm.yarnqueue.instance.max | 30 | Maximum number of applications each user may launch in each engine's Yarn queue

    3. Engine configuration parameters#

    3.1 JDBC engine configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.jdbc.default.limit | 5000 | Default maximum number of rows returned in a result set
    wds.linkis.jdbc.support.dbs | mysql=>com.mysql.jdbc.Driver,postgresql=>org.postgresql.Driver,oracle=>oracle.jdbc.driver.OracleDriver,hive2=>org.apache.hive.jdbc.HiveDriver,presto=>com.facebook.presto.jdbc.PrestoDriver | Drivers supported by the JDBC engine
    wds.linkis.engineconn.jdbc.concurrent.limit | 100 | Maximum number of concurrent SQL executions

    3.2 Python engine configuration parameters#

    Parameter name | Default value | Description
    pythonVersion | /appcom/Install/anaconda3/bin/python | Python command path
    python.path | None | Additional path to load for Python; only shared storage paths are accepted

    3.3 Spark engine configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.engine.spark.language-repl.init.time | 30s | Maximum initialization time for the Scala and Python command interpreters
    PYSPARK_DRIVER_PYTHON | python | Python command path
    wds.linkis.server.spark-submit | spark-submit | spark-submit command path

    4. PublicEnhancements configuration parameters#

    4.1 BML configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.bml.dws.version | v1 | Version number of Linkis Restful requests
    wds.linkis.bml.auth.token.key | Validation-Code | Password-free token-key for BML requests
    wds.linkis.bml.auth.token.value | BML-AUTH | Password-free token-value for BML requests
    wds.linkis.bml.hdfs.prefix | /tmp/linkis | Path prefix of BML files stored on HDFS

    4.2 Metadata configuration parameters#

    Parameter name | Default value | Description
    hadoop.config.dir | /appcom/config/hadoop-config | If it does not exist, the value of the environment variable HADOOP_CONF_DIR is used by default
    hive.config.dir | /appcom/config/hive-config | If it does not exist, the value of the environment variable HIVE_CONF_DIR is used by default
    hive.meta.url | None | URL of the HiveMetaStore database; must be configured if hive.config.dir is not configured
    hive.meta.user | None | User of the HiveMetaStore database
    hive.meta.password | None | Password of the HiveMetaStore database

    4.3 JobHistory configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.jobhistory.admin | None | Default Admin account, used to specify which users can view everyone's execution history

    4.4 FileSystem configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.filesystem.root.path | file:///tmp/linkis/ | User's local Linux root directory
    wds.linkis.filesystem.hdfs.root.path | hdfs:///tmp/ | User's HDFS root directory
    wds.linkis.workspace.filesystem.hdfsuserrootpath.suffix | /linkis/ | First-level suffix after the user's HDFS root directory; the user's actual root directory is ${hdfs.root.path}/${user}/${hdfsuserrootpath.suffix}
    wds.linkis.workspace.resultset.download.is.limit | true | Whether to limit the number of rows when the client downloads a result set
    wds.linkis.workspace.resultset.download.maxsize.csv | 5000 | Row limit when the result set is downloaded as a CSV file
    wds.linkis.workspace.resultset.download.maxsize.excel | 5000 | Row limit when the result set is downloaded as an Excel file
    wds.linkis.workspace.filesystem.get.timeout | 2000L | Maximum timeout for requests to the underlying file system (if your HDFS or Linux machine performs poorly, it is recommended to increase this appropriately)

    4.5 UDF configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.udf.share.path | /mnt/bdap/udf | Storage path of shared UDFs; it is recommended to set this to an HDFS path

    5. MicroService configuration parameters#

    5.1 Gateway configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.gateway.conf.enable.proxy.user | false | Whether to enable proxy-user mode; if enabled, the login user's requests are proxied to the proxy user for execution
    wds.linkis.gateway.conf.proxy.user.config | proxy.properties | File storing the proxy rules
    wds.linkis.gateway.conf.proxy.user.scan.interval | 600000 | Refresh interval of the proxy file
    wds.linkis.gateway.conf.enable.token.auth | false | Whether to enable token login mode; if enabled, Linkis can be accessed with tokens
    wds.linkis.gateway.conf.token.auth.config | token.properties | File storing the token rules
    wds.linkis.gateway.conf.token.auth.scan.interval | 600000 | Refresh interval of the token file
    wds.linkis.gateway.conf.url.pass.auth | /dws/ | Requests released by default without login verification
    wds.linkis.gateway.conf.enable.sso | false | Whether to enable SSO user login mode
    wds.linkis.gateway.conf.sso.interceptor | None | If SSO login mode is enabled, users need to implement SSOInterceptor to redirect to the SSO login page
    wds.linkis.admin.user | hadoop | Administrator user list
    wds.linkis.login_encrypt.enable | false | Whether RSA-encrypted password transmission is enabled when the user logs in
    wds.linkis.enable.gateway.auth | false | Whether to enable the Gateway IP whitelist mechanism
    wds.linkis.gateway.auth.file | auth.txt | File storing the IP whitelist
    - + \ No newline at end of file diff --git a/docs/1.0.2/tuning_and_troubleshooting/overview/index.html b/docs/1.0.2/tuning_and_troubleshooting/overview/index.html index 93ca443f845..88ee5f14457 100644 --- a/docs/1.0.2/tuning_and_troubleshooting/overview/index.html +++ b/docs/1.0.2/tuning_and_troubleshooting/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -17,7 +17,7 @@ The compatibility of the os version is the best, and some system versions may have command incompatibility. For example, the poor compatibility of yum in ubantu may cause yum-related errors in the installation and deployment. In addition, it is also recommended not to use windows as much as possible. Deploying linkis, currently no script is fully compatible with the .bat command.

  • Missing configuration item: There are two configuration files that need to be modified in linkis1.0 version, linkis-env.sh and db.sh

    The former contains the environment parameters Linkis needs to load at runtime, and the latter contains the database information for the tables Linkis itself stores. Under normal circumstances, if a configuration is missing, the error message will show an exception related to the missing key. For example, if db.sh is missing the database configuration, an error like unknown mysql server host '-P' appears, which is caused by the missing host.

  • Report error when starting microservice

    Linkis puts the log files of all microservices into the logs directory. The log directory levels are as follows:

    ├── linkis-computation-governance
    │   ├── linkis-cg-engineconnmanager
    │   ├── linkis-cg-engineplugin
    │   ├── linkis-cg-entrance
    │   └── linkis-cg-linkismanager
    ├── linkis-public-enhancements
    │   ├── linkis-ps-bml
    │   ├── linkis-ps-cs
    │   ├── linkis-ps-datasource
    │   └── linkis-ps-publicservice
    └── linkis-spring-cloud-services
        ├── linkis-mg-eureka
        └── linkis-mg-gateway

    It includes three microservice modules: computing governance, public enhancement, and microservice management. Each microservice contains three logs, linkis-gc.log, linkis.log, and linkis.out, corresponding to the service's GC log, service log, and service System.out log.

    Under normal circumstances, when an error occurs when starting a microservice, you can cd to the corresponding service in the log directory to view the related log to troubleshoot the problem. Generally, the most frequently occurring problems can also be divided into three categories:

    1. Port occupation: Since the default ports of Linkis microservices are mostly concentrated around 9000, before starting you need to check whether each microservice's port is occupied by another process. If it is occupied, modify the corresponding microservice port in the conf/linkis-env.sh file (see the port-check sketch after this list).

    2. Necessary configuration parameters are missing: Some microservices must load certain user-defined parameters before they can start normally. For example, the linkis-cg-engineplugin microservice loads the wds.linkis.engineconn.* configuration from conf/linkis.properties at startup. If the user moves the Linkis path after installation without updating this configuration, an error will be reported when the linkis-cg-engineplugin microservice starts.

    3. System environment is not compatible: It is recommended that users follow the recommended system and application versions in the official documents as much as possible when deploying, and install necessary system plug-ins such as expect and yum. Incompatible application versions may cause application-related errors. For example, SQL statement incompatibilities in MySQL 5.7 may cause errors in the linkis.ddl and linkis.dml files when initializing the database during installation; refer to the "Q&A Problem Summary" or the deployment documentation for the corresponding settings.
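
    A quick port-check sketch for item 1, assuming the default ports are concentrated in the 9000 range (plus 20303 for Eureka):

        # show which process, if any, is already listening on a Linkis port
        sudo netstat -tlnp | grep -E ":(9[0-9]{3}|20303)"
        # or, on machines without netstat
        sudo lsof -iTCP -sTCP:LISTEN -P -n | grep -E ":(9[0-9]{3}|20303)"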

  • Report error during microservice execution period

    Errors during the execution of microservices are more complicated, and the situations encountered differ by environment, but the troubleshooting method is basically the same. Starting from the corresponding microservice's error log directory, we can roughly divide the cases into three situations:

    1. Manually installed and deployed microservices report errors: The logs of these microservices are collected under the logs/ directory; after locating the microservice, enter the corresponding directory to view them.

    2. Engine start failure, insufficient resources, or engine request failure: When this type of error occurs it is not necessarily due to insufficient resources, because the front end can only fetch logs after the Spring project has started; errors occurring before the engine starts cannot be fetched well. Three kinds of high-frequency problems have been found during actual use by internal test users:

      a. The engine cannot be created because there is no engine directory permission: The log will be printed to the linkis.out file under the cg-engineconnmanager microservice. You need to enter the file to view the specific reason.

      b. There is a dependency conflict in the engine lib package, or the server cannot start normally because of insufficient memory: Since the engine directory has been created, the log is printed to the stdout file under the engine directory; the engine path can be found as described in item c.

      c. Errors reported during engine execution: Each started engine is a microservice that is dynamically loaded and started at runtime. If an error occurs when the engine starts, you need to find the engine's log in the directory of the user who started it. The root path is the ENGINECONN_ROOT_PATH filled in linkis-env.sh before installation; if you need to modify the path after installation, modify wds.linkis.engineconn.root.dir in linkis.properties (a log-search sketch follows below).
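
    A log-search sketch for item c, assuming ENGINECONN_ROOT_PATH was set to /appcom/tmp at install time (substitute your own value):

        # locate stdout files of engines started in the last hour and scan them for errors
        find /appcom/tmp -name stdout -mmin -60 2>/dev/null | xargs -r grep -il "error"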

  • Ⅴ. Community user group consultation and communication#

    For problems that cannot be located and resolved by the above process during installation and deployment, you can send the error information to our community groups. To make it easier for community members and developers to help and to improve efficiency, when asking questions please describe the problem, attach the related log information, and mention what you have already checked. If you suspect an environment problem, also list the corresponding application versions. We provide two online groups: a WeChat group and a QQ group. The communication channels and specific contact information can be found at the bottom of the Linkis GitHub homepage.

    Ⅵ. Locate the source code by remote debugging#

    Under normal circumstances, remote debugging of the source code is the most effective way to locate problems, but compared with reviewing documents it requires a certain understanding of the source code structure. It is recommended to check the detailed Linkis source code structure in the Linkis WIKI before remote debugging. Once you are reasonably familiar with the source code structure of the project, you can refer to How to Debug Linkis.

    - + \ No newline at end of file diff --git a/docs/1.0.2/tuning_and_troubleshooting/tuning/index.html b/docs/1.0.2/tuning_and_troubleshooting/tuning/index.html index c9007ab11ad..b29309d9ab5 100644 --- a/docs/1.0.2/tuning_and_troubleshooting/tuning/index.html +++ b/docs/1.0.2/tuning_and_troubleshooting/tuning/index.html @@ -7,7 +7,7 @@ Tuning | Apache Linkis - + @@ -16,7 +16,7 @@ override def getOrCreateGroup(groupName: String): Group = { if (!groupNameToGroups.containsKey(groupName)) synchronized { val initCapacity = 100 val maxCapacity = 100 // other codes... } }

    4. Resource settings related to task runtime#

    When a task is submitted to run on Yarn, Yarn provides a configurable interface. As a highly extensible framework, Linkis can also be configured to set the task's resource usage.

    The related configuration of Spark and Hive are as follows:

    Part of the Spark configuration lives in linkis-engineconn-plugins/engineconn-plugins; you can adjust it to change the runtime environment of tasks submitted to Yarn. Due to limited space, further Hive and Yarn configuration requires users to refer to the source code and the parameters documentation.

        "spark.driver.memory" = 2 //Unit is G    "wds.linkis.driver.cores" = 1    "spark.executor.memory" = 4 //Unit is G    "spark.executor.cores" = 2    "spark.executor.instances" = 3    "wds.linkis.rm.yarnqueue" = "default"
    - + \ No newline at end of file diff --git a/docs/1.0.2/upgrade/overview/index.html b/docs/1.0.2/upgrade/overview/index.html index 27222c71bae..32df5666451 100644 --- a/docs/1.0.2/upgrade/overview/index.html +++ b/docs/1.0.2/upgrade/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    Overview

    The architecture of Linkis1.0 is very different from Linkis0.x, and there are some changes to the configuration of the deployment package and database tables. Before you install Linkis1.0, please read the following instructions carefully:

    1. If you are installing Linkis for the first time, or reinstalling Linkis, you do not need to pay attention to the Linkis Upgrade Guide.

    2. If you are upgrading from Linkis0.x to Linkis1.0, be sure to read the Linkis Upgrade from 0.x to 1.0 guide carefully.

    - + \ No newline at end of file diff --git a/docs/1.0.2/upgrade/upgrade_from_0.X_to_1.0_guide/index.html b/docs/1.0.2/upgrade/upgrade_from_0.X_to_1.0_guide/index.html index 77e813b0cef..d6005126def 100644 --- a/docs/1.0.2/upgrade/upgrade_from_0.X_to_1.0_guide/index.html +++ b/docs/1.0.2/upgrade/upgrade_from_0.X_to_1.0_guide/index.html @@ -7,7 +7,7 @@ Upgrade From 0.X To 1.0 Guide | Apache Linkis - + @@ -16,7 +16,7 @@ Please input the choice: ## choice 1

    3. Database upgrade#

         After the service is installed, the database structure needs to be modified, including table structure changes and new tables and data:

    3.1 Table structure modification part:#

         linkis_task: The submit_user and label_json fields are added to the table. The update statement is:

    ALTER TABLE linkis_task ADD submit_user varchar(50) DEFAULT NULL COMMENT 'submitUser name';
    ALTER TABLE linkis_task ADD `label_json` varchar(200) DEFAULT NULL COMMENT 'label json';

    3.2 Need newly executed sql:#

    cd db/module
    ## Add the tables that the enginePlugin service depends on:
    source linkis_ecp.sql
    ## Add the table that the publicservice-instanceLabel service depends on:
    source linkis_instance_label.sql
    ## Add the tables that the linkis-manager service depends on:
    source linkis_manager.sql

    3.3 Publicservice-Configuration table modification#

         In order to support the full labeling capability of Linkis 1.X, all the data tables related to the configuration module have been upgraded to labeling, which is completely different from the 0.X Configuration table. It is necessary to re-execute the table creation statement and the initialization statement.

         This means that Linkis0.X users' existing engine configuration parameters can no longer be migrated to Linkis1.0 (it is recommended that users reconfigure the engine parameters once).

         The execution of the table building statement is as follows:

    source linkis_configuration.sql

         Because Linkis 1.0 supports multiple versions of the engine, it is necessary to modify the version of the engine when executing the initialization statement, as shown below:

    vim linkis_configuration_dml.sql
    ## Modify the default version of the corresponding engine
    SET @SPARK_LABEL="spark-2.4.3";
    SET @HIVE_LABEL="hive-1.2.1";
    ## Execute the initialization statement
    source linkis_configuration_dml.sql

    4. Installation and startup Linkis1.0#

         Start Linkis 1.0 to verify whether the service has been started normally and provide external services. For details, please refer to: Quick Deployment Linkis1.0

    - + \ No newline at end of file diff --git a/docs/1.0.2/user_guide/console_manual/index.html b/docs/1.0.2/user_guide/console_manual/index.html index af02caaa6a0..38d96754b52 100644 --- a/docs/1.0.2/user_guide/console_manual/index.html +++ b/docs/1.0.2/user_guide/console_manual/index.html @@ -7,7 +7,7 @@ Console User Manual | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    Introduction to the Computation Governance Console

    Linkis1.0 has added a new Computation Governance Console page, which provides an interactive UI for viewing the execution of Linkis tasks, custom parameter configuration, engine health status, resource surplus, etc., thereby simplifying user development and management.

    Structure of the Computation Governance Console

    The Computation Governance Console is mainly composed of the following functional pages:

    • [Global History]
    • [Resource Management]
    • [Parameter Configuration]
    • [Global Variables]
    • [ECM Management] (Only visible to linkis computing management console administrators)
    • [Microservice Management] (Only visible to linkis computing management console administrators)

    Global History, Resource Management, Parameter Configuration, and Global Variables are visible to all users, while ECM Management and Microservice Management are only visible to Linkis Computation Governance Console administrators.

    Administrators of the Linkis Computation Governance Console can be configured with the following parameter in linkis.properties:

    wds.linkis.governance.station.admin=hadoop (multiple administrator usernames are separated by ‘,’)

    Introduction to the functions and use of the Computation Governance Console

    Global history#

    The Global History page provides the user's own Linkis task submission records. The execution status of each task is displayed here, and the reason a task failed can be queried by clicking the view button on the left side of the task.


    For Linkis Computation Governance Console administrators, the historical tasks of all users can be viewed by clicking "switch administrator view" on the page.


    Resource management#

    In the resource management interface, the user can see the status of the engine currently started and the status of resource occupation, and can also stop the engine through the page.


    Parameter configuration#

    The parameter configuration interface provides the function of user-defined parameter management. The user can manage the related configuration of the engine in this interface, and the administrator can add application types and engines here.


    The user can expand all the configuration information in the directory by clicking on the application type at the top and then select the engine type in the application, modify the configuration information and click "Save" to take effect.

    The edit catalog and new application type functions are only visible to the administrator. Click the edit button to delete an existing application or engine configuration (note: deleting an application directly deletes all engine configurations under it and cannot be undone), to add an engine, or click "New Application" to add a new application type.


    Global variable#

    In the global variable interface, users can customize variables for code writing, just click the edit button to add parameters.


    ECM management#

    The ECM Management page is used by administrators to manage the ECMs and all engines. On this page you can view ECM status information, modify ECM labels, modify ECM status, and query all engine information under each ECM. It is visible only to administrators; how to configure administrators is described in the second chapter of this article.


    Click the edit button to edit the label information of the ECM (only part of the labels are allowed to be edited) and modify the status of the ECM.


    Click the instance name of the ECM to view all engine information under the ECM.

    Similarly, you can stop the engine on this interface, and edit the label information of the engine.

    Microservice management#

    The Microservice Management page shows all microservice information under Linkis and is only visible to administrators. Linkis's own microservices can be viewed by clicking on the Eureka registration center; the microservices associated with Linkis are listed directly on this page.

    - + \ No newline at end of file diff --git a/docs/1.0.2/user_guide/how_to_use/index.html b/docs/1.0.2/user_guide/how_to_use/index.html index 6087fdb9047..cebb2be4347 100644 --- a/docs/1.0.2/user_guide/how_to_use/index.html +++ b/docs/1.0.2/user_guide/how_to_use/index.html @@ -7,7 +7,7 @@ How to Use | Apache Linkis - + @@ -18,7 +18,7 @@ DSS Run Workflow

    - + \ No newline at end of file diff --git a/docs/1.0.2/user_guide/linkiscli_manual/index.html b/docs/1.0.2/user_guide/linkiscli_manual/index.html index bf3b0c6b14a..d379b1d10a6 100644 --- a/docs/1.0.2/user_guide/linkiscli_manual/index.html +++ b/docs/1.0.2/user_guide/linkiscli_manual/index.html @@ -7,7 +7,7 @@ Linkis-Cli Manual | Apache Linkis - + @@ -16,7 +16,7 @@

    Note:

    1. variableMap does not support configuration

    2. When there is a conflict between the configured key and the key entered in the command parameter, the priority is as follows:

      Instruction parameters > keys in instruction map-type parameters > user configuration > default configuration

    Example:

    Configure engine startup parameters:

    wds.linkis.client.param.conf.spark.executor.instances=3
    wds.linkis.client.param.conf.wds.linkis.yarnqueue=q02

    Configure labelMap parameters:

       wds.linkis.client.label.myLabel=label123

    6. Output result set to file#

    Use the -outPath parameter to specify an output directory; linkis-cli will then output the result sets to files, creating one file per result set. The output files are named as follows:

        task-[taskId]-result-[idx].txt    

    For example:

    task-906-result-1.txt
    task-906-result-2.txt
    task-906-result-3.txt
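
    For illustration, a hedged linkis-cli invocation that writes its result sets to a directory (the hive-1.2.1 engine label and hql code type are assumptions; use whatever your deployment provides):

        sh ./bin/linkis-cli -engineType hive-1.2.1 -codeType hql -code "show tables" \
           -submitUser hadoop -proxyUser hadoop -outPath ./results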
    - + \ No newline at end of file diff --git a/docs/1.0.2/user_guide/overview/index.html b/docs/1.0.2/user_guide/overview/index.html index c6b90128d16..77774705d7d 100644 --- a/docs/1.0.2/user_guide/overview/index.html +++ b/docs/1.0.2/user_guide/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.0.2/user_guide/sdk_manual/index.html b/docs/1.0.2/user_guide/sdk_manual/index.html index 9514cdb31a8..85e829bd9d5 100644 --- a/docs/1.0.2/user_guide/sdk_manual/index.html +++ b/docs/1.0.2/user_guide/sdk_manual/index.html @@ -7,7 +7,7 @@ JAVA SDK Manual | Apache Linkis - + @@ -47,7 +47,7 @@
    - + \ No newline at end of file diff --git a/docs/1.0.3/api/jdbc_api/index.html b/docs/1.0.3/api/jdbc_api/index.html index 08bd22ba923..923cff80bd3 100644 --- a/docs/1.0.3/api/jdbc_api/index.html +++ b/docs/1.0.3/api/jdbc_api/index.html @@ -7,7 +7,7 @@ Task Submission And Execution Of JDBC API | Apache Linkis - + @@ -19,7 +19,7 @@ //3. Create statement and execute query Statement st= connection.createStatement(); ResultSet rs=st.executeQuery("show tables"); //4. Processing the returned results of the database (using the ResultSet class) while (rs.next()) { ResultSetMetaData metaData = rs.getMetaData(); for (int i = 1; i <= metaData.getColumnCount(); i++) { System.out.print(metaData.getColumnName(i) + ":" +metaData.getColumnTypeName(i)+": "+ rs.getObject(i) + " "); } System.out.println(); } // close resourse rs.close(); st.close(); connection.close(); }
    - + \ No newline at end of file diff --git a/docs/1.0.3/api/linkis_task_operator/index.html b/docs/1.0.3/api/linkis_task_operator/index.html index e5d394fc731..e6b66ea8239 100644 --- a/docs/1.0.3/api/linkis_task_operator/index.html +++ b/docs/1.0.3/api/linkis_task_operator/index.html @@ -7,7 +7,7 @@ Task Submission and Execution Rest Api | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Linkis Task submission and execution Rest API document

    • The return of the Linkis Restful interface follows the following standard return format:
    { "method": "", "status": 0, "message": "", "data": {}}

    Convention:

    • method: Returns the requested Restful API URI, which is mainly used in WebSocket mode.
    • status: return status information, where -1 means not logged in, 0 means success, 1 means error, 2 means verification failed, and 3 means no access to the interface.
    • data: return specific data.
    • message: return the requested prompt message. If the status is not 0, the message returned is an error message, and the data may have a stack field, which returns specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification

    1. Submit for Execution#

    • Interface /api/rest_j/v1/entrance/execute

    • Submission method POST

    {
        "executeApplicationName": "hive",   // Engine type
        "requestApplicationName": "dss",    // Client service type
        "executionCode": "show tables",
        "params": {"variable": {}, "configuration": {}},
        "runType": "hql",                   // The type of script to run
        "source": {"scriptPath": "file:///tmp/hadoop/1.hql"}
    }
    • Interface /api/rest_j/v1/entrance/submit

    • Submission method POST

    {
        "executionContent": {"code": "show tables", "runType": "sql"},
        "params": {"variable": {}, "configuration": {}},
        "source": {"scriptPath": "file:///mnt/bdp/hadoop/1.hql"},
        "labels": {
            "engineType": "spark-2.4.3",
            "userCreator": "hadoop-IDE"
        }
    }

    • Return example

    {
        "method": "/api/rest_j/v1/entrance/execute",
        "status": 0,
        "message": "Request executed successfully",
        "data": {
            "execID": "030418IDEhivebdpdwc010004:10087IDE_hadoop_21",
            "taskID": "123"
        }
    }
    • execID is the unique execution ID generated for the task after it is submitted to Linkis; it is of type String. This ID is only useful while the task is running, similar to the concept of a PID. The ExecID is designed as (requestApplicationName length)(executeApplicationName length)(Instance length)${requestApplicationName}${executeApplicationName}${entranceInstance information ip+port}${requestApplicationName}_${umUser}_${index}

    • taskID is the unique ID that represents the task submitted by the user. This ID is generated by the database self-increment and is of Long type
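
    For illustration, a minimal curl sketch of the submit interface. The gateway address 127.0.0.1:9001 and the cookies.txt session file (obtained via the login interface described in the Login Document) are assumptions:

        curl -s -b cookies.txt -H "Content-Type: application/json" -X POST \
             -d '{"executionContent": {"code": "show tables", "runType": "sql"},
                  "labels": {"engineType": "spark-2.4.3", "userCreator": "hadoop-IDE"}}' \
             "http://127.0.0.1:9001/api/rest_j/v1/entrance/submit"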

    2. Get Status#

    • Interface /api/rest_j/v1/entrance/${execID}/status

    • Submission method GET

    • Return example

    {
        "method": "/api/rest_j/v1/entrance/{execID}/status",
        "status": 0,
        "message": "Get status successful",
        "data": {
            "execID": "${execID}",
            "status": "Running"
        }
    }
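
    Under the same assumptions (gateway at 127.0.0.1:9001, session cookie in cookies.txt), a status poll looks like:

        # ${execID} is the execID returned by the submit interface
        curl -s -b cookies.txt "http://127.0.0.1:9001/api/rest_j/v1/entrance/${execID}/status"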

    3. Get Logs#

    • Interface /api/rest_j/v1/entrance/${execID}/log?fromLine=${fromLine}&size=${size}

    • Submission method GET

    • The request parameter fromLine refers to the number of lines from which to get, and size refers to the number of lines of logs that this request gets

    • Return example, where the returned fromLine needs to be used as a parameter for the next request of this interface

    {
        "method": "/api/rest_j/v1/entrance/${execID}/log",
        "status": 0,
        "message": "Return log information",
        "data": {
            "execID": "${execID}",
            "log": ["error log", "warn log", "info log", "all log"],
            "fromLine": 56
        }
    }
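
    A pagination sketch under the same assumptions, additionally assuming jq is available for JSON parsing; each response's fromLine feeds the next request:

        fromLine=0
        while true; do
            resp=$(curl -s -b cookies.txt \
                "http://127.0.0.1:9001/api/rest_j/v1/entrance/${execID}/log?fromLine=${fromLine}&size=100")
            echo "$resp" | jq -r '.data.log[3]'   # index 3 holds the "all log" stream
            next=$(echo "$resp" | jq '.data.fromLine')
            [ "$next" = "$fromLine" ] && break    # nothing new returned, stop polling
            fromLine=$next
            sleep 1
        done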

    4. Get Progress#

    • Interface /api/rest_j/v1/entrance/${execID}/progress

    • Submission method GET

    • Return example

    {
        "method": "/api/rest_j/v1/entrance/{execID}/progress",
        "status": 0,
        "message": "Return progress information",
        "data": {
            "execID": "${execID}",
            "progress": 0.2,
            "progressInfo": [
                {"id": "job-1", "succeedTasks": 2, "failedTasks": 0, "runningTasks": 5, "totalTasks": 10},
                {"id": "job-2", "succeedTasks": 5, "failedTasks": 0, "runningTasks": 5, "totalTasks": 10}
            ]
        }
    }

    5. Kill Task#

    • Interface /api/rest_j/v1/entrance/${execID}/kill

    • Submission method POST

    { "method": "/api/rest_j/v1/entrance/{execID}/kill", "status": 0, "message": "OK", "data": {   "execID":"${execID}"  }}
    - + \ No newline at end of file diff --git a/docs/1.0.3/api/login_api/index.html b/docs/1.0.3/api/login_api/index.html index 3cb61580105..92836178954 100644 --- a/docs/1.0.3/api/login_api/index.html +++ b/docs/1.0.3/api/login_api/index.html @@ -7,7 +7,7 @@ Login Api | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Login Document

    1. Docking With LDAP Service#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Add LDAP related configuration:

    wds.linkis.ldap.proxy.url=ldap://127.0.0.1:389/    # LDAP service URL
    wds.linkis.ldap.proxy.baseDN=dc=webank,dc=com      # Base DN of the LDAP service

    2. How To Open The Test Mode To Achieve Login-Free#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Turn on the test mode and the parameters are as follows:

        wds.linkis.test.mode=true    # Enable test mode
        wds.linkis.test.user=hadoop  # Specify which user all requests are delegated to in test mode

    3. Login Interface Summary#

    We provide the following login-related interfaces:

    • Login In

    • Login Out

    • Heart Beat

    4. Interface details#

    • All Linkis Restful interfaces follow this standard response format:
    { "method": "", "status": 0, "message": "", "data": {}}

    Protocol

    • method: Returns the requested Restful API URI; mainly used in WebSocket mode.
    • status: Returns the status: -1 means not logged in, 0 means success, 1 means error, 2 means validation failed, and 3 means no access to the interface.
    • data: Returns the specific data.
    • message: Returns the request's prompt message. If status is not 0, message is an error message, and data may contain a stack field with the specific stack trace.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification

    1). Login In#

    • Interface /api/rest_j/v1/user/login

    • Submission method POST

      {
          "userName": "",
          "password": ""
      }
    • Response example

    {
        "method": null,
        "status": 0,
        "message": "login successful!",
        "data": {
            "isAdmin": false,
            "userName": ""
        }
    }

    Among them:

    • isAdmin: Linkis has only admin and non-admin users. The only privilege of admin users is viewing the historical tasks of all users in the Linkis management console.
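    A minimal login sketch against the interface above; using requests.Session keeps the login cookie for subsequent requests. The gateway address is an assumption:

        import requests

        def login(gateway: str, user: str, password: str) -> requests.Session:
            """Log in via /api/rest_j/v1/user/login and keep the session cookie."""
            session = requests.Session()
            body = session.post(
                f"{gateway}/api/rest_j/v1/user/login",
                json={"userName": user, "password": password},
            ).json()
            if body["status"] != 0:  # -1 would mean not logged in
                raise RuntimeError(body["message"])
            print("isAdmin:", body["data"]["isAdmin"])
            return session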

    2). Login Out#

    • Interface /api/rest_j/v1/user/logout

    • Submission method POST

      No parameters

    • Response example

    {
        "method": "/api/rest_j/v1/user/logout",
        "status": 0,
        "message": "Logout successful!"
    }

    3). Heart Beat#

    • Interface /api/rest_j/v1/user/heartbeat

    • Submission method POST

      No parameters

    • Response example

    {
        "method": "/api/rest_j/v1/user/heartbeat",
        "status": 0,
        "message": "Maintain heartbeat success!"
    }
    - + \ No newline at end of file diff --git a/docs/1.0.3/api/overview/index.html b/docs/1.0.3/api/overview/index.html index 22c27fb8486..c7ab3fd2d04 100644 --- a/docs/1.0.3/api/overview/index.html +++ b/docs/1.0.3/api/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Overview

    1. Document description#

    Linkis 1.0 has been refactored and optimized on the basis of Linkis 0.x, and it remains compatible with the 0.x interfaces. However, to avoid compatibility problems when using version 1.0, please read the following documents carefully:

    1. When doing customized development with Linkis 1.0, you need to use Linkis's authorization and authentication interface. Please read the Login API Document carefully.

    2. Linkis 1.0 provides a JDBC interface. To access Linkis through JDBC, please read the Task Submit and Execute JDBC API Document.

    3. Linkis 1.0 provides the Rest interface. To develop upper-layer applications on top of Linkis, please read the Task Submit and Execute Rest API Document.

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/add_an_engine_conn/index.html b/docs/1.0.3/architecture/add_an_engine_conn/index.html index 77ee39fb3aa..a406495da4f 100644 --- a/docs/1.0.3/architecture/add_an_engine_conn/index.html +++ b/docs/1.0.3/architecture/add_an_engine_conn/index.html @@ -7,7 +7,7 @@ Add an EngineConn | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    How to add an EngineConn

    Adding an EngineConn is one of the core processes of the computing task preparation phase of Linkis computing governance. It mainly includes the following steps: first, the client side (Entrance or a user client) initiates a request for a new EngineConn to LinkisManager; then LinkisManager asks the EngineConnManager to start an EngineConn based on the demands and label rules; finally, LinkisManager returns the usable EngineConn to the client side.

    Based on the figure below, let's explain the whole process in detail:

    Process of adding a EngineConn

    1. LinkisManager receives the request from the client side#

    Glossary:

    • LinkisManager: The management center of Linkis computing governance capabilities. Its main responsibilities are:

      1. Based on multi-level combined tags, provide users with available EngineConn after complex routing, resource management and load balancing.

      2. Provide EC and ECM full life cycle management capabilities.

      3. Provide users with multi-Yarn cluster resource management functions based on multi-level combined tags. It is mainly divided into three modules: AppManager, ResourceManager and LabelManager, which support multi-active deployment and have the characteristics of high availability and easy expansion.

    After the AM module receives the Client's request for a new EngineConn, it first checks the request parameters to determine their validity. Second, it selects the most suitable EngineConnManager (ECM) through complex rules for the subsequent EngineConn startup. Next, it applies to RM for the resources needed to start the EngineConn. Finally, it requests the ECM to create the EngineConn.

    The four steps will be described in detail below.

    1. Request parameter verification#

    After the AM module receives the engine creation request, it checks the parameters. First, it checks the permissions of the requesting user and the creating user, and then checks the Labels attached to the request. Since Labels are used later in AM's creation process to find the ECM and record resource information, you must ensure the necessary Labels are present. At this stage, the request must carry the UserCreatorLabel (for example: hadoop-IDE) and the EngineTypeLabel (for example: spark-2.4.3).

    2. Select an EngineConnManager (ECM)#

    ECM selection uses the Labels passed by the client to choose a suitable ECM service to start the EngineConn. In this step, the LabelManager first searches the registered ECMs with the Labels passed by the client and returns them ordered by label matching degree. After the registered ECM list is obtained, selection rules are applied to these ECMs; rules such as availability check, resource surplus, and machine load are currently implemented. After the rules are applied, the ECM with the best label match, the most idle resources, and the lowest load is returned.
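    To make the selection rules concrete, here is a hypothetical sketch of the described ordering (availability check, label matching degree, resource surplus, machine load); the EcmNode fields and ranking keys are illustrative assumptions, not Linkis's actual implementation:

        from dataclasses import dataclass

        @dataclass
        class EcmNode:
            name: str
            healthy: bool
            label_match_degree: int  # how many of the request's Labels this ECM matches
            free_memory_mb: int
            load_average: float

        def select_ecm(candidates: list[EcmNode]) -> EcmNode:
            """Filter by availability, then rank by label match, idle resources, low load."""
            available = [n for n in candidates if n.healthy]  # availability check
            if not available:
                raise RuntimeError("no available ECM for the requested Labels")
            return max(
                available,
                key=lambda n: (n.label_match_degree, n.free_memory_mb, -n.load_average),
            )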

    3. Apply resources required for EngineConn#

    1. After obtaining the assigned ECM, AM requests the EngineConnPluginServer service to determine how many resources the client's engine creation request will use. The resource request is encapsulated here, mainly including the Labels, the EngineConn startup parameters passed by the client, and the user configuration parameters obtained from the Configuration module. The resource information is obtained by calling the ECP service through RPC.

    2. After the EngineConnPluginServer service receives the resource request, it first finds the corresponding engine tag through the passed tags and selects the EngineConnPlugin of the corresponding engine. Then the EngineConnPlugin's resource generator calculates, from the engine startup parameters passed in by the client, the resources required to start the new EngineConn, and returns them to LinkisManager.

      Glossary:

    • EngineConnPlugin: The interface that must be implemented to connect a new computing storage engine to Linkis. It mainly covers the capabilities the EngineConn must provide during startup, including the EngineConn resource generator, the EngineConn startup command generator, and the EngineConn connector. Please refer to the Spark engine implementation class for a concrete implementation: SparkEngineConnPlugin.
    • EngineConnPluginServer: A microservice that loads all EngineConnPlugins and externally provides EngineConn resource generation and startup command generation capabilities.
    • EngineConnResourceFactory: Calculates the total resources needed to start the EngineConn from the parameters passed in.
    • EngineConnLaunchBuilder: Generates the EngineConn startup command from the incoming parameters, for the ECM to use when starting the engine.
    3. After AM obtains the engine resources, it calls the RM service to apply for them. The RM service uses the incoming Labels, the ECM, and the resources requested this time to make a judgment: it first checks whether the resources of the client corresponding to the Labels are sufficient, then whether the resources of the ECM service are sufficient. If both are sufficient, the resource application is approved and the resources of the corresponding Labels are adjusted accordingly.

    4. Request ECM for engine creation#

    1. After completing the resource application for the engine, AM will encapsulate the engine startup request, send it to the corresponding ECM via RPC for service startup, and obtain the instance object of EngineConn.
    2. AM then determines whether the EngineConn has started successfully and become available from the information reported by the EngineConn. If so, the result is returned and the process of adding an engine ends.

    2. ECM initiates EngineConn#

    Glossary:

    • EngineConnManager: EngineConn's manager. Provides engine life-cycle management, and at the same time reports load information and its own health status to RM.
    • EngineConnBuildRequest: The start engine command passed by LinkisManager to ECM, which encapsulates all tag information, required resources and some parameter configuration information of the engine.
    • EngineConnLaunchRequest: Contains the BML materials, environment variables, local environment variables required by the ECM, startup commands and other information needed to start an EngineConn, so that the ECM can build a complete EngineConn startup script from it.

    After ECM receives the EngineConnBuildRequest command passed by LinkisManager, starting the EngineConn is mainly divided into three steps:

    1. Request EngineConnPluginServer to obtain the encapsulated EngineConnLaunchRequest.
    2. Parse EngineConnLaunchRequest and encapsulate it into EngineConn startup script.
    3. Execute startup script to start EngineConn.

    2.1 EngineConnPluginServer encapsulates EngineConnLaunchRequest#

    Obtain the EngineConn type and version that actually need to be started from the label information of the EngineConnBuildRequest, get that type's EngineConnPlugin from the memory of EngineConnPluginServer, and convert the EngineConnBuildRequest into an EngineConnLaunchRequest through the plugin's EngineConnLaunchBuilder.

    2.2 Encapsulate EngineConn startup script#

    After the ECM obtains the EngineConnLaunchRequest, it downloads the BML materials in the request to the local machine and checks whether the local environment variables required by the request exist. After this verification passes, the EngineConnLaunchRequest is encapsulated into an EngineConn startup script.

    2.3 Execute startup script#

    Currently, ECM only supports Bash commands for Unix-like systems; that is, only Linux systems can execute the startup script.

    Before startup, the sudo command is used to switch to the corresponding requesting user to execute the script, ensuring that the startup user (i.e., the JVM user) is the requesting user on the Client side.

    After the startup script is executed, ECM monitors the execution status and execution log of the script in real time. Once the exit status is non-zero, it immediately reports EngineConn startup failure to LinkisManager and the entire process ends; otherwise, it keeps monitoring the log and status of the startup script until the script completes.
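    A minimal sketch of the behavior just described, assuming a hypothetical script path, requesting user, and error handling: run the generated script via sudo as the requesting user, stream its log, and treat a non-zero exit status as a startup failure to report:

        import subprocess

        def run_engineconn_script(script_path: str, request_user: str) -> None:
            """Execute the EngineConn startup script and monitor its log and status."""
            proc = subprocess.Popen(
                ["sudo", "-u", request_user, "bash", script_path],
                stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True,
            )
            for line in proc.stdout:  # monitor the startup log in real time
                print(line, end="")
            if proc.wait() != 0:  # non-zero exit: report startup failure
                raise RuntimeError(f"EngineConn startup failed with exit code {proc.returncode}")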

    3. EngineConn initialization#

    After ECM executes EngineConn's startup script, the EngineConn microservice is officially launched.

    Glossary:

    • EngineConn microservice: Refers to the actual microservices that include an EngineConn and one or more Executors to provide computing power for computing tasks. When we talk about adding an EngineConn, we actually mean adding an EngineConn microservice.
    • EngineConn: The engine connector is the actual connection unit with the underlying computing storage engine, and contains the session information with the actual engine. The difference between it and Executor is that EngineConn only acts as a connection and a client, and does not actually perform calculations. For example, SparkEngineConn, its session information is SparkSession.
    • Executor: The actual execution unit of computing and storage logic. It abstracts the various capabilities of EngineConn and provides multiple architectural capabilities such as interactive execution, subscription execution, and responsive execution.

    The initialization of EngineConn microservices is generally divided into three stages:

    1. Initialize the EngineConn of the specific engine. First, the command-line parameters of the Java main method are encapsulated into an EngineCreationContext containing the relevant label, startup, and parameter information, and EngineConn is initialized through the EngineCreationContext to establish the connection between EngineConn and the underlying engine. For example, SparkEngineConn initializes a SparkSession at this stage to establish a connection with a Spark application.
    2. Initialize the Executor. After the EngineConn is initialized, the corresponding Executor is initialized according to the actual usage scenario to provide service capabilities for subsequent users. For example, the SparkEngineConn in the interactive computing scenario initializes a series of Executors that can submit and execute SQL, PySpark, and Scala code, supporting the Client in submitting such code to the SparkEngineConn for execution.
    3. Report the heartbeat to LinkisManager regularly and wait for EngineConn to exit. When the underlying engine corresponding to the EngineConn is abnormal, the maximum idle time is exceeded, the Executor finishes execution, or the user manually kills it, the EngineConn automatically ends and exits.

    At this point, the process of adding a new EngineConn is basically complete. Finally, a summary:

    • The client initiates a request for adding EngineConn to LinkisManager.
    • LinkisManager checks the legitimacy of the parameters, first selects the appropriate ECM according to the labels, then confirms the resources required for the new EngineConn according to the user's request, applies for resources from LinkisManager's RM module, and, after the application is approved, requires the ECM to start a new EngineConn.
    • ECM first requests EngineConnPluginServer to obtain an EngineConnLaunchRequest containing the BML materials, environment variables, local environment variables required by the ECM, startup commands and other information needed to start an EngineConn; it then encapsulates the EngineConn startup script and finally executes the script to start the EngineConn.
    • EngineConn initializes the EngineConn of the specific engine, then initializes the corresponding Executor according to the actual usage scenario to provide service capabilities for subsequent users. Finally, it reports its heartbeat to LinkisManager regularly and waits for a normal exit or termination by the user.
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/commons/message_scheduler/index.html b/docs/1.0.3/architecture/commons/message_scheduler/index.html index e42e673e8e4..2ad6c3e9f3f 100644 --- a/docs/1.0.3/architecture/commons/message_scheduler/index.html +++ b/docs/1.0.3/architecture/commons/message_scheduler/index.html @@ -7,7 +7,7 @@ Message Scheduler Module | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Message Scheduler Module

    1. Overview#

            Linkis-RPC enables communication between microservices. To simplify the use of RPC, Linkis provides the Message-Scheduler module, which analyzes, identifies, and invokes methods annotated with @Receiver. It also unifies the use of the RPC and Restful interfaces, giving better scalability.

    2. Architecture description#

    2.1. Architecture design diagram#

    Module Design Drawing

    2.2. Module description#

    • ServiceParser: Parses the object of the Service module and encapsulates methods annotated with @Receiver into ServiceMethod objects.
    • ServiceRegistry: Registers the corresponding Service module and stores the ServiceMethods parsed from the Service in a Map container.
    • ImplicitParser: Parses the object of the Implicit module; methods annotated with @Implicit are encapsulated into ImplicitMethod objects.
    • ImplicitRegistry: Registers the corresponding Implicit module and stores the parsed ImplicitMethods in a Map container.
    • Converter: Scans the non-interface, non-abstract subclasses of RequestMethod and stores them in a Map; parses Restful requests and matches the related RequestProtocol.
    • Publisher: Implements the publishing and scheduling function: finds the ServiceMethod matching the RequestProtocol in the Registry and encapsulates it as a Job for submission and scheduling.
    • Scheduler: The scheduling implementation, using Linkis-Scheduler to execute the Job and return the MessageJob object.
    • TxManager: Completes transaction management: manages transactions over Job execution and decides whether to commit or roll back after the Job execution ends.
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/commons/rpc/index.html b/docs/1.0.3/architecture/commons/rpc/index.html index 720016315f9..e70ded1a46a 100644 --- a/docs/1.0.3/architecture/commons/rpc/index.html +++ b/docs/1.0.3/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC Module | Apache Linkis - + @@ -16,7 +16,7 @@ At the same time, because Feign only supports simple service selection rules, it cannot forward the request to the specified microservice instance, and cannot broadcast a request to all instances of the recipient microservice.

    2. Architecture description#

    2.1. Architecture design diagram#

    Linkis RPC architecture diagram

    2.2. Module description#

    The functions of the main modules are introduced as follows:

    • Eureka: the service registry, used for managing services and service discovery.
    • Sender: the service request interface; the sender uses Sender to request services from the receiver.
    • Receiver: the interface for receiving service requests; the receiver responds to services through this interface.
    • Interceptor: the Sender passes the user's request to the interceptor, which intercepts the request and performs additional processing on it: the broadcast interceptor broadcasts the request, the retry interceptor retries failed requests, the cache interceptor caches and reads simple, unchanging requests, and the default interceptor provides the default implementation.
    • Decoder, Encoder: used for request encoding and decoding.
    • Feign: a lightweight declarative HTTP client framework, used for the underlying communication of Linkis-RPC.
    • Listener: the listener module, mainly used to listen for broadcast requests.
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn/index.html b/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn/index.html index d4172a501fa..5d4049ad65d 100644 --- a/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn/index.html +++ b/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    EngineConn architecture design

    EngineConn: The engine connector, which creates the connection session with the underlying computing storage engine; it holds the session information between the engine and the specific cluster and is the client that communicates with the engine.

    EngineConn architecture diagram

    EngineConn

    Introduction to the second-level module:

    linkis-computation-engineconn interactive engine connector#

    Provides the capability to execute interactive computing tasks.

    Core Class | Core Function
    EngineConnTask | Defines the interactive computing tasks submitted to EngineConn
    ComputationExecutor | Defines the interactive Executor, with interactive capabilities such as status query and task kill
    TaskExecutionService | Provides management functions for interactive computing tasks

    linkis-engineconn-common engine connector common module#

    Define the most basic entity classes and interfaces in the engine connector. EngineConn is used to create a connection session for the underlying computing storage engine, which contains the session information between the engine and the specific cluster, and is the client that communicates with the specific engine.

    Core Service | Core Function
    EngineCreationContext | Contains the context information of EngineConn during startup
    EngineConn | Contains the specific information of EngineConn, such as its type and the connection information with the underlying computing storage engine
    EngineExecution | Provides the Executor creation logic
    EngineConnHook | Defines the operations before and after each phase of engine startup

    The core logic of linkis-engineconn-core engine connector#

    Defines the interfaces involved in the core logic of EngineConn.

    Core Class | Core Function
    EngineConnManager | Provides related interfaces for creating and obtaining EngineConn
    ExecutorManager | Provides related interfaces for creating and obtaining Executor
    ShutdownHook | Defines the operations of the engine shutdown phase

    linkis-engineconn-launch engine connector startup module#

    Defines the logic of how to start EngineConn.

    Core Class | Core Function
    EngineConnServer | EngineConn microservice startup class

    The core logic of the linkis-executor-core executor#

    Defines the core classes related to the executor. The executor is the actual execution unit for computing scenarios, responsible for submitting user code to EngineConn.

    Core Class | Core Function
    Executor | The actual computational logic execution unit; provides a top-level abstraction of the various capabilities of the engine
    EngineConnAsyncEvent | Defines EngineConn-related asynchronous events
    EngineConnSyncEvent | Defines EngineConn-related synchronization events
    EngineConnAsyncListener | Defines the EngineConn-related asynchronous event listener
    EngineConnSyncListener | Defines the EngineConn-related synchronization event listener
    EngineConnAsyncListenerBus | Defines the listener bus for EngineConn asynchronous events
    EngineConnSyncListenerBus | Defines the listener bus for EngineConn synchronization events
    ExecutorListenerBusContext | Defines the context of the EngineConn event listener
    LabelService | Provides the label reporting function
    ManagerService | Provides information transfer with LinkisManager

    linkis-callback-service callback logic#

    Core Class | Core Function
    EngineConnCallback | Defines EngineConn's callback logic

    linkis-accessible-executor accessible executor#

    An Executor that can be accessed: you can interact with it through RPC requests to obtain basic metrics data such as its status, load, and concurrency.

    Core Class | Core Function
    LogCache | Provides the log cache function
    AccessibleExecutor | An Executor that can be accessed and interacted with through RPC requests
    NodeHealthyInfoManager | Manages Executor health information
    NodeHeartbeatMsgManager | Manages Executor heartbeat information
    NodeOverLoadInfoManager | Manages Executor load information
    Listener | Provides Executor-related events and the corresponding listener definitions
    EngineConnTimedLock | Defines the Executor-level lock
    AccessibleService | Provides Executor start/stop and status acquisition functions
    ExecutorHeartbeatService | Provides Executor heartbeat-related functions
    LockService | Provides the lock management function
    LogService | Provides log management functions
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html b/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html index bc1168b1f3a..fccc7341f51 100644 --- a/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html +++ b/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html @@ -7,7 +7,7 @@ EngineConnManager Design | Apache Linkis - + @@ -16,7 +16,7 @@ Core Service and Features module are as follows:

    Core Service | Core Function
    EngineConnLaunchService | Contains the core methods for generating EngineConn and starting the process
    BmlResourceLocallizationService | Downloads BML engine-related resources and generates the localized file directory
    ECMHealthService | Reports its own health heartbeat to AM regularly
    ECMMetricsService | Reports its own metrics to AM regularly
    EngineConnKillSerivce | Provides functions to stop the engine
    EngineConnListService | Provides engine caching and management functions
    EngineConnCallBackService | Provides the engine callback function
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html b/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html index 81ac99212f0..5af285707f0 100644 --- a/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html +++ b/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html @@ -7,7 +7,7 @@ EngineConnPlugin (ECP) Design | Apache Linkis - + @@ -17,7 +17,7 @@ Other services such as Manager call the logic of the corresponding plug-in in Plugin Server through RPC requests.

    Core Class | Core Function
    EngineConnLaunchService | Responsible for building the engine connector launch request
    EngineConnResourceFactoryService | Responsible for generating engine resources
    EngineConnResourceService | Responsible for downloading the resource files used by the engine connector from BML

    EngineConn-Plugin-Loader Engine Connector Plugin Loader#

    The engine connector plug-in loader is a loader used to dynamically load the engine connector plug-ins according to request parameters, and has the characteristics of caching. The specific loading process is mainly composed of two parts: 1) Plug-in resources such as the main program package and program dependency packages are loaded locally (not open). 2) Plug-in resources are dynamically loaded from the local into the service process environment, for example, loaded into the JVM virtual machine through a class loader.

    Core Class | Core Function
    EngineConnPluginsResourceLoader | Loads engine connector plug-in resources
    EngineConnPluginsLoader | Loads the engine connector plug-in instance, or loads an existing one from the cache
    EngineConnPluginClassLoader | Dynamically instantiates an engine connector instance from a jar

    EngineConn-Plugin-Cache engine plug-in cache module#

    The engine connector plug-in cache is a cache service specially used to cache loaded engine connectors, supporting read, update, and remove operations. A plug-in that has been loaded into the service process is cached together with its class loader to prevent repeated loading from affecting efficiency. At the same time, the cache module periodically notifies the loader to update the plug-in resources; if changes are found, the plug-in is reloaded and the cache is refreshed automatically.

    Core Class | Core Function
    EngineConnPluginCache | Caches loaded engine connector instances
    RefreshPluginCacheContainer | Refreshes the cached engine connectors regularly

    EngineConn-Plugin-Core: Engine connector plug-in core module#

    The engine connector plug-in core module is the core module of the engine connector plug-in. Contains the implementation of the basic functions of the engine plug-in, such as the construction of the engine connector start command, the construction of the engine resource factory and the implementation of the core interface of the engine connector plug-in.

    Core Class | Core Function
    EngineConnLaunchBuilder | Builds the engine connector launch request
    EngineConnFactory | Creates the engine connector
    EngineConnPlugin | The engine connector plug-in interface, including resource, command, and instance construction methods
    EngineResourceFactory | The engine resource creation factory

    EngineConn-Plugins: Engine connection plugin collection#

    The engine connection plug-in collection is used to place the default engine connector plug-in library that has been implemented based on the plug-in interface defined by us. Provides the default engine connector implementation, such as jdbc, spark, python, shell, etc. Users can refer to the implemented cases based on their own needs to implement more engine connectors.

    Core Class | Core Function
    engineplugin-jdbc | JDBC engine connector
    engineplugin-shell | Shell engine connector
    engineplugin-spark | Spark engine connector
    engineplugin-python | Python engine connector
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/computation_governance_services/entrance/index.html b/docs/1.0.3/architecture/computation_governance_services/entrance/index.html index 67e1cac22a0..f6c0e77cee6 100644 --- a/docs/1.0.3/architecture/computation_governance_services/entrance/index.html +++ b/docs/1.0.3/architecture/computation_governance_services/entrance/index.html @@ -7,7 +7,7 @@ Entrance Architecture Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Entrance Architecture Design

    The Linkis task submission portal is used to receive, schedule, and forward execution requests and to manage the life cycle of computing tasks, and it can return computation results, logs, and progress to the caller. It is split out from the native capabilities of the Linkis 0.x Entrance.

    1. Entrance architecture diagram

    Introduction to the second-level module:

    EntranceServer#

    EntranceServer computing task submission portal service is the core service of Entrance, responsible for the reception, scheduling, execution status tracking, and job life cycle management of Linkis execution tasks. It mainly realizes the conversion of task execution requests into schedulable Jobs, scheduling, applying for Executor execution, job status management, result set management, log management, etc.

    Core Class | Core Function
    EntranceInterceptor | The Entrance interceptor supplements the information of the incoming task to make it more complete, including database information, custom variable replacement, code inspection, limit restrictions, etc.
    EntranceParser | Parses the request parameter Map into a Task; can also convert a Task into a schedulable Job, or a Job into a storable Task
    EntranceExecutorManager | Creates an Executor for the execution of an EntranceJob, maintains the relationship between Job and Executor, and supports the labeling capabilities requested by the Job
    PersistenceManager | Responsible for job-related persistence operations, such as storing the result set path, job status changes, and progress in the database
    ResultSetEngine | Responsible for storing the result set after the job runs, saved as files to HDFS or a local storage directory
    LogManager | Responsible for storing job logs and managing log error codes
    Scheduler | Responsible for scheduling and executing all jobs, mainly through job queues
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/computation_governance_services/linkis-cli/index.html b/docs/1.0.3/architecture/computation_governance_services/linkis-cli/index.html index e1a89531601..f1e3bf5f5d6 100644 --- a/docs/1.0.3/architecture/computation_governance_services/linkis-cli/index.html +++ b/docs/1.0.3/architecture/computation_governance_services/linkis-cli/index.html @@ -7,7 +7,7 @@ Linkis-Client Architecture Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Linkis-Client Architecture Design

    Provide users with a lightweight client that submits tasks to Linkis for execution.

    Linkis-Client architecture diagram#

    Linkis-Client architecture diagram

    Second-level module introduction#

    Linkis-Computation-Client#

    Provides an interface for users to submit execution tasks to Linkis in the form of SDK.

    Core Class | Core Function
    Action | Defines the request's attributes, methods, and parameters
    Result | Defines the returned result's properties, methods, and parameters
    UJESClient | Responsible for request submission and execution, and for acquiring status, results, and related parameters
    Linkis-Cli#

    Provides a way for users to submit tasks to Linkis in the form of a shell command terminal.

    Core Class | Core Function
    Common | Defines the instruction template parent class, the instruction analysis entity class, and the parent classes and interfaces of the task submission and execution links
    Core | Responsible for parsing input, executing tasks, and defining output methods
    Application | Calls linkis-computation-client to perform tasks, and pulls logs and final results in real time
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html b/docs/1.0.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html index a2ed5923155..fc75e1c5b0c 100644 --- a/docs/1.0.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html +++ b/docs/1.0.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html @@ -7,7 +7,7 @@ App Manager | Apache Linkis - + @@ -29,7 +29,7 @@ Engine manager: Engine manager is responsible for managing the basic information and metadata information of all engines.

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html b/docs/1.0.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html index 4bf02287b56..62f8321ba29 100644 --- a/docs/1.0.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html +++ b/docs/1.0.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html @@ -7,7 +7,7 @@ Label Manager | Apache Linkis - + @@ -22,7 +22,7 @@ We set that the higher the proportion of candidate nodes associated with irrelevant labels in the total associated nodes, the more significant the impact on the score, which can further accumulate the initial score of the node obtained in the first step.
  • Normalize the standard deviation of the scores of the candidate nodes and sort them.
  • - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/computation_governance_services/linkis_manager/overview/index.html b/docs/1.0.3/architecture/computation_governance_services/linkis_manager/overview/index.html index 606e6f65b35..7d7a81bcc93 100644 --- a/docs/1.0.3/architecture/computation_governance_services/linkis_manager/overview/index.html +++ b/docs/1.0.3/architecture/computation_governance_services/linkis_manager/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -17,7 +17,7 @@ ResourceManager

    4. Monitoring module linkis-manager-monitor#

            Monitor provides the function of node status monitoring.

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html b/docs/1.0.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html index 7658f0191b3..436f95d52b2 100644 --- a/docs/1.0.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html +++ b/docs/1.0.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html @@ -7,7 +7,7 @@ Resource Manager | Apache Linkis - + @@ -25,7 +25,7 @@ url, Hadoop version and other information) are maintained in the linkis_external_resource_provider table.

  • For each resource type, there is an implementation of the ExternalResourceProviderParser interface, which parses the attributes of external resources, converts the information that can be matched to Labels into the corresponding Labels, and converts the information that can be used as request parameters for the resource interface into params. Finally, an ExternalResourceProvider instance that can be used as the basis for querying external resource information is constructed.

  • According to the resource type and label information in the parameters of the ExternalResourceService method, find the matching ExternalResourceProvider, generate an ExternalResourceRequest based on the information in it, and formally call the API provided by the external resource to initiate a resource information request.

  • - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/computation_governance_services/overview/index.html b/docs/1.0.3/architecture/computation_governance_services/overview/index.html index baf6b80964a..529f1cba190 100644 --- a/docs/1.0.3/architecture/computation_governance_services/overview/index.html +++ b/docs/1.0.3/architecture/computation_governance_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -21,7 +21,7 @@ Enter EngineConn Architecture Design

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/difference_between_1.0_and_0.x/index.html b/docs/1.0.3/architecture/difference_between_1.0_and_0.x/index.html index c8d46c41e98..2da85635842 100644 --- a/docs/1.0.3/architecture/difference_between_1.0_and_0.x/index.html +++ b/docs/1.0.3/architecture/difference_between_1.0_and_0.x/index.html @@ -7,7 +7,7 @@ Difference Between 1.0 And 0.x | Apache Linkis - + @@ -34,7 +34,7 @@ Linkis EngineConn Architecture diagram

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/job_submission_preparation_and_execution_process/index.html b/docs/1.0.3/architecture/job_submission_preparation_and_execution_process/index.html index 7348f17e5ab..b98883065ef 100644 --- a/docs/1.0.3/architecture/job_submission_preparation_and_execution_process/index.html +++ b/docs/1.0.3/architecture/job_submission_preparation_and_execution_process/index.html @@ -7,7 +7,7 @@ Job Submission | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Job submission, preparation and execution process

    The submission and execution of computing tasks (Jobs) is the core capability provided by Linkis. It involves almost all modules in the Linkis computing governance architecture and occupies a core position in Linkis.

    The whole process, starting with the user's computing task being submitted from the client and ending with the final result being returned, is divided into three stages: submission -> preparation -> execution. The details are shown in the following figure.

    The overall flow chart of computing tasks

    Among them:

    • Entrance, as the entrance to the submission stage, provides task reception, scheduling and job information forwarding capabilities. It is the unified entrance for all computing tasks. It will forward computing tasks to Orchestrator for scheduling and execution.

    • Orchestrator, as the entrance to the preparation phase, mainly provides job analysis, orchestration and execution capabilities.

    • Linkis Manager: The management center of computing governance capabilities. Its main responsibilities are as follows:

      1. ResourceManager: not only has the resource management capabilities of Yarn and Linkis EngineConnManager, but also provides tag-based multi-level resource allocation and recovery capabilities, giving ResourceManager full resource management capabilities across clusters and across computing resource types;
      2. AppManager: coordinates and manages all EngineConnManagers and EngineConns, handing the full life cycle of EngineConn application, reuse, creation, switching, and destruction over to AppManager for management;
      3. LabelManager: Based on multi-level combined labels, it will provide label support for the routing and management capabilities of EngineConn and EngineConnManager across IDC and across clusters;
      4. EngineConnPluginServer: Externally provides the resource generation capabilities required to start an EngineConn and EngineConn startup command generation capabilities.
    • EngineConnManager: It is the manager of EngineConn, which provides engine life-cycle management, and at the same time reports load information and its own health status to RM.

    • EngineConn: It is the actual connector between Linkis and the underlying computing storage engines. All user computing and storage tasks will eventually be submitted to the underlying computing storage engine by EngineConn. According to different user scenarios, EngineConn provides full-stack computing capability framework support for interactive computing, streaming computing, off-line computing, and data storage tasks.

    1. Submission Stage#

    The submission phase is mainly the interaction of Client -> Linkis Gateway -> Entrance, and the process is as follows:

    Flow chart of submission phase

    1. First, the Client (such as the front end or the client) initiates a Job request, and the job request information is simplified as follows (for the specific usage of Linkis, please refer to How to use Linkis):
    POST /api/rest_j/v1/entrance/submit
    {
        "executionContent": {"code": "show tables", "runType": "sql"},
        "params": {"variable": {}, "configuration": {}},  // not required
        "source": {"scriptPath": "file:///1.hql"},  // not required, only used to record the code source
        "labels": {
            "engineType": "spark-2.4.3",  // specify the engine
            "userCreator": "username-IDE"  // specify the submitting user and submitting system
        }
    }
    2. After Linkis-Gateway receives the request, it determines the target microservice for routing and forwarding from the serviceName in the URI /api/rest_j/v1/${serviceName}/.+ (a small routing sketch follows this list). Here Linkis-Gateway parses out the name entrance, so the Job is forwarded to the Entrance microservice. Note that if the user specifies a routing label, the Entrance microservice instance with the corresponding label is selected for forwarding instead of a random instance.
    3. After Entrance receives the Job request, it first briefly verifies the legitimacy of the request, then uses RPC to call JobHistory to persist the job information, encapsulates the Job request as a computing task, puts it in the scheduling queue, and waits for it to be consumed by a consumer thread.
    4. The scheduling queue opens up a consumption queue and a consumer thread for each group. The consumption queue stores the user computing tasks that have been preliminarily encapsulated, and the consumer thread keeps taking computing tasks from the consumption queue in a FIFO manner. The current default grouping method is Creator + User (that is, submission system + user). Therefore, even for the same user, computing tasks submitted by different systems use completely different consumption queues and consumer threads, fully isolated from each other. (Reminder: users can modify the grouping algorithm as needed.)
    5. After the consumer thread takes out a computing task, it submits it to Orchestrator, which officially enters the preparation phase.
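    As referenced in step 2, a minimal sketch of how the target microservice name can be extracted from the URI pattern /api/rest_j/v1/${serviceName}/.+; the regex and function are illustrative, not Gateway's actual code:

        import re

        SERVICE_PATTERN = re.compile(r"^/api/rest_j/v1/([^/]+)/.+$")

        def route(uri: str) -> str:
            """Return the serviceName segment used to pick the target microservice."""
            match = SERVICE_PATTERN.match(uri)
            if not match:
                raise ValueError(f"unroutable URI: {uri}")
            return match.group(1)

        assert route("/api/rest_j/v1/entrance/submit") == "entrance"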

    2. Preparation Stage#

    There are two main processes in the preparation phase. One is to apply to LinkisManager for an available EngineConn to which the computing task will be submitted and executed. The other is for Orchestrator to orchestrate the computing task submitted by Entrance, converting the user's computing request into a physical execution tree that is handed over to the execution phase, where the computing task is actually executed.

    2.1 Apply to LinkisManager for available EngineConn#

    If the user has a reusable EngineConn in LinkisManager, the EngineConn is directly locked and returned to Orchestrator, and the entire application process ends.

    How is a reusable EngineConn defined? It is one that matches all the label requirements of the computing task and whose health status is Healthy (the load is low and the actual status is Idle). All the EngineConns that meet these conditions are then sorted and selected according to the rules, and the best one is finally locked.

    If the user does not have a reusable EngineConn, a process to request a new EngineConn will be triggered at this time. Regarding the process, please refer to: How to add an EngineConn.

    2.2 Orchestrate a computing task#

    Orchestrator is mainly responsible for arranging a computing task (JobReq) into a physical execution tree (PhysicalTree) that can be actually executed, and providing the execution capabilities of the Physical tree.

    Here we first focus on Orchestrator's computing task scheduling capabilities. A flow chart is shown below:

    Orchestration flow chart

    The main process is as follows:

    • Converter: Complete the conversion of the JobReq (task request) submitted by the user to Orchestrator's ASTJob. This step will perform parameter check and information supplementation on the calculation task submitted by the user, such as variable replacement, etc.
    • Parser: Complete the analysis of ASTJob. Split ASTJob into an AST tree composed of ASTJob and ASTStage.
    • Validator: Complete the inspection and information supplement of ASTJob and ASTStage, such as code inspection, necessary Label information supplement, etc.
    • Planner: Convert an AST tree into a Logical tree. The Logical tree at this time has been composed of LogicalTask, which contains all the execution logic of the entire computing task.
    • Optimizer: Convert a Logical tree to a Physical tree and optimize the Physical tree.
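    To make the flow concrete, here is a hypothetical sketch of the pipeline above; every function below is an illustrative stub standing in for the corresponding phase, not one of Orchestrator's real classes:

        from typing import Callable

        def converter(job_req: dict) -> dict:   # JobReq -> ASTJob: parameter check, variable replacement
            return {"ast_job": job_req}

        def parser(ast_job: dict) -> dict:      # ASTJob -> AST tree of ASTJob and ASTStages
            return {"stages": [ast_job]}

        def validator(ast_tree: dict) -> dict:  # code inspection, supplement required Labels
            return ast_tree

        def planner(ast_tree: dict) -> dict:    # AST tree -> Logical tree composed of LogicalTasks
            return {"logical_tasks": ast_tree["stages"]}

        def optimizer(logical_tree: dict) -> dict:  # Logical tree -> optimized Physical tree
            return {"physical_tree": logical_tree}

        PHASES: list[Callable[[dict], dict]] = [converter, parser, validator, planner, optimizer]

        def orchestrate(job_req: dict) -> dict:
            tree = job_req
            for phase in PHASES:  # each phase consumes the previous phase's output
                tree = phase(tree)
            return tree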

    In a physical tree, the majority of nodes are computing strategy logic. Only the middle ExecTask truly encapsulates the execution logic which will be further submitted to and executed at EngineConn. As shown below:

    Physical Tree

    The execution logic encapsulated by JobExecTask and StageExecTask in the Physical tree depends on the specific computing strategy: different computing strategies encapsulate different execution logic.

    For example, under the multi-active computing strategy, for a computing task submitted by a user, the execution logic submitted to EngineConn of different clusters for execution is encapsulated in two ExecTasks, and the related strategy logic is reflected in the parent node (StageExecTask(End)) of the two ExecTasks.

    Here, we take the multi-reading scenario under the multi-active computing strategy as an example.

    In the multi-reading scenario, only one ExecTask needs to return a result; once it does, the Physical tree can be marked as successful. However, the Physical tree can only execute sequentially according to dependencies and cannot terminate each node's execution: once a node is canceled or fails, the entire tree is marked as failed. At this time, StageExecTask(End) is needed to ensure that the Physical tree can cancel the ExecTask that failed, while continuing to upload the result set generated by the successful ExecTask and letting the tree continue to execute. This is the computing-strategy execution logic represented by StageExecTask.

    The orchestration process of Linkis Orchestrator is similar to that of many SQL parsing engines (such as the SQL parsers of Spark and Hive). But in fact, the orchestration capability of Linkis Orchestrator is built for the computing governance field, targeting users' different computing governance needs, while a SQL parsing engine performs parsing and orchestration oriented to the SQL language. Here is a simple distinction:

    1. What Linkis Orchestrator mainly wants to solve is the orchestration requirements caused by different computing tasks for computing strategies. For example, in order to be multi-active, Orchestrator will submit a calculation task for the user, based on the "multi-active" computing strategy requirements, compile a physical tree, so as to submit to multiple clusters to perform this calculation task. And in the process of constructing the entire Physical tree, various possible abnormal scenarios have been fully considered, and they have all been reflected in the Physical tree.
    2. The orchestration ability of Linkis Orchestrator has nothing to do with the programming language. In theory, as long as an engine has been adapted to Linkis, all the programming languages it supports can be orchestrated, while a SQL parsing engine only cares about the analysis and execution of SQL: it is only responsible for parsing a piece of SQL into an executable Physical tree and finally computing the result.
    3. Linkis Orchestrator also has the ability to parse SQL, but SQL parsing is just one of Orchestrator Parser's analytic implementations for the SQL programming language. The Parser of Linkis Orchestrator is also considering introducing Apache Calcite to parse SQL, supporting the splitting of a user SQL that spans multiple computing engines (they must be computing engines that Linkis has integrated) into multiple sub-SQLs that are submitted to each corresponding engine during the execution phase, after which a suitable computing engine is selected for the summary calculation.

    After the analysis and orchestration by Linkis Orchestrator, the computing task has been transformed into an executable Physical tree. Orchestrator will submit this Physical tree to Orchestrator's Execution module to enter the final execution stage.

    3. Execution Stage#

    The execution stage is mainly divided into the following two steps, these two steps are the last two phases of capabilities provided by Linkis Orchestrator:

    Flow chart of the execution stage

    The main process is as follows:

    • Execution: Analyze the dependencies of the Physical tree, and execute them sequentially from the leaf nodes according to the dependencies.
    • Reheater: Once the execution of a node in the Physical tree is completed, a reheat is triggered. Reheating allows the Physical tree to be dynamically adjusted according to real-time execution. For example: if a leaf node is detected to have failed and retry is supported (when the failure is caused by throwing ReTryExecption), the Physical tree is automatically adjusted and a retry parent node with exactly the same content is added above the leaf node.

    Let us go back to the Execution stage, where we focus on the execution logic of the ExecTask node that encapsulates the user computing task submitted to EngineConn.

    1. As mentioned earlier, the first step in the preparation phase is to obtain a usable EngineConn from LinkisManager. After ExecTask gets this EngineConn, it will submit the user's computing task to EngineConn through an RPC request.
    2. After EngineConn receives the computing task, it will asynchronously submit it to the underlying computing storage engine through the thread pool, and then immediately return an execution ID.
    3. After ExecTask gets this execution ID, it can then use the ID to asynchronously pull the execution status of the computing task (such as: status, progress, log, result set, etc.).
    4. At the same time, EngineConn will monitor the execution of the underlying computing storage engine in real time through multiple registered Listeners. If the computing storage engine does not support registering Listeners, EngineConn will start a daemon thread for the computing task and periodically pull the execution status from the computing storage engine.
    5. EngineConn pulls the execution status back to the microservice where Orchestrator is located in real time through RPC requests.
    6. After the Receiver of the microservice receives the execution status, it will broadcast it through the ListenerBus, and the Orchestrator Execution will consume the event and dynamically update the execution status of the Physical tree.
    7. The result set generated by the computing task is written to storage media such as HDFS on the EngineConn side. EngineConn returns only the result set path through RPC; Execution consumes the event and broadcasts the obtained result set path through the ListenerBus, so that the Listener registered by Entrance with Orchestrator can consume the path and persist it to JobHistory.
    8. After the execution of the computing task on the EngineConn side is completed, through the same logic, the Execution will be triggered to update the state of the ExecTask node of the Physical tree, so that the Physical tree will continue to execute until the entire tree is completely executed. At this time, Execution will broadcast the completion status of the calculation task through ListenerBus.
    9. After the Entrance registered Listener with the Orchestrator consumes the state event, it updates the job state to JobHistory, and the entire task execution is completed.

    Finally, let's take a look at how the client side learns the state of the computing task and obtains the computing result in time, as shown in the following figure:

    Results acquisition process

    The specific process is as follows:

    1. The client periodically polls to request Entrance to obtain the status of the computing task.
    2. Once the status flips to success, the client sends a request for the job information to JobHistory and gets all the result set paths.
    3. The client initiates a file-content query request to PublicService using each result set path, and obtains the content of the result set.
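
    A sketch of this client-side loop is shown below, using the JDK HTTP client. The REST paths, port and JSON field name here are assumptions for illustration, not a verified API; in practice the Linkis SDK or Linkis-cli wraps these calls.

    import java.net.URI
    import java.net.http.{HttpClient, HttpRequest, HttpResponse}

    object ClientPollingSketch {
      private val http = HttpClient.newHttpClient()

      private def get(url: String): String = {
        val req = HttpRequest.newBuilder(URI.create(url)).GET().build()
        http.send(req, HttpResponse.BodyHandlers.ofString()).body()
      }

      // Naive extraction of a "status" field from a JSON body (illustration only).
      private def extractStatus(json: String): String =
        "\"status\"\\s*:\\s*\"(\\w+)\"".r.findFirstMatchIn(json).map(_.group(1)).getOrElse("Unknown")

      def main(args: Array[String]): Unit = {
        val gateway = "http://127.0.0.1:9001"   // assumed gateway address
        val taskId  = "12345"                   // ID returned by the submit request
        // 1. Poll Entrance until the status flips to a terminal state.
        var status = "Running"
        while (status != "Succeed" && status != "Failed") {
          Thread.sleep(1000)
          status = extractStatus(get(s"$gateway/api/rest_j/v1/entrance/$taskId/status")) // assumed path
        }
        // 2. On success, ask JobHistory for the job information and result set paths,
        // 3. then open each result set file through PublicService to read its content.
        if (status == "Succeed")
          println(get(s"$gateway/api/rest_j/v1/jobhistory/$taskId/get"))                 // assumed path
      }
    }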

    At this point, the entire process of job submission -> preparation -> execution has been completed.

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/microservice_governance_services/gateway/index.html b/docs/1.0.3/architecture/microservice_governance_services/gateway/index.html index aedf7f02ad2..f46e23406c1 100644 --- a/docs/1.0.3/architecture/microservice_governance_services/gateway/index.html +++ b/docs/1.0.3/architecture/microservice_governance_services/gateway/index.html @@ -7,7 +7,7 @@ Gateway Design | Apache Linkis - + @@ -26,7 +26,7 @@ Gateway WebSocket Forwarding

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/microservice_governance_services/overview/index.html b/docs/1.0.3/architecture/microservice_governance_services/overview/index.html index 8564851d464..e6bf772daea 100644 --- a/docs/1.0.3/architecture/microservice_governance_services/overview/index.html +++ b/docs/1.0.3/architecture/microservice_governance_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -31,7 +31,7 @@

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/overview/index.html b/docs/1.0.3/architecture/overview/index.html index 7e2536711fa..4e6050bcb07 100644 --- a/docs/1.0.3/architecture/overview/index.html +++ b/docs/1.0.3/architecture/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Overview

    Linkis 1.0 divides all microservices into three categories: public enhancement services, computing governance services, and microservice governance services. The following figure shows the architecture of Linkis 1.0.

    Linkis1.0 Architecture Figure

    The specific responsibilities of each category are as follows:

    1. Public enhancement services are the material library services, context services, data source services and public services that Linkis 0.X already provided.
    2. The microservice governance services are the Spring Cloud Gateway, Eureka and OpenFeign already provided by Linkis 0.X; Linkis 1.0 will also provide support for Nacos.
    3. Computing governance services are the core focus of Linkis 1.0: across the three stages of submission, preparation and execution, they comprehensively upgrade Linkis' ability to control user tasks.

    The following is a directory listing of the Linkis 1.0 architecture documents:

    1. For the characteristics of the Linkis 1.0 architecture, please read The difference between Linkis1.0 and Linkis0.x.
    2. For Linkis 1.0 public enhancement service related documents, please read Public Enhancement Service.
    3. For Linkis 1.0 microservice governance related documents, please read Microservice Governance.
    4. For Linkis 1.0 computing governance service related documents, please read Computation Governance Service.
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/bml/index.html b/docs/1.0.3/architecture/public_enhancement_services/bml/index.html index 6a61f21b1e7..f6f1abc6ea3 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/bml/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/bml/index.html @@ -7,7 +7,7 @@ BML | Apache Linkis - + @@ -18,7 +18,7 @@ The number of bytes. After the reading is successful, the stream information is returned to the user.

  • Insert a successful download record in resource_download_history

  • Database Design#

    1. Resource information table (resource)

    Field name        | Function                                               | Remarks
    resource_id       | A string that uniquely identifies a resource globally  | UUID can be used for identification
    resource_location | The location where resources are stored                | For example, hdfs:///tmp/bdp/${USERNAME}/
    owner             | The owner of the resource                              | e.g. zhangsan
    create_time       | Record creation time                                   |
    is_share          | Whether to share                                       | 0 means not to share, 1 means to share
    update_time       | Last update time of the resource                       |
    is_expire         | Whether the resource expires                           |
    expire_time       | Resource expiration time                               |

    2. Resource version information table (resource_version)

    Field name        | Function                                    | Remarks
    resource_id       | Uniquely identifies the resource            | Joint primary key
    version           | The version of the resource file            |
    start_byte        | Start byte of the resource file             |
    end_byte          | End byte of the resource file               |
    size              | Resource file size                          |
    resource_location | Location where the resource file is stored  |
    start_time        | Upload start time                           |
    end_time          | Upload end time                             |
    updater           | User who updated the record                 |

    3. Resource download history table (resource_download_history)

    Field       | Function                                 | Remarks
    resource_id | resource_id of the downloaded resource   |
    version     | Version of the downloaded resource       |
    downloader  | User who downloaded the resource         |
    start_time  | Download start time                      |
    end_time    | Download end time                        |
    status      | Whether the download succeeded           | 0 means success, 1 means failure
    err_msg     | Failure reason                           | null means success, otherwise records the failure reason
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service/index.html b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service/index.html index dbbfd376882..59aa288daba 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service/index.html @@ -7,7 +7,7 @@ CS Architecture | Apache Linkis - + @@ -17,7 +17,7 @@

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html index 9fd826f9f90..7d82a24c018 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html @@ -7,7 +7,7 @@ CS Cache Architecture | Apache Linkis - + @@ -16,7 +16,7 @@

    Note: The ContextIDValueGenerator goes to the persistence layer to pull the Array[ContextKeyValue] of the ContextID, and parses the ContextKeyValue keys into storage indexes and content through ContextKeyValueParser.

    The other interface processes provided by ContextCacheService are similar, so I won't repeat them here.

    KeyWord parsing logic#

    The specific entity bean of ContextValue needs to use the annotation @keywordMethod on each get method that can be used as a keyword. For example, the getTableName method of Table must be annotated with @keywordMethod.

    When ContextKeyValueParser parses a ContextKeyValue, it scans all methods of the passed-in object that are annotated with KeywordMethod, calls each get method to obtain the toString value of the returned object, parses that value through user-selectable rules (separator-based or regular-expression-based), and stores the results in the keyword collection.

    Precautions:

    1. The annotation will be defined in the core module of cs

    2. The annotated get method cannot take parameters

    3. The toString method of the object returned by the get method must return the keyword
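
    A minimal sketch of this contract is shown below. In Linkis the getter carries a runtime annotation (@keywordMethod) discovered via reflection; here the scan is simulated by listing the keyword getters explicitly, since the point is the parsing rules, not the reflection plumbing. All names are illustrative, not the actual cs classes.

    object KeywordParserSketch {
      class Table(val tableName: String, val comment: String) {
        def getTableName: String = tableName   // would carry @keywordMethod in Linkis
        def getComment: String = comment       // not a keyword method
      }

      // User-selectable parsing rules: split by separators, or match a regex.
      def splitRule(separators: String)(value: String): Set[String] =
        value.split(s"[$separators]").filter(_.nonEmpty).toSet

      def regexRule(pattern: String)(value: String): Set[String] =
        pattern.r.findAllIn(value).toSet

      def parseKeywords(table: Table, rule: String => Set[String]): Set[String] = {
        // Simulates scanning the annotated getters and taking toString of each result.
        val keywordGetters: Seq[Table => Any] = Seq(_.getTableName)
        keywordGetters.map(g => g(table).toString).flatMap(rule).toSet
      }

      def main(args: Array[String]): Unit = {
        val t = new Table("db_test.t_user_login", "login table")
        println(parseKeywords(t, splitRule("._")))  // Set(db, test, t, user, login)
      }
    }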

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_client/index.html b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_client/index.html index 5e0ad103c04..3586a6e4144 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_client/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_client/index.html @@ -7,7 +7,7 @@ CS Client Design | Apache Linkis - + @@ -17,7 +17,7 @@ The second case is that the content of the ContextID is carried. We need to parse the csid. The way of parsing is to obtain the information of each instance through the method of string cutting, and then use eureka to determine whether this micro-channel still exists through the instance information. Service, if it exists, send it to this microservice instance

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html index 151382ccd88..a3b6e72cdaf 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html @@ -7,7 +7,7 @@ CS HA Design | Apache Linkis - + @@ -18,7 +18,7 @@ The client sends a request, and the Gateway forwards it to any server. The HA module generates the HAID, including the main instance, the backup instance and the CSID, and completes the binding of the workflow and the HAID.

    When the client sends a change request and Gateway determines that the main Instance is invalid, it forwards the request to the standby Instance for processing. After the HA module on the standby Instance verifies that the HAID is valid, it loads the instance and processes the request.

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html index 1fb5d572915..d7c197a43e7 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html @@ -7,7 +7,7 @@ CS Listener Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    CS Listener Architecture

    Listener Architecture#

    In DSS, when a node changes its metadata information, the context information of the entire workflow changes. We expect all nodes to perceive the change and automatically update their metadata. We use the listener mode to achieve this, and use a heartbeat mechanism to poll and maintain the metadata consistency of the context information.

    Client self-registration, CSKey registration and CSKey update process#

    The main process is as follows:

    1. Registration operation: The clients client1, client2, client3 and client4 register themselves and the CSKeys they want to monitor with the CSServer through HTTP requests. The Service obtains the callback engine instance through the external interface and registers the clients and their corresponding CSKeys.

    2. Update operation: If the ClientX node updates the CSKey content, the Service service updates the CSKey cached by the ContextCache, and the ContextCache delivers the update operation to the ListenerBus. The ListenerBus notifies the specific listener to consume (that is, the ContextKeyCallbackEngine updates the CSKeys corresponding to the Client). The consumed event will be automatically removed.

    3. Heartbeat mechanism:

    All clients use heartbeat information to detect whether the value of CSKeys in ContextKeyCallbackEngine has changed.

    ContextKeyCallbackEngine returns the updated CSKey values to all registered clients through the heartbeat mechanism. If a client's heartbeat times out, the client is removed.
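
    The registration / update / heartbeat cycle above can be sketched as follows; the class and method names are hypothetical stand-ins for the CS components, not the actual implementation.

    import scala.collection.mutable

    object CallbackEngineSketch {
      final case class CSKey(name: String)

      class ContextKeyCallbackEngine {
        private val registrations  = mutable.Map.empty[String, mutable.Set[CSKey]] // client -> monitored keys
        private val pendingUpdates = mutable.Map.empty[String, mutable.Set[CSKey]] // updates since last heartbeat
        private val lastHeartbeat  = mutable.Map.empty[String, Long]

        def register(client: String, keys: Set[CSKey]): Unit = {
          registrations.getOrElseUpdate(client, mutable.Set.empty) ++= keys
          lastHeartbeat(client) = System.currentTimeMillis()
        }

        // Called when the ListenerBus delivers a CSKey update event.
        def onUpdate(key: CSKey): Unit =
          for ((client, keys) <- registrations if keys.contains(key))
            pendingUpdates.getOrElseUpdate(client, mutable.Set.empty) += key

        // Heartbeat: return (and clear) the updated keys; evict timed-out clients.
        def heartbeat(client: String, timeoutMs: Long = 30000): Set[CSKey] = {
          val now = System.currentTimeMillis()
          lastHeartbeat(client) = now
          for ((c, t) <- lastHeartbeat.toList if now - t > timeoutMs) {
            registrations.remove(c); pendingUpdates.remove(c); lastHeartbeat.remove(c)
          }
          pendingUpdates.remove(client).map(_.toSet).getOrElse(Set.empty)
        }
      }
    }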

    Listener UML class diagram#

    Interface: ListenerManager

    External: Provide ListenerBus for event delivery.

    Internal: Provides a callback engine for specific event registration, access, update, and heartbeat processing logic.

    Listener callbackengine timing diagram#

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html index 5d4dd22986a..300d28289bc 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html @@ -7,7 +7,7 @@ CS Persistence Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_search/index.html b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_search/index.html index b9d55ce5a3d..1ea9948fb1d 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_search/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_search/index.html @@ -7,7 +7,7 @@ CS Search Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    CS Search Architecture

    CSSearch Architecture#

    Overall architecture#

    As shown below:

    1. ContextSearch: The query entry, which accepts query conditions defined in Map form and returns the corresponding results according to the conditions.

    2. Building module: Each condition type corresponds to a Parser, which is responsible for converting a condition in Map form into a Condition object by calling the logic of ConditionBuilder. Conditions with complex logical relationships use ConditionOptimizer to optimize query plans with cost-based algorithms.

    3. Execution module: Filters out the results matching the conditions from the Cache. According to different query targets, there are three execution modes: Ruler, Fetcher and Matcher. The specific logic is described later.

    4. Evaluation module: Responsible for calculating the execution cost of conditions and for statistics on historical execution status.

    Query Condition Definition (ContextSearchCondition)#

    A query condition specifies how to filter out the part that meets the condition from a ContextKeyValue collection. Query conditions can be combined through logical operations to form more complex query conditions.

    1. Support ContextType, ContextScope, KeyWord matching

      1. Corresponding to a Condition type

      2. In Cache, these should have corresponding indexes

    2. Support contains/regex matching mode for key

      1. ContainsContextSearchCondition: contains a string

      2. RegexContextSearchCondition: match a regular expression

    3. Support logical operations of or, and and not

      1. Unary operation UnaryContextSearchCondition: supports logical operations on a single parameter, such as NotContextSearchCondition

      2. Binary operation BinaryContextSearchCondition: supports logical operations on two parameters, defined as LeftCondition and RightCondition, such as OrContextSearchCondition and AndContextSearchCondition

      3. Each logical operation corresponds to an implementation class of the above subclasses

    The UML class diagram of this part is as follows:

    Construction of query conditions#

    1. Support construction through ContextSearchConditionBuilder: when constructing, if multiple ContextType, ContextScope, KeyWord, and contains/regex matches are declared at the same time, they are automatically connected by the And logical operation

    2. Support logical operations between Conditions that return new Conditions: And, Or and Not (considering the form condition1.or(condition2), the top-level Condition interface is required to define the logical operation methods); a sketch of this design follows after this list

    3. Support building from a Map through the ContextSearchParser corresponding to each underlying implementation class
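
    Below is a hedged sketch of that interface design: the top-level Condition trait defines the logical operations so that conditions compose as condition1.or(condition2), and each logical operation is a separate implementation class. The names mirror the text but are simplified illustrations, not the actual Linkis classes.

    object ConditionSketch {
      final case class ContextKeyValue(key: String, contextType: String)

      trait Condition {
        def matches(kv: ContextKeyValue): Boolean
        def and(other: Condition): Condition = AndCondition(this, other)
        def or(other: Condition): Condition  = OrCondition(this, other)
        def not: Condition                   = NotCondition(this)
      }

      // Matching conditions for keys
      final case class ContainsCondition(fragment: String) extends Condition {
        def matches(kv: ContextKeyValue): Boolean = kv.key.contains(fragment)
      }
      final case class RegexCondition(pattern: String) extends Condition {
        def matches(kv: ContextKeyValue): Boolean = pattern.r.findFirstIn(kv.key).isDefined
      }
      // Unary and binary logical operations, one implementation class each
      final case class NotCondition(c: Condition) extends Condition {
        def matches(kv: ContextKeyValue): Boolean = !c.matches(kv)
      }
      final case class AndCondition(left: Condition, right: Condition) extends Condition {
        def matches(kv: ContextKeyValue): Boolean = left.matches(kv) && right.matches(kv)
      }
      final case class OrCondition(left: Condition, right: Condition) extends Condition {
        def matches(kv: ContextKeyValue): Boolean = left.matches(kv) || right.matches(kv)
      }

      def main(args: Array[String]): Unit = {
        // Multiple declared matches connect with And, as the builder does.
        val cond = ContainsCondition("table").and(RegexCondition("^db_")).or(ContainsCondition("tmp").not)
        println(cond.matches(ContextKeyValue("db_table_users", "METADATA")))  // true
      }
    }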

    Execution of query conditions#

    1. Three function modes of query conditions:

      1. Ruler: Filter out eligible ContextKeyValue sub-Arrays from an Array

      2. Matcher: Determine whether a single ContextKeyValue meets the conditions

      3. Fetcher: Filter out an Array of eligible ContextKeyValue from ContextCache

    2. Each bottom-level Condition has a corresponding Execution, responsible for maintaining the corresponding Ruler, Matcher, and Fetcher.
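
    A compact sketch of these three modes, with hypothetical minimal types, could look like this; note that a Matcher naturally induces a Ruler and a Fetcher by filtering:

    object ExecutionModesSketch {
      final case class ContextKeyValue(key: String, value: String)
      type ContextCache = Seq[ContextKeyValue]  // stand-in for the real indexed cache

      trait Matcher { def matches(kv: ContextKeyValue): Boolean }                      // one value
      trait Ruler   { def rule(kvs: Array[ContextKeyValue]): Array[ContextKeyValue] }  // filter an Array
      trait Fetcher { def fetch(cache: ContextCache): Array[ContextKeyValue] }         // pull from the cache

      def rulerOf(m: Matcher): Ruler = new Ruler {
        def rule(kvs: Array[ContextKeyValue]): Array[ContextKeyValue] = kvs.filter(m.matches)
      }
      def fetcherOf(m: Matcher): Fetcher = new Fetcher {
        def fetch(cache: ContextCache): Array[ContextKeyValue] = cache.filter(m.matches).toArray
      }
    }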

    Query entry ContextSearch#

    Provide a search interface, receive Map as a parameter, and filter out the corresponding data from the Cache.

    1. Use Parser to convert the condition in the form of Map into a Condition object

    2. Obtain cost information through Optimizer, and determine the order of query according to the cost information

    3. Execute the corresponding Ruler/Fetcher/Matcher logic through each Condition's Execution to obtain the search result

    Query Optimization#

    1. OptimizedContextSearchCondition maintains the Cost and Statistics information of the condition:

      1. Cost information: CostCalculator is responsible for judging whether a certain Condition can calculate Cost, and if it can be calculated, it returns the corresponding Cost object

      2. Statistics information: start/end/execution time, number of input lines, number of output lines

    2. Implement a CostContextSearchOptimizer, whose optimize method optimizes the Condition based on its cost and converts it into an OptimizedContextSearchCondition object. The specific logic is described as follows:

      1. Disassemble a complex Condition into a tree structure based on the combination of logical operations. Each leaf node is a basic simple Condition; each non-leaf node is a logical operation.

    Tree A as shown in the figure below is a complex condition composed of five simple conditions of ABCDE through various logical operations.

    (Tree A)

      2. The execution of these Conditions is actually depth-first, traversing the tree from left to right. Moreover, according to the commutativity of logical operations, the left-right order of the child nodes of a node in the Condition tree can be exchanged, so all possible trees in all possible execution orders can be enumerated.

    Tree B as shown in the figure below is another possible ordering of tree A above, whose execution result is exactly the same as that of tree A, except that the execution order of each part has been adjusted.

    (Tree B)

      3. For each tree, the cost is calculated from the leaf nodes and aggregated up to the root node, which gives the final cost of the tree; finally, the tree with the smallest cost is chosen as the optimal execution order.

    The rules for calculating node cost are as follows:

    1. For leaf nodes, each node has two attributes: Cost and Weight. Cost is the cost calculated by CostCalculator. Weight is assigned according to the execution order of the nodes; the current default is 1 for the left child and 0.5 for the right child (how to adjust this will be considered later). The reason for assigning weights is that in some cases the condition on the left can already determine whether the entire combined logic matches, so the condition on the right does not have to be executed in all cases and its actual cost is reduced by a certain percentage.

    2. For non-leaf nodes, Cost = the sum of Cost×Weight of all child nodes; the weight assignment logic is consistent with that of leaf nodes.

    Taking tree A and tree B as examples, calculate the costs of these two trees respectively, as shown in the figure below; the numbers in each node are Cost|Weight, assuming that the costs of the 5 simple conditions A, B, C, D and E are 10, 100, 50, 10 and 100. It can be concluded that the cost of tree B is less than that of tree A, making it the better plan.
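
    The cost rule can be made concrete with a small sketch. The two tree shapes below are illustrative (not the exact trees in the figures), but they reuse the example leaf costs 10, 100, 50, 10 and 100 for A..E and show how swapping the children of one node changes the total cost:

    object CostSketch {
      sealed trait CondNode
      final case class Leaf(name: String, c: Double) extends CondNode
      final case class Op(left: CondNode, right: CondNode) extends CondNode

      // Cost(non-leaf) = sum over children of Cost × Weight; left weight 1, right weight 0.5
      def cost(node: CondNode): Double = node match {
        case Leaf(_, c)      => c
        case Op(left, right) => cost(left) * 1.0 + cost(right) * 0.5
      }

      def main(args: Array[String]): Unit = {
        val (a, b, c, d, e) = (Leaf("A", 10), Leaf("B", 100), Leaf("C", 50), Leaf("D", 10), Leaf("E", 100))
        val tree1 = Op(Op(a, b), Op(c, Op(d, e)))  // one execution order
        val tree2 = Op(Op(a, b), Op(Op(d, e), c))  // children of one node swapped
        println(cost(tree1))  // 100.0
        println(cost(tree2))  // 102.5 -> tree1 is the cheaper plan here
      }
    }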

    1. Use CostCalculator to measure the cost of simple conditions:

    1. The condition acting on an index: the cost is determined according to the distribution of the index values. For example, if the length of the Array obtained by condition A from the Cache is 100 and that of condition B is 200, then the cost of condition A is less than that of B.

      2. Conditions that need to be traversed:

    1. According to the matching mode of the condition itself, an initial Cost is given: for example, Regex is 100, Contains is 10, etc. (the specific values will be adjusted as appropriate during implementation)

    2. According to the efficiency of historical queries, the real-time Cost is obtained through continuous adjustment on the basis of the initial Cost, e.g. based on throughput per unit time
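
    The feedback adjustment in point 2 could, for example, be an exponential moving average over observed costs; the smoothing factor here is purely an assumption for illustration:

    object CostFeedbackSketch {
      // Blend the current Cost with a newly observed cost (e.g. derived from
      // throughput per unit time); alpha controls how fast history is forgotten.
      def adjust(currentCost: Double, observedCost: Double, alpha: Double = 0.2): Double =
        (1 - alpha) * currentCost + alpha * observedCost

      def main(args: Array[String]): Unit =
        println(adjust(currentCost = 100.0, observedCost = 60.0))  // 92.0
    }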

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/context_service/overview/index.html b/docs/1.0.3/architecture/public_enhancement_services/context_service/overview/index.html index 924686d1397..77d65e74384 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/context_service/overview/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/context_service/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -22,7 +22,7 @@ Enter Persistence architecture design

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/overview/index.html b/docs/1.0.3/architecture/public_enhancement_services/overview/index.html index c66ae157fdb..fc6ee606a35 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/overview/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    PublicEnhancementService (PS) architecture design

    PublicEnhancementService (PS): Public enhancement service, a module that provides functions such as unified configuration management, context service, material library, data source management, microservice management and historical task query for other microservice modules.

    Introduction to the second-level modules:

    BML material library#

    BML is the Linkis material management system, which is mainly used to store various file data of users, including user scripts, resource files, third-party Jar packages, etc., and can also store the class libraries needed when an engine runs.

    Core Class      | Core Function
    UploadService   | Provides resource upload service
    DownloadService | Provides resource download service
    ResourceManager | Provides a unified management entry for uploading and downloading resources
    VersionManager  | Provides resource version marking and version management functions
    ProjectManager  | Provides project-level resource management and control capabilities

    Unified configuration management#

    Configuration provides a "user-engine-application" three-level configuration management solution, which provides users with the function of configuring custom engine parameters under various access applications.

    Core Class           | Core Function
    CategoryService      | Provides management services for application and engine catalogs
    ConfigurationService | Provides a unified management service for user configuration

    ContextService context service#

    ContextService is used to solve the problem of data and information sharing across multiple systems in a data application development process.

    Core Class          | Core Function
    ContextCacheService | Provides a cache service for context information
    ContextClient       | Provides the ability for other microservices to interact with the CSServer group
    ContextHAManager    | Provides high-availability capabilities for ContextService
    ListenerManager     | Provides message bus capabilities
    ContextSearch       | Provides the query entry
    ContextService      | Implements the overall execution logic of the context service

    Datasource data source management#

    Datasource provides the ability to connect to different data sources for other microservices.

    Core Class        | Core Function
    datasource-server | Provides the ability to connect to different data sources

    InstanceLabel microservice management#

    InstanceLabel provides registration and labeling functions for other microservices connected to Linkis.

    Core Class      | Core Function
    InsLabelService | Provides microservice registration and label management functions

    Jobhistory historical task management#

    Jobhistory provides users with query, progress and log display functions for Linkis historical tasks, and provides a unified historical task view for administrators.

    Core Class             | Core Function
    JobHistoryQueryService | Provides historical task query service

    Variable user-defined variable management#

    Variable provides users with functions related to the storage and use of custom variables.

    Core Class      | Core Function
    VariableService | Provides functions related to the storage and use of custom variables

    UDF user-defined function management#

    UDF provides users with the function of custom functions, which can be introduced by users when writing code.

    Core Class | Core Function
    UDFService | Provides user-defined function service
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/public_service/index.html b/docs/1.0.3/architecture/public_enhancement_services/public_service/index.html index d60ce08e3fe..c8082ead9b5 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/public_service/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/public_service/index.html @@ -7,7 +7,7 @@ Public Service | Apache Linkis - + @@ -20,7 +20,7 @@ The main functions are as follows:

    • Provides resource management capabilities for some specific labels to assist RM in more refined resource management.

    • Provides labeling capabilities for users. The user label will be automatically added for judgment when applying for the engine.

    • Provides the label analysis module, which can parse a user's request into a set of labels.

    • Provides node label management capability, mainly CRUD operations on node labels, as well as label resource management to manage the resources of certain labels, recording the maximum, minimum and used resources of a Label.

    - + \ No newline at end of file diff --git a/docs/1.0.3/contact/index.html b/docs/1.0.3/contact/index.html index df746186416..4da5c104c91 100644 --- a/docs/1.0.3/contact/index.html +++ b/docs/1.0.3/contact/index.html @@ -7,7 +7,7 @@ Contact Us | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.0.3/deployment/cluster_deployment/index.html b/docs/1.0.3/deployment/cluster_deployment/index.html index 4be7eca9746..03918f794dd 100644 --- a/docs/1.0.3/deployment/cluster_deployment/index.html +++ b/docs/1.0.3/deployment/cluster_deployment/index.html @@ -7,7 +7,7 @@ Cluster Deployment | Apache Linkis - + @@ -21,7 +21,7 @@ Replicas will also display the replica nodes adjacent to the cluster.

    - + \ No newline at end of file diff --git a/docs/1.0.3/deployment/engine_conn_plugin_installation/index.html b/docs/1.0.3/deployment/engine_conn_plugin_installation/index.html index d0d1a99c1bd..abc66cd5434 100644 --- a/docs/1.0.3/deployment/engine_conn_plugin_installation/index.html +++ b/docs/1.0.3/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ EngineConnPlugin Installation | Apache Linkis - + @@ -17,7 +17,7 @@ wds.linkis.engineconn.plugin.loader.store.path, which is used by EngineConnPluginServer to read the actual implementation Jar of the engine.

    It is highly recommended to specify wds.linkis.engineconn.home and wds.linkis.engineconn.plugin.loader.store.path as the same directory, so that you can directly unzip the engine ZIP package exported by maven into this directory, for example, placing it in the ${LINKIS_HOME}/lib/linkis-engineconn-plugins directory.
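
    For example, both properties could point at that directory, a sketch of linkis.properties under the recommendation above (the property names come from the preceding paragraph; replace ${LINKIS_HOME} with your actual installation path):

    wds.linkis.engineconn.home=${LINKIS_HOME}/lib/linkis-engineconn-plugins
    wds.linkis.engineconn.plugin.loader.store.path=${LINKIS_HOME}/lib/linkis-engineconn-plugins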

    ${LINKIS_HOME}/lib/linkis-engineconn-plugins:
    └── hive
        └── dist
        └── plugin
    └── spark
        └── dist
        └── plugin

    If the two parameters do not point to the same directory, you need to place the dist and plugin directories separately, as shown in the following example:

    ## dist directory
    ${LINKIS_HOME}/lib/linkis-engineconn-plugins/dist:
    └── hive
        └── dist
    └── spark
        └── dist

    ## plugin directory
    ${LINKIS_HOME}/lib/linkis-engineconn-plugins/plugin:
    └── hive
        └── plugin
    └── spark
        └── plugin

    2.2 Configuration modification of management console (optional)#

    The configuration of the Linkis 1.0 management console is managed according to engine labels. If the new engine has configuration parameters, you need to insert the corresponding configuration parameters into the Configuration service, inserting rows into the following tables:

    linkis_configuration_config_key: Insert the keys and default values of the configuration parameters of the engine
    linkis_manager_label: Insert the engine label, such as hive-1.2.1
    linkis_configuration_category: Insert the catalog relationship of the engine
    linkis_configuration_config_value: Insert the configuration that the engine needs to display

    If it is an existing engine and a new version is added, you can modify the version of the corresponding engine in the linkis_configuration_dml.sql file and execute it.

    2.3 Engine refresh#

    1. The engine supports real-time refresh. After the engine is placed in the corresponding directory, Linkis 1.0 provides a way to load the engine without restarting the server: send a request to the linkis-engineconn-plugin-server service through its RESTful interface, i.e. the actual deployment IP and port of the service. The request URL is http://ip:port/api/rest_j/v1/rpc/receiveAndReply, the request method is POST, and the request body is {"method":"/enginePlugin/engineConn/refreshAll"} (a code sketch of this request follows after this list).

    2. Restart refresh: the engine directory can be forcibly refreshed by restarting the service

    ### cd to the sbin directory, restart linkis-engineconn-plugin-server
    cd /Linkis1.0.0/sbin
    ## Execute linkis-daemon script
    sh linkis-daemon.sh restart linkis-engine-plugin-server

    3. Check whether the engine refresh is successful: if you encounter problems during the refresh process and need to confirm whether the refresh succeeded, you can check whether the last_update_time of the linkis_engine_conn_plugin_bml_resources table in the database is the time when the refresh was triggered.
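
    As referenced in step 1, here is a sketch of triggering the refresh from code with the JDK HTTP client. The URL path and request body come straight from the text above, while the host and port are placeholders for your actual linkis-engineconn-plugin-server instance:

    import java.net.URI
    import java.net.http.{HttpClient, HttpRequest, HttpResponse}

    object RefreshEnginesSketch {
      def main(args: Array[String]): Unit = {
        val url  = "http://127.0.0.1:9103/api/rest_j/v1/rpc/receiveAndReply"  // placeholder ip:port
        val body = """{"method":"/enginePlugin/engineConn/refreshAll"}"""
        val req = HttpRequest.newBuilder(URI.create(url))
          .header("Content-Type", "application/json")
          .POST(HttpRequest.BodyPublishers.ofString(body))
          .build()
        val resp = HttpClient.newHttpClient().send(req, HttpResponse.BodyHandlers.ofString())
        println(s"${resp.statusCode()}: ${resp.body()}")
      }
    }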

    - + \ No newline at end of file diff --git a/docs/1.0.3/deployment/installation_hierarchical_structure/index.html b/docs/1.0.3/deployment/installation_hierarchical_structure/index.html index 215fc3e183b..6fde4eee04a 100644 --- a/docs/1.0.3/deployment/installation_hierarchical_structure/index.html +++ b/docs/1.0.3/deployment/installation_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Installation Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Installation directory structure

    The directory structure of Linkis 1.0 is very different from that of the 0.X version. Each microservice in 0.X has an independent root directory. The main advantage of this structure is that it is easy to distinguish microservices and manage them individually, but there are some obvious problems:

    1. There are too many microservice directories, and switching between them for management is inconvenient
    2. There is no unified startup script, which makes it troublesome to start and stop microservices
    3. There are a large number of duplicate service configurations, and the same configuration often needs to be modified in many places
    4. There are a large number of repeated Lib dependencies, which increases the size of the installation package and the risk of dependency conflicts

    Therefore, in Linkis 1.0 we have greatly optimized and adjusted the installation directory structure, reducing the number of microservice directories, reducing duplicated jar dependencies, and reusing configuration files and microservice management scripts as much as possible. This is mainly reflected in the following aspects:

    1. The bin folder is no longer provided for each microservice; it is now shared by all microservices.

    The bin folder has become the installation directory, which is mainly used for installing Linkis 1.0 and checking the environment status. The new sbin directory provides one-click starting and stopping of Linkis, as well as independent starting and stopping of individual microservices by changing parameters.

    2. A separate conf directory is no longer provided for each microservice; it is now shared by all microservices.

    The conf directory contains two kinds of content: on the one hand, configuration information shared by all microservices, which users can customize according to their own environment; on the other hand, configuration specific to each microservice, which under normal circumstances users do not need to change.

    3. The lib folder is no longer provided for each microservice; it is now shared by all microservices.

    The lib directory also contains two kinds of content: the common dependencies required by all microservices, and the special dependencies required by each microservice.

    4. The log directory is no longer provided for each microservice; it is now shared by all microservices.

    The Log directory contains log files of all microservices.

    The simplified directory structure of Linkis 1.0 is as follows.

    ├── bin ──installation directory
    │ ├── checkEnv.sh ── Environmental variable detection
    │ ├── checkServices.sh ── Microservice status check
    │ ├── common.sh ── Some public shell functions
    │ ├── install-io.sh ── Used for dependency replacement during installation
    │ └── install.sh ── Main script of Linkis installation
    ├── conf ──configuration directory
    │ ├── application-eureka.yml
    │ ├── application-linkis.yml ──Microservice general yml
    │ ├── linkis-cg-engineconnmanager-io.properties
    │ ├── linkis-cg-engineconnmanager.properties
    │ ├── linkis-cg-engineplugin.properties
    │ ├── linkis-cg-entrance.properties
    │ ├── linkis-cg-linkismanager.properties
    │ ├── linkis-computation-governance
    │ │   └── linkis-client
    │ │       └── linkis-cli
    │ │           ├── linkis-cli.properties
    │ │           └── log4j2.xml
    │ ├── linkis-env.sh ──linkis environment properties
    │ ├── linkis-et-validator.properties
    │ ├── linkis-mg-gateway.properties
    │ ├── linkis.properties ──linkis global properties
    │ ├── linkis-ps-bml.properties
    │ ├── linkis-ps-cs.properties
    │ ├── linkis-ps-datasource.properties
    │ ├── linkis-ps-publicservice.properties
    │ ├── log4j2.xml
    │ ├── proxy.properties (Optional)
    │ └── token.properties (Optional)
    ├── db ──database DML and DDL file directory
    │ ├── linkis_ddl.sql ──Database table definition SQL
    │ ├── linkis_dml.sql ──Database table initialization SQL
    │ └── module ──Contains DML and DDL files of each microservice
    ├── lib ──lib directory
    │ ├── linkis-commons ──Common dependency package
    │ ├── linkis-computation-governance ──The lib directory of the computing governance module
    │ ├── linkis-engineconn-plugins ──lib directory of all EngineConnPlugins
    │ ├── linkis-public-enhancements ──lib directory of public enhancement services
    │ └── linkis-spring-cloud-services ──SpringCloud lib directory
    ├── logs ──log directory
    │ ├── linkis-cg-engineconnmanager-gc.log
    │ ├── linkis-cg-engineconnmanager.log
    │ ├── linkis-cg-engineconnmanager.out
    │ ├── linkis-cg-engineplugin-gc.log
    │ ├── linkis-cg-engineplugin.log
    │ ├── linkis-cg-engineplugin.out
    │ ├── linkis-cg-entrance-gc.log
    │ ├── linkis-cg-entrance.log
    │ ├── linkis-cg-entrance.out
    │ ├── linkis-cg-linkismanager-gc.log
    │ ├── linkis-cg-linkismanager.log
    │ ├── linkis-cg-linkismanager.out
    │ ├── linkis-et-validator-gc.log
    │ ├── linkis-et-validator.log
    │ ├── linkis-et-validator.out
    │ ├── linkis-mg-eureka-gc.log
    │ ├── linkis-mg-eureka.log
    │ ├── linkis-mg-eureka.out
    │ ├── linkis-mg-gateway-gc.log
    │ ├── linkis-mg-gateway.log
    │ ├── linkis-mg-gateway.out
    │ ├── linkis-ps-bml-gc.log
    │ ├── linkis-ps-bml.log
    │ ├── linkis-ps-bml.out
    │ ├── linkis-ps-cs-gc.log
    │ ├── linkis-ps-cs.log
    │ ├── linkis-ps-cs.out
    │ ├── linkis-ps-datasource-gc.log
    │ ├── linkis-ps-datasource.log
    │ ├── linkis-ps-datasource.out
    │ ├── linkis-ps-publicservice-gc.log
    │ ├── linkis-ps-publicservice.log
    │ └── linkis-ps-publicservice.out
    ├── pid ──Process ID of all microservices
    │ ├── linkis_cg-engineconnmanager.pid ──EngineConnManager microservice
    │ ├── linkis_cg-engineconnplugin.pid ──EngineConnPlugin microservice
    │ ├── linkis_cg-entrance.pid ──Engine entrance microservice
    │ ├── linkis_cg-linkismanager.pid ──linkis manager microservice
    │ ├── linkis_mg-eureka.pid ──eureka microservice
    │ ├── linkis_mg-gateway.pid ──gateway microservice
    │ ├── linkis_ps-bml.pid ──material library microservice
    │ ├── linkis_ps-cs.pid ──Context microservice
    │ ├── linkis_ps-datasource.pid ──Data source microservice
    │ └── linkis_ps-publicservice.pid ──public microservice
    └── sbin ──microservice start and stop script directory
        ├── ext ──Start and stop script directory of each microservice
        ├── linkis-daemon.sh ── Quick start, stop and restart of a single microservice
        ├── linkis-start-all.sh ── Start all microservices with one click
        └── linkis-stop-all.sh ── Stop all microservices with one click

    Configuration item modification

    After executing install.sh in the bin directory to complete the Linkis installation, you need to modify the configuration items. All configuration items are located in the conf directory. Normally, you need to modify the three configuration files db.sh, linkis.properties and linkis-env.sh. For project installation and configuration, please refer to the article "Linkis1.0 Installation".

    Microservice start and stop

    After modifying the configuration items, you can start the microservice in the sbin directory. The names of all microservices are as follows:

    ├── linkis-cg-engineconnmanager ──engine management service
    ├── linkis-cg-engineplugin ──EngineConnPlugin management service
    ├── linkis-cg-entrance ──computing governance entrance service
    ├── linkis-cg-linkismanager ──computing governance management service
    ├── linkis-mg-eureka ──microservice registry service
    ├── linkis-mg-gateway ──Linkis gateway service
    ├── linkis-ps-bml ──material library service
    ├── linkis-ps-cs ──context service
    ├── linkis-ps-datasource ──data source service
    └── linkis-ps-publicservice ──public service

    Microservice abbreviation:

    Abbreviation | Full English Name          | Full Chinese Name
    cg           | Computation Governance     | Computing Governance
    mg           | Microservice Governance    | Microservice Governance
    ps           | Public Enhancement Service | Public Enhancement Service

    In the past, to start and stop a single microservice, you needed to enter the bin directory of each microservice and execute its start/stop script. With many microservices, starting and stopping was troublesome and required a lot of extra directory switching. Linkis 1.0 places all the scripts related to starting and stopping microservices in the sbin directory, and only a single entry script needs to be executed.

    Under the Linkis/sbin directory:

    1. Start all microservices at once:

    sh linkis-start-all.sh

    2. Shut down all microservices at once:

    sh linkis-stop-all.sh

    3. Start a single microservice (the linkis prefix needs to be removed from the service name, e.g. mg-eureka):

    sh linkis-daemon.sh start service-name

    For example:

    sh linkis-daemon.sh start mg-eureka

    4. Shut down a single microservice:

    sh linkis-daemon.sh stop service-name

    For example:

    sh linkis-daemon.sh stop mg-eureka

    5. Restart a single microservice:

    sh linkis-daemon.sh restart service-name

    For example:

    sh linkis-daemon.sh restart mg-eureka

    6. View the status of a single microservice:

    sh linkis-daemon.sh status service-name

    For example:

    sh linkis-daemon.sh status mg-eureka
    - + \ No newline at end of file diff --git a/docs/1.0.3/deployment/quick_deploy/index.html b/docs/1.0.3/deployment/quick_deploy/index.html index c110998522a..b2abf7c7ffa 100644 --- a/docs/1.0.3/deployment/quick_deploy/index.html +++ b/docs/1.0.3/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ Quick Deployment | Apache Linkis - + @@ -21,7 +21,7 @@ ##:If your hive version is not 1.2.1, you need to modify the following parameter: #HIVE_VERSION=2.3.3

    f. Modify the database configuration#

    vi deploy-config/db.sh 
    # set the connection information of the database
    # including ip address, database's name, username and port
    # Mainly used to store user's customized variables, configuration parameters, UDFs, and small functions, and to provide the underlying storage of the JobHistory.
    MYSQL_HOST=
    MYSQL_PORT=
    MYSQL_DB=
    MYSQL_USER=
    MYSQL_PASSWORD=

    3. Installation and Startup#

    1. Execute the installation script:#

    sh bin/install.sh

    2. Installation steps#

    • The install.sh script will ask you whether to initialize the database and import the metadata.

    It is possible for a user to repeatedly run the install.sh script and thereby clear all data in the databases. Therefore, each time install.sh is executed, the user is asked whether they need to initialize the database and import the metadata.

    Please select yes on the first installation.

    Please note: If you are upgrading the existing environment of Linkis from 0.X to 1.0, please do not choose yes directly, refer to Linkis1.0 Upgrade Guide first.

    3. Check whether the installation succeeded#

    You can check whether the installation is successful or not by viewing the logs printed on the console.

    If there is an error message, check the specific reason for that error or refer to FAQ for help.

    4. Add mysql driver package#

    Note

    Because the mysql-connector-java driver is licensed under GPL 2.0 and does not meet the license policy of the Apache open source project, starting from version 1.0.3 the official Apache deployment package does not include the mysql-connector-java-x.x.x.jar dependency by default; you need to add it to the corresponding lib directories during installation and deployment.

    To download the mysql driver, take version 5.1.49 as an example: download link https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.49/mysql-connector-java-5.1.49.jar

    Copy the mysql driver package to the lib package path

    cp mysql-connector-java-5.1.49.jar {LINKIS_HOME}/lib/linkis-spring-cloud-services/linkis-mg-gateway/
    cp mysql-connector-java-5.1.49.jar {LINKIS_HOME}/lib/linkis-commons/public-module/

    5. Linkis quick startup#

    (1). Start services

    Run the following commands on the installation directory to start all services.

    sh sbin/linkis-start-all.sh

    (2). Check if start successfully

    You can check the startup status of the services on the Eureka, here is the way to check:

    Open http://${EUREKA_INSTALL_IP}:${EUREKA_PORT} on the browser and check if services have registered successfully.

    If you have not specified EUREKA_INSTALL_IP and EUREKA_PORT in config.sh, then the HTTP address is http://127.0.0.1:20303

    As shown in the figure below, if all of the following microservices are registered in Eureka, it means that they have started successfully and are able to work.

    Linkis1.0_Eureka

    - + \ No newline at end of file diff --git a/docs/1.0.3/deployment/sourcecode_hierarchical_structure/index.html b/docs/1.0.3/deployment/sourcecode_hierarchical_structure/index.html index 06bc1b2882f..f99a34a1ab9 100644 --- a/docs/1.0.3/deployment/sourcecode_hierarchical_structure/index.html +++ b/docs/1.0.3/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Source Code Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Source Code Directory Structure

    This section describes the hierarchical directory structure of the Linkis source code. If you want to learn more about the Linkis modules, please check Linkis related architecture design.

    |-- assembly-combined-package //Compile the module of the entire project
    |        |-- assembly-combined
    |        |-- bin
    |        |-- deploy-config
    |        |-- src
    |-- linkis-commons //Core abstraction, which contains all common modules
    |        |-- linkis-common //Common module, built-in many common tools
    |        |-- linkis-hadoop-common
    |        |-- linkis-httpclient //Java SDK top-level interface
    |        |-- linkis-message-scheduler
    |        |-- linkis-module
    |        |-- linkis-mybatis //SpringCloud's Mybatis module
    |        |-- linkis-protocol
    |        |-- linkis-rpc //RPC module, complex two-way communication based on Feign
    |        |-- linkis-scheduler //General scheduling module
    |        |-- linkis-storage
    |
    |-- linkis-computation-governance //computing governance service
    |        |-- linkis-client //Java SDK, users can directly access Linkis through Client
    |        |-- linkis-computation-governance-common
    |        |-- linkis-engineconn
    |        |-- linkis-engineconn-manager
    |        |-- linkis-entrance //General low-level entrance module
    |        |-- linkis-entrance-client
    |        |-- linkis-jdbc-driver
    |        |-- linkis-manager
    |
    |-- linkis-engineconn-plugins
    |        |-- engineconn-plugins
    |        |-- linkis-engineconn-plugin-framework
    |
    |-- linkis-extensions
    |        |-- linkis-io-file-client
    |-- linkis-orchestrator
    |        |-- linkis-code-orchestrator
    |        |-- linkis-computation-orchestrator
    |        |-- linkis-orchestrator-core
    |        |-- plugin
    |-- linkis-public-enhancements //Public enhancement services
    |        |-- linkis-bml //Material library
    |        |-- linkis-context-service //Unified context
    |        |-- linkis-datasource //Data source service
    |        |-- linkis-publicservice //Public Service
    |-- linkis-spring-cloud-services //Microservice governance
    |        |-- linkis-service-discovery
    |        |-- linkis-service-gateway //Gateway
    |-- db //Database information
    |-- license-doc //license details
    |        |-- license //The license of the background project
    |        |-- ui-license //License of the linkis management console
    |-- tool //Tool script
    |        |-- check.sh
    |        |-- dependencies
    |
    |-- web //Management console code of linkis
    |
    |-- scalastyle-config.xml //Scala code format check configuration file
    |-- CONTRIBUTING.md
    |-- CONTRIBUTING_CN.md
    |-- DISCLAIMER-WIP
    |-- LICENSE //LICENSE of the project source code
    |-- LICENSE-binary //LICENSE of binary package
    |-- LICENSE-binary-ui //LICENSE of the front-end compiled package
    |-- NOTICE //NOTICE of project source code
    |-- NOTICE-binary //NOTICE of binary package
    |-- NOTICE-binary-ui //NOTICE of front-end binary package
    |-- licenses-binary //The detailed dependent license files of the binary package
    |-- licenses-binary-ui //The detailed dependent license files of the front-end compiled package
    |-- README.md
    |-- README_CN.md
    - + \ No newline at end of file diff --git a/docs/1.0.3/deployment/web_install/index.html b/docs/1.0.3/deployment/web_install/index.html index c79cec9eb43..de0f436d40d 100644 --- a/docs/1.0.3/deployment/web_install/index.html +++ b/docs/1.0.3/deployment/web_install/index.html @@ -7,7 +7,7 @@ Linkis Console Deployment | Apache Linkis - + @@ -21,7 +21,7 @@
    1. Copy the front-end package to the corresponding directory: /appcom/Install/linkis/dist; # The directory where the front-end package is decompressed

    2. Start the service sudo systemctl restart nginx

    3. After execution, you can access it directly in the Chrome browser: http://nginx_ip:nginx_port

    3. Common problems#

    (1) Upload file size limit

    sudo vi /etc/nginx/nginx.conf

    Change upload size

    client_max_body_size 200m

    (2) Interface timeout

    sudo vi /etc/nginx/conf.d/linkis.conf

    Change interface timeout

    proxy_read_timeout 600s
    - + \ No newline at end of file diff --git a/docs/1.0.3/development/linkis_compile_and_package/index.html b/docs/1.0.3/development/linkis_compile_and_package/index.html index 269aa6ff26a..4d6fc5cc37a 100644 --- a/docs/1.0.3/development/linkis_compile_and_package/index.html +++ b/docs/1.0.3/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Compile And Package | Apache Linkis - + @@ -20,7 +20,7 @@ Modify the dependency hadoop-hdfs to hadoop-hdfs-client:

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId> <!-- Just replace this line with <artifactId>hadoop-hdfs-client</artifactId> -->
        <version>${hadoop.version}</version>
    </dependency>

    Modify hadoop-hdfs to:

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs-client</artifactId>
        <version>${hadoop.version}</version>
    </dependency>

    5.2 How to modify the Spark and Hive versions that Linkis depends on#

    Here's an example of changing the version of Spark. Go to the directory where the Spark engine is located and manually modify the Spark version information of the pom.xml file as follows:

    cd incubator-linkis-x.x.x/linkis-engineconn-plugins/engineconn-plugins/spark
    vim pom.xml

    <properties>
        <spark.version>2.4.3</spark.version> <!-- Modify the Spark version number here -->
    </properties>

    Modifying the version of other engines is similar to modifying the Spark version. First, enter the directory where the relevant engine is located, and manually modify the engine version information in the pom.xml file.

    Then please refer to 4. Compile an engine

    - + \ No newline at end of file diff --git a/docs/1.0.3/development/linkis_debug/index.html b/docs/1.0.3/development/linkis_debug/index.html index 0f97dd5854a..5341ac81513 100644 --- a/docs/1.0.3/development/linkis_debug/index.html +++ b/docs/1.0.3/development/linkis_debug/index.html @@ -7,7 +7,7 @@ Linkis Debug | Apache Linkis - + @@ -44,7 +44,7 @@ [linkis-cg-engineplugin]nohup java -DserviceName=linkis-cg-engineplugin -Xmx512M -XX:+UseG1GC -Xloggc:/data/LinkisInstallDir/logs/linkis-cg-engineplugin-gc.log -cp /data/LinkisInstallDir/conf/:/data/LinkisInstallDir /lib/linkis-commons/public-module/*:/data/LinkisInstallDir/lib/linkis-computation-governance/linkis-cg-engineplugin/* org.apache.linkis.engineplugin.server.LinkisEngineConnPluginServer 2>&1> /data /LinkisInstallDir/logs/linkis-cg-engineplugin.out &

    Remote debugging service steps#

    todo

    - + \ No newline at end of file diff --git a/docs/1.0.3/development/new_engine_conn/index.html b/docs/1.0.3/development/new_engine_conn/index.html index 7e8512e084e..bec4343cc73 100644 --- a/docs/1.0.3/development/new_engine_conn/index.html +++ b/docs/1.0.3/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ How To Quickly Implement A New Engine | Apache Linkis - + @@ -53,7 +53,7 @@ const NODEICON = { [NODETYPE.JDBC]: { icon: jdbc, class: {'jdbc': true} },}

    Add the icon of the new engine in the web/src/apps/workflows/module/process/images/newIcon/ directory

    web/src/apps/workflows/module/process/images/newIcon/jdbc

    Also, when contributing to the community, please consider the license or copyright of the svg file.

    3. Chapter Summary#

    The above content records the implementation process of the new engine, as well as some additional engine configurations that need to be done. At present, the expansion process of a new engine is still relatively cumbersome, and it is hoped that the expansion and installation of the new engine can be optimized in subsequent versions.

    - + \ No newline at end of file diff --git a/docs/1.0.3/development/springmvc-replaces-jersey/index.html b/docs/1.0.3/development/springmvc-replaces-jersey/index.html index 05017bbdde2..6c2a1c1d460 100644 --- a/docs/1.0.3/development/springmvc-replaces-jersey/index.html +++ b/docs/1.0.3/development/springmvc-replaces-jersey/index.html @@ -7,7 +7,7 @@ SpringMVC Replaces Jersey | Apache Linkis - + @@ -17,7 +17,7 @@

    For details, please refer to

    - + \ No newline at end of file diff --git a/docs/1.0.3/development/web_build/index.html b/docs/1.0.3/development/web_build/index.html index 43cb0866f95..be57e6f06b4 100644 --- a/docs/1.0.3/development/web_build/index.html +++ b/docs/1.0.3/development/web_build/index.html @@ -7,7 +7,7 @@ Linkis Console Compile | Apache Linkis - + @@ -17,7 +17,7 @@ When you run the project in this way, the effect of your code changes will be dynamically reflected in the browser.

    Note: Because the front end and back end of the project are developed separately, when running in a local browser, the browser needs to be set to allow cross-domain requests in order to access the back-end interface. For specific settings, please refer to solve the chrome cross domain problem.

    6. Common problem#

    6.1 npm install cannot succeed#

    If you encounter this situation, you can use the domestic Taobao npm mirror:

    npm install -g cnpm --registry=https://registry.npm.taobao.org

    Then, replace the npm install command by executing the following command

    cnpm install

    Note that when the project is started and packaged, you can still use the npm run build and npm run serve commands

    - + \ No newline at end of file diff --git a/docs/1.0.3/engine_usage/flink/index.html b/docs/1.0.3/engine_usage/flink/index.html index 79f7f218628..ec529f6594c 100644 --- a/docs/1.0.3/engine_usage/flink/index.html +++ b/docs/1.0.3/engine_usage/flink/index.html @@ -7,7 +7,7 @@ Flink Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ EngineConnPlugin Installation

    2.3 Flink engine tags#

    Linkis 1.0 manages engines through tags, so we need to insert tag data into our database. The way of inserting is shown below:

    EngineConnPlugin Installation > 2.2 Configuration modification of management console (optional)

    3. The use of Flink engine#

    Preparation operation, queue setting#

    The Flink engine of Linkis 1.0 is started via Flink on YARN, so you need to specify the queue used by the user. The way to specify the queue is shown in Figure 3-1.

    Figure 3-1 Queue settings

    Background knowledge: two ways to use the Flink engine#

    Linkis' Flink engine has two execution methods. One is the ComputationEngineConn method, which is mainly used in DSS-Scriptis or Streamis-Datasource for debugging, sampling and verifying the correctness of the Flink code; the other is the OnceEngineConn method, which is mainly used to start a streaming application in the Streamis production center.

    Background knowledge: the Connector plug-ins of FlinkSQL#

    FlinkSQL supports a variety of data sources, such as binlog, kafka, hive, etc. If you want to use these data sources in Flink code, you need to put the connector plug-in jar packages into the lib of the Flink engine and restart the Linkis EnginePlugin service. For example, if you want to use binlog as a data source in your FlinkSQL, you need to put flink-connector-mysql-cdc-1.1.1.jar into the lib of the Flink engine.

    cd ${LINKIS_HOME}/sbin
    sh linkis-daemon.sh restart cg-engineplugin

    3.1 ComputationEngineConn method#

    In order to facilitate sampling and debugging, we have added an fql script type to Scriptis, which is specifically used to execute FlinkSQL. You need to ensure that your DSS has been upgraded to DSS 1.0.0; after upgrading, you can directly enter Scriptis and create a new fql script for editing and execution.

    A FlinkSQL writing example, taking binlog as the data source:

    CREATE TABLE mysql_binlog (
      id INT NOT NULL,
      name STRING,
      age INT
    ) WITH (
      'connector' = 'mysql-cdc',
      'hostname' = 'ip',
      'port' = 'port',
      'username' = 'username',
      'password' = 'password',
      'database-name' = 'dbname',
      'table-name' = 'tablename',
      'debezium.snapshot.locking.mode' = 'none'  -- It is recommended to add this, otherwise the table will be locked
    );
    select * from mysql_binlog where id > 10;

    When debugging with the select syntax in Scriptis, the Flink engine has an automatic cancel mechanism: when the specified time is reached or the number of sampled rows reaches the specified limit, the Flink engine actively cancels the task and persists the result sets obtained so far; the front end then calls the open-result-set interface to display them.

    3.2 Task submission via Linkis-cli#

    Since Linkis 1.0, a cli method is provided to submit tasks. We only need to specify the corresponding EngineConn and CodeType tag types. The use of Flink is as follows:

    sh ./bin/linkis-cli -engineType flink-1.12.2 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to: Linkis CLI Manual.

    3.3 OnceEngineConn method#

    The OnceEngineConn method is used to formally start Flink's streaming applications. Specifically, it calls LinkisManager's createEngineConn interface through LinkisManagerClient and sends the code to the created Flink engine, which then starts executing. This method can be called by other systems, such as Streamis. The use of the Client is also very simple: first create a new maven project, or introduce the following dependency into your project.

    <dependency>
        <groupId>com.webank.wedatasphere.linkis</groupId>
        <artifactId>linkis-computation-client</artifactId>
        <version>${linkis.version}</version>
    </dependency>

    Then create a new Scala test file and click Execute; the example below reads binlog data from one table and inserts it into another MySQL database table. Note that you must create a resources directory in the maven project and place a linkis.properties file in it, specifying the gateway address and API version of Linkis, for example:

    wds.linkis.server.version=v1
    wds.linkis.gateway.url=http://ip:9001/

    object OnceJobTest {
      def main(args: Array[String]): Unit = {
        val sql = """CREATE TABLE mysql_binlog (
                    | id INT NOT NULL,
                    | name STRING,
                    | age INT
                    |) WITH (
                    | 'connector' = 'mysql-cdc',
                    | 'hostname' = 'ip',
                    | 'port' = 'port',
                    | 'username' = '${username}',
                    | 'password' = '${password}',
                    | 'database-name' = '${database}',
                    | 'table-name' = '${tablename}',
                    | 'debezium.snapshot.locking.mode' = 'none'
                    |);
                    |CREATE TABLE sink_table (
                    | id INT NOT NULL,
                    | name STRING,
                    | age INT,
                    | primary key(id) not enforced
                    |) WITH (
                    |  'connector' = 'jdbc',
                    |  'url' = 'jdbc:mysql://${ip}:port/${database}',
                    |  'table-name' = '${tablename}',
                    |  'driver' = 'com.mysql.jdbc.Driver',
                    |  'username' = '${username}',
                    |  'password' = '${password}'
                    |);
                    |INSERT INTO sink_table SELECT id, name, age FROM mysql_binlog;
                    |""".stripMargin
        val onceJob = SimpleOnceJob.builder().setCreateService("Flink-Test")
          .addLabel(LabelKeyUtils.ENGINE_TYPE_LABEL_KEY, "flink-1.12.2")
          .addLabel(LabelKeyUtils.USER_CREATOR_LABEL_KEY, "hadoop-Streamis")
          .addLabel(LabelKeyUtils.ENGINE_CONN_MODE_LABEL_KEY, "once")
          .addStartupParam(Configuration.IS_TEST_MODE.key, true)
          //    .addStartupParam("label." + LabelKeyConstant.CODE_TYPE_KEY, "sql")
          .setMaxSubmitTime(300000)
          .addExecuteUser("hadoop").addJobContent("runType", "sql")
          .addJobContent("code", sql).addSource("jobName", "OnceJobTest")
          .build()
        onceJob.submit()
        println(onceJob.getId)
        onceJob.waitForCompleted()
        System.exit(0)
      }
    }
    - + \ No newline at end of file diff --git a/docs/1.0.3/engine_usage/hive/index.html b/docs/1.0.3/engine_usage/hive/index.html index 5894d85f94f..ab7b070ee62 100644 --- a/docs/1.0.3/engine_usage/hive/index.html +++ b/docs/1.0.3/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive Engine Usage | Apache Linkis - + @@ -26,7 +26,7 @@ </loggers></configuration>
    - + \ No newline at end of file diff --git a/docs/1.0.3/engine_usage/jdbc/index.html b/docs/1.0.3/engine_usage/jdbc/index.html index 2f180b315be..6d0e4a128b8 100644 --- a/docs/1.0.3/engine_usage/jdbc/index.html +++ b/docs/1.0.3/engine_usage/jdbc/index.html @@ -7,7 +7,7 @@ JDBC Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

        Map<String, Object> labels = new HashMap<String, Object>();
        labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "jdbc-4"); // required engineType Label
        labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
        labels.put(LabelKeyConstant.CODE_TYPE_KEY, "jdbc"); // required codeType

    3.2 How to use Linkis-cli#

    Since Linkis 1.0, you can submit tasks through the cli. You only need to specify the corresponding EngineConn and CodeType tag types. The use of JDBC is as follows:

    sh ./bin/linkis-cli -engineType jdbc-4 -codeType jdbc -code "show tables"  -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to the Linkis CLI Manual.

    3.3 How to use Scriptis#

    The way to use Scriptis is the simplest. You can go directly to Scriptis, right-click the directory and create a new JDBC script, write JDBC code and click Execute.

    The execution principle of JDBC is to load the JDBC driver, submit the SQL to the SQL server for execution, obtain the result set, and return it.
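
    The sketch below illustrates that principle with plain JDK APIs only; the driver class, URL, credentials, and SQL are placeholders for illustration, not Linkis internals.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class JdbcPrincipleDemo {
        public static void main(String[] args) throws Exception {
            // 1. Load the JDBC driver (placeholder driver class)
            Class.forName("com.mysql.jdbc.Driver");
            // 2. Open a connection to the SQL server and submit the SQL
            try (Connection conn = DriverManager.getConnection(
                    "jdbc:mysql://ip:3306/dbname", "username", "password");
                 Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery("show tables")) {
                // 3. Obtain the result set and return it to the caller (printed here)
                while (rs.next()) {
                    System.out.println(rs.getString(1));
                }
            }
        }
    }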

    Figure 3-2 Screenshot of the execution effect of JDBC

    4. JDBC EngineConn user settings#

    JDBC user settings are mainly the JDBC connection information; it is recommended that users encrypt and manage the password and other sensitive information.
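
    As one possible way to follow that recommendation, here is a minimal sketch using only JDK crypto APIs; Linkis does not mandate this scheme, and how the key is stored and loaded is up to your deployment.

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;
    import javax.crypto.Cipher;
    import javax.crypto.KeyGenerator;
    import javax.crypto.SecretKey;

    public class PasswordEncryptDemo {
        public static void main(String[] args) throws Exception {
            // Generate (or load) a key that is kept outside the configuration file
            KeyGenerator keyGen = KeyGenerator.getInstance("AES");
            keyGen.init(128);
            SecretKey key = keyGen.generateKey();

            // Encrypt the JDBC password and store the ciphertext instead of plain text
            Cipher cipher = Cipher.getInstance("AES");
            cipher.init(Cipher.ENCRYPT_MODE, key);
            byte[] encrypted = cipher.doFinal("myJdbcPassword".getBytes(StandardCharsets.UTF_8));
            System.out.println(Base64.getEncoder().encodeToString(encrypted));
        }
    }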

    - + \ No newline at end of file diff --git a/docs/1.0.3/engine_usage/overview/index.html b/docs/1.0.3/engine_usage/overview/index.html index 111c7e7e1e8..b519d8cb0fe 100644 --- a/docs/1.0.3/engine_usage/overview/index.html +++ b/docs/1.0.3/engine_usage/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -16,7 +16,7 @@         The engine is a component that provides users with data processing and analysis capabilities. Currently, it has been connected to Linkis's engine, including mainstream big data computing engines Spark, Hive, Presto, etc. , There are also engines with the ability to process data in scripts such as python and Shell. DataSphereStudio is a one-stop data operation platform docked with Linkis. Users can conveniently use the engine supported by Linkis in DataSphereStudio to complete interactive data analysis tasks and workflow tasks.

    Engine | Whether to support Scriptis | Whether to support workflow
    Spark | Support | Support
    Hive | Support | Support
    Presto | Support | Support
    ElasticSearch | Support | Support
    Python | Support | Support
    Shell | Support | Support
    JDBC | Support | Support
    MySQL | Support | Support
    Flink | Support | Support

    2. Document structure#

    For the documentation of the engines that have already been integrated, please refer to the following documents.

    - + \ No newline at end of file diff --git a/docs/1.0.3/engine_usage/python/index.html b/docs/1.0.3/engine_usage/python/index.html index 4fa175dc600..83c2c248c29 100644 --- a/docs/1.0.3/engine_usage/python/index.html +++ b/docs/1.0.3/engine_usage/python/index.html @@ -7,7 +7,7 @@ Python Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Gateway, and then the Python EngineConn submits the code to the python executor for execution.

    Figure 3-1 Screenshot of the execution effect of python

    4. Python EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the Python version and the modules that Python needs to load.

    Figure 4-1 User-defined configuration management console of python

    - + \ No newline at end of file diff --git a/docs/1.0.3/engine_usage/shell/index.html b/docs/1.0.3/engine_usage/shell/index.html index 93ebd1f5c2e..fc533d68748 100644 --- a/docs/1.0.3/engine_usage/shell/index.html +++ b/docs/1.0.3/engine_usage/shell/index.html @@ -7,7 +7,7 @@ Shell Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

        Map<String, Object> labels = new HashMap<String, Object>();
        labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "shell-1"); // required engineType Label
        labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
        labels.put(LabelKeyConstant.CODE_TYPE_KEY, "shell"); // required codeType

    3.2 How to use Linkis-cli#

    Since Linkis 1.0, you can submit tasks through the cli. You only need to specify the corresponding EngineConn and CodeType tag types. The use of shell is as follows:

    sh ./bin/linkis-cli -engineType shell-1 -codeType shell -code "echo \"hello\" "  -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to the Linkis CLI Manual.

    3.3 How to use Scriptis#

    The use of Scriptis is the simplest. You can directly enter Scriptis, right-click the directory and create a new shell script, write shell code and click Execute.

    The execution principle of the shell engine is that the shell EngineConn starts a system process through Java's built-in ProcessBuilder, redirects the output of the process back to the EngineConn, and writes it to the log.
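
    A minimal sketch of that mechanism with plain JDK APIs (the shell command is a placeholder; the real EngineConn adds its own log handling):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;

    public class ShellPrincipleDemo {
        public static void main(String[] args) throws Exception {
            // Start a system process for the shell code, merging stderr into stdout
            ProcessBuilder builder = new ProcessBuilder("sh", "-c", "echo hello");
            builder.redirectErrorStream(true);
            Process process = builder.start();
            // Redirect the process output and write it to the log
            try (BufferedReader reader = new BufferedReader(
                    new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    System.out.println(line); // the EngineConn would append this to its log
                }
            }
            System.out.println("exit code: " + process.waitFor());
        }
    }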

    Figure 3-1 Screenshot of shell execution effect

    4. Shell EngineConn user settings#

    The shell EngineConn can generally set the maximum memory of the EngineConn JVM.

    - + \ No newline at end of file diff --git a/docs/1.0.3/engine_usage/spark/index.html b/docs/1.0.3/engine_usage/spark/index.html index 918edeec29d..ffa3dbf01bb 100644 --- a/docs/1.0.3/engine_usage/spark/index.html +++ b/docs/1.0.3/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Figure 3-4 pyspark execution mode

    4. Spark EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the number of executors of the Spark session and the executor memory. These parameters allow users to set their own Spark parameters more freely, and other Spark parameters can also be modified, such as the Python version used by pyspark.

    Figure 4-1 Spark user-defined configuration management console

    - + \ No newline at end of file diff --git a/docs/1.0.3/introduction/index.html b/docs/1.0.3/introduction/index.html index be48671447c..fcb12689f37 100644 --- a/docs/1.0.3/introduction/index.html +++ b/docs/1.0.3/introduction/index.html @@ -7,7 +7,7 @@ Introduction | Apache Linkis - + @@ -23,7 +23,7 @@ Since the first release of Linkis in 2019, it has accumulated more than 700 trial companies and 1000+ sandbox trial users, which involving diverse industries, from finance, banking, tele-communication, to manufactory, internet companies and so on.

    - + \ No newline at end of file diff --git a/docs/1.0.3/tags/index.html b/docs/1.0.3/tags/index.html index d8a3427b038..96260ecaf3a 100644 --- a/docs/1.0.3/tags/index.html +++ b/docs/1.0.3/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -15,7 +15,7 @@

    Tags

    - + \ No newline at end of file diff --git a/docs/1.0.3/tuning_and_troubleshooting/configuration/index.html b/docs/1.0.3/tuning_and_troubleshooting/configuration/index.html index 7f2d69ac708..ae9ded4674a 100644 --- a/docs/1.0.3/tuning_and_troubleshooting/configuration/index.html +++ b/docs/1.0.3/tuning_and_troubleshooting/configuration/index.html @@ -7,7 +7,7 @@ Configurations | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Linkis1.0 Configurations

    The configuration of Linkis 1.0 is simplified compared with Linkis 0.x. A public configuration file, linkis.properties, is provided in the conf directory so that common configuration parameters do not need to be configured in multiple microservices at the same time. This document lists the parameters of Linkis 1.0 by module.

            Please note: this article only lists the Linkis configuration parameters that affect operating performance or depend on the environment. The many configuration parameters that users do not need to care about have been omitted; interested users can browse the source code.

    1 General configuration#

            The general configuration can be set in the global linkis.properties; once set, it takes effect for every microservice.

    1.1 Global configurations#

    Parameter name | Default value | Description
    wds.linkis.encoding | utf-8 | Linkis default encoding format
    wds.linkis.date.pattern | yyyy-MM-dd'T'HH:mm:ssZ | Default date format
    wds.linkis.test.mode | false | Whether to enable debugging mode; if set to true, all microservices support password-free login and all EngineConn open remote debugging ports
    wds.linkis.test.user | None | When wds.linkis.test.mode=true, the default login user for password-free login
    wds.linkis.home | /appcom/Install/LinkisInstall | Linkis installation directory; if it does not exist, the value of LINKIS_HOME is used automatically
    wds.linkis.httpclient.default.connect.timeOut | 50000 | Linkis HttpClient default connection timeout

    1.2 LDAP configurations#

    Parameter name | Default value | Description
    wds.linkis.ldap.proxy.url | None | LDAP URL address
    wds.linkis.ldap.proxy.baseDN | None | LDAP baseDN address
    wds.linkis.ldap.proxy.userNameFormat | None |

    1.3 Hadoop configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.hadoop.root.user | hadoop | HDFS super user
    wds.linkis.filesystem.hdfs.root.path | None | User's HDFS default root path
    wds.linkis.keytab.enable | false | Whether to enable kerberos
    wds.linkis.keytab.file | /appcom/keytab | Kerberos keytab path, effective only when wds.linkis.keytab.enable=true
    wds.linkis.keytab.host.enabled | false |
    wds.linkis.keytab.host | 127.0.0.1 |
    hadoop.config.dir | None | If not configured, it will be read from the environment variable HADOOP_CONF_DIR
    wds.linkis.hadoop.external.conf.dir.prefix | /appcom/config/external-conf/hadoop | hadoop additional configuration

    1.4 Linkis RPC configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.rpc.broadcast.thread.num | 10 | Linkis RPC broadcast thread number (recommended default value)
    wds.linkis.ms.rpc.sync.timeout | 60000 | Linkis RPC Receiver's default processing timeout
    wds.linkis.rpc.eureka.client.refresh.interval | 1s | Refresh interval of the Eureka client's microservice list (recommended default value)
    wds.linkis.rpc.eureka.client.refresh.wait.time.max | 1m | Maximum waiting time for a refresh (recommended default value)
    wds.linkis.rpc.receiver.asyn.consumer.thread.max | 10 | Maximum number of Receiver Consumer threads (if there are many online users, it is recommended to increase this parameter appropriately)
    wds.linkis.rpc.receiver.asyn.consumer.freeTime.max | 2m | Receiver Consumer maximum idle time
    wds.linkis.rpc.receiver.asyn.queue.size.max | 1000 | Maximum number of buffers in the Receiver consumption queue (if there are many online users, it is recommended to increase this parameter appropriately)
    wds.linkis.rpc.sender.asyn.consumer.thread.max | 5 | Sender Consumer maximum number of threads
    wds.linkis.rpc.sender.asyn.consumer.freeTime.max | 2m | Sender Consumer maximum idle time
    wds.linkis.rpc.sender.asyn.queue.size.max | 300 | Maximum number of buffers in the Sender consumption queue

    2. Computation governance configuration parameters#

    2.1 Entrance configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.spark.engine.version | 2.4.3 | The default Spark version used when the user submits a script without specifying a version
    wds.linkis.hive.engine.version | 1.2.1 | The default Hive version used when the user submits a script without specifying a version
    wds.linkis.python.engine.version | python2 | The default Python version used when the user submits a script without specifying a version
    wds.linkis.jdbc.engine.version | 4 | The default JDBC version used when the user submits a script without specifying a version
    wds.linkis.shell.engine.version | 1 | The default shell version used when the user submits a script without specifying a version
    wds.linkis.appconn.engine.version | v1 | The default AppConn version used when the user submits a script without specifying a version
    wds.linkis.entrance.scheduler.maxParallelismUsers | 1000 | Maximum number of concurrent users supported by Entrance
    wds.linkis.entrance.job.persist.wait.max | 5m | Maximum time for Entrance to wait for JobHistory to persist a Job
    wds.linkis.entrance.config.log.path | None | If not configured, the value of wds.linkis.filesystem.hdfs.root.path is used by default
    wds.linkis.default.requestApplication.name | IDE | The default submission system when the submission system is not specified
    wds.linkis.default.runType | sql | The default script type when the script type is not specified
    wds.linkis.warn.log.exclude | org.apache,hive.ql,hive.metastore,com.netflix,com.webank.wedatasphere | Real-time WARN-level logs that are not output to the client by default
    wds.linkis.log.exclude | org.apache,hive.ql,hive.metastore,com.netflix,com.webank.wedatasphere,com.webank | Real-time INFO-level logs that are not output to the client by default
    wds.linkis.instance | 3 | User's default number of concurrent jobs per engine
    wds.linkis.max.ask.executor.time | 5m | Maximum time to apply to LinkisManager for an available EngineConn
    wds.linkis.hive.special.log.include | org.apache.hadoop.hive.ql.exec.Task | Logs that are not filtered by default when pushing Hive logs to the client
    wds.linkis.spark.special.log.include | org.apache.linkis.engine.spark.utils.JobProgressUtil | Logs that are not filtered by default when pushing Spark logs to the client
    wds.linkis.entrance.shell.danger.check.enabled | false | Whether to check and block dangerous shell syntax
    wds.linkis.shell.danger.usage | rm,sh,find,kill,python,for,source,hdfs,hadoop,spark-sql,spark-submit,pyspark,spark-shell,hive,yarn | Default dangerous shell syntax
    wds.linkis.shell.white.usage | cd,ls | Shell whitelist syntax
    wds.linkis.sql.default.limit | 5000 | Default maximum number of rows in a returned SQL result set

    2.2 EngineConn configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.engineconn.resultSet.default.store.path | hdfs:///tmp | Default storage path for job result sets
    wds.linkis.engine.resultSet.cache.max | 0k | If the size of a result set is below this value, the EngineConn returns it directly to Entrance without writing it to disk
    wds.linkis.engine.default.limit | 5000 |
    wds.linkis.engine.lock.expire.time | 120000 | The maximum idle time of the engine lock, i.e. how long after Entrance applies for the lock without submitting code to the EngineConn before the lock is released
    wds.linkis.engineconn.ignore.words | org.apache.spark.deploy.yarn.Client | Logs ignored by default when the Engine pushes logs to the Entrance side
    wds.linkis.engineconn.pass.words | org.apache.hadoop.hive.ql.exec.Task | Logs that must be pushed by default when the Engine pushes logs to the Entrance side
    wds.linkis.engineconn.heartbeat.time | 3m | Default heartbeat interval from EngineConn to LinkisManager
    wds.linkis.engineconn.max.free.time | 1h | EngineConn's maximum idle time

    2.3 EngineConnManager configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.ecm.memory.max | 80g | Maximum total memory the ECM can use to start EngineConns
    wds.linkis.ecm.cores.max | 50 | Maximum number of CPU cores the ECM can use to start EngineConns
    wds.linkis.ecm.engineconn.instances.max | 50 | Maximum number of EngineConns that can be started; it is generally recommended to set this the same as wds.linkis.ecm.cores.max
    wds.linkis.ecm.protected.memory | 4g | ECM protected memory, that is, the memory the ECM uses to start EngineConns cannot exceed wds.linkis.ecm.memory.max minus wds.linkis.ecm.protected.memory
    wds.linkis.ecm.protected.cores.max | 2 | Number of protected ECM CPU cores; the meaning is the same as wds.linkis.ecm.protected.memory
    wds.linkis.ecm.protected.engine.instances | 2 | Number of protected ECM instances
    wds.linkis.engineconn.wait.callback.pid | 3s | Waiting time for EngineConn to return the pid

    2.4 LinkisManager configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.manager.am.engine.start.max.time | 10m | Maximum time for LinkisManager to start a new EngineConn
    wds.linkis.manager.am.engine.reuse.max.time | 5m | Maximum selection time for LinkisManager to reuse an existing EngineConn
    wds.linkis.manager.am.engine.reuse.count.limit | 10 | Maximum polling times for LinkisManager to reuse an existing EngineConn
    wds.linkis.multi.user.engine.types | jdbc,es,presto | Engine types for which the user is not part of the reuse rule when LinkisManager reuses an existing EngineConn
    wds.linkis.rm.instance | 10 | Default maximum number of instances per user per engine
    wds.linkis.rm.yarnqueue.cores.max | 150 | Maximum number of cores per user in each engine's usage queue
    wds.linkis.rm.yarnqueue.memory.max | 450g | Maximum amount of memory per user in each engine's usage queue
    wds.linkis.rm.yarnqueue.instance.max | 30 | Maximum number of applications launched by each user in each engine's queue

    3. Engine configuration parameters#

    3.1 JDBC engine configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.jdbc.default.limit | 5000 | Default maximum number of rows in a returned result set
    wds.linkis.jdbc.support.dbs | mysql=>com.mysql.jdbc.Driver,postgresql=>org.postgresql.Driver,oracle=>oracle.jdbc.driver.OracleDriver,hive2=>org.apache.hive.jdbc.HiveDriver,presto=>com.facebook.presto.jdbc.PrestoDriver | Drivers supported by the JDBC engine
    wds.linkis.engineconn.jdbc.concurrent.limit | 100 | Maximum number of concurrent SQL executions

    3.2 Python engine configuration parameters#

    Parameter name | Default value | Description
    pythonVersion | /appcom/Install/anaconda3/bin/python | Python command path
    python.path | None | Specify an additional path for Python; it only accepts shared storage paths

    3.3 Spark engine configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.engine.spark.language-repl.init.time | 30s | Maximum initialization time for Scala and Python command interpreters
    PYSPARK_DRIVER_PYTHON | python | Python command path
    wds.linkis.server.spark-submit | spark-submit | spark-submit command path

    4. PublicEnhancements configuration parameters#

    4.1 BML configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.bml.dws.version | v1 | Version number of Linkis Restful requests
    wds.linkis.bml.auth.token.key | Validation-Code | Password-free token-key for BML requests
    wds.linkis.bml.auth.token.value | BML-AUTH | Password-free token-value for BML requests
    wds.linkis.bml.hdfs.prefix | /tmp/linkis | Prefix path of BML files stored on HDFS

    4.2 Metadata configuration parameters#

    Parameter name | Default value | Description
    hadoop.config.dir | /appcom/config/hadoop-config | If it does not exist, the value of the environment variable HADOOP_CONF_DIR is used by default
    hive.config.dir | /appcom/config/hive-config | If it does not exist, the value of the environment variable HIVE_CONF_DIR is used by default
    hive.meta.url | None | URL of the HiveMetaStore database; if hive.config.dir is not configured, this value must be configured
    hive.meta.user | None | User of the HiveMetaStore database
    hive.meta.password | None | Password of the HiveMetaStore database

    4.3 JobHistory configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.jobhistory.admin | None | Default admin account, used to specify which users can view everyone's execution history

    4.4 FileSystem configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.filesystem.root.path | file:///tmp/linkis/ | User's Linux local root directory
    wds.linkis.filesystem.hdfs.root.path | hdfs:///tmp/ | User's HDFS root directory
    wds.linkis.workspace.filesystem.hdfsuserrootpath.suffix | /linkis/ | First-level suffix after the user's HDFS root directory; the user's actual root directory is ${hdfs.root.path}/${user}/${hdfsuserrootpath.suffix}
    wds.linkis.workspace.resultset.download.is.limit | true | Whether to limit the number of rows when the client downloads a result set
    wds.linkis.workspace.resultset.download.maxsize.csv | 5000 | Row limit when the result set is downloaded as a CSV file
    wds.linkis.workspace.resultset.download.maxsize.excel | 5000 | Row limit when the result set is downloaded as an Excel file
    wds.linkis.workspace.filesystem.get.timeout | 2000L | Maximum timeout for requests to the underlying file system (if the performance of your HDFS or Linux machine is low, it is recommended to increase this value appropriately)

    4.5 UDF configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.udf.share.path | /mnt/bdap/udf | Storage path of shared UDFs; it is recommended to set it to an HDFS path

    5. MicroService configuration parameters#

    5.1 Gateway configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.gateway.conf.enable.proxy.user | false | Whether to enable proxy user mode; if enabled, the login user's requests are proxied to the proxy user for execution
    wds.linkis.gateway.conf.proxy.user.config | proxy.properties | Storage file of proxy rules
    wds.linkis.gateway.conf.proxy.user.scan.interval | 600000 | Refresh interval of the proxy file
    wds.linkis.gateway.conf.enable.token.auth | false | Whether to enable token login mode; if enabled, access to Linkis in the form of tokens is allowed
    wds.linkis.gateway.conf.token.auth.config | token.properties | Storage file of token rules
    wds.linkis.gateway.conf.token.auth.scan.interval | 600000 | Refresh interval of the token file
    wds.linkis.gateway.conf.url.pass.auth | /dws/ | Requests released by default without login verification
    wds.linkis.gateway.conf.enable.sso | false | Whether to enable SSO user login mode
    wds.linkis.gateway.conf.sso.interceptor | None | If SSO login mode is enabled, the user needs to implement SSOInterceptor to jump to the SSO login page
    wds.linkis.admin.user | hadoop | Administrator user list
    wds.linkis.login_encrypt.enable | false | Whether the password is transmitted with RSA encryption when the user logs in
    wds.linkis.enable.gateway.auth | false | Whether to enable the Gateway IP whitelist mechanism
    wds.linkis.gateway.auth.file | auth.txt | Storage file of the IP whitelist
    - + \ No newline at end of file diff --git a/docs/1.0.3/tuning_and_troubleshooting/overview/index.html b/docs/1.0.3/tuning_and_troubleshooting/overview/index.html index bb215f6eb52..08847bfef31 100644 --- a/docs/1.0.3/tuning_and_troubleshooting/overview/index.html +++ b/docs/1.0.3/tuning_and_troubleshooting/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -17,7 +17,7 @@ The compatibility of the os version is the best, and some system versions may have command incompatibility. For example, the poor compatibility of yum in ubantu may cause yum-related errors in the installation and deployment. In addition, it is also recommended not to use windows as much as possible. Deploying linkis, currently no script is fully compatible with the .bat command.

  • Missing configuration items: There are two configuration files that need to be modified in Linkis 1.0: linkis-env.sh and db.sh

    The former contains the environment parameters that Linkis needs to load at runtime, and the latter contains the database information for the tables that Linkis itself needs to store. Under normal circumstances, if a required configuration is missing, the error message will show an exception related to the key value. For example, when db.sh is missing the relevant database configuration, an error such as unknown mysql server host '-P' will appear, which is caused by the missing host.

  • Errors when starting microservices

    Linkis puts the log files of all microservices into the logs directory. The log directory levels are as follows:

    ├── linkis-computation-governance
    │   ├── linkis-cg-engineconnmanager
    │   ├── linkis-cg-engineplugin
    │   ├── linkis-cg-entrance
    │   └── linkis-cg-linkismanager
    ├── linkis-public-enhancements
    │   ├── linkis-ps-bml
    │   ├── linkis-ps-cs
    │   ├── linkis-ps-datasource
    │   └── linkis-ps-publicservice
    └── linkis-spring-cloud-services
        ├── linkis-mg-eureka
        └── linkis-mg-gateway

    It includes three microservice modules: computation governance, public enhancements, and microservice governance. Each microservice produces three logs, linkis-gc.log, linkis.log, and linkis.out, corresponding to the service's GC log, service log, and System.out log.

    Under normal circumstances, when an error occurs while starting a microservice, you can cd to the corresponding service directory under the log directory and view the related logs to troubleshoot the problem. The most frequent problems generally fall into three categories:

    1. Port occupation: Since the default ports of Linkis microservices are mostly concentrated around 9000, you need to check whether the port of each microservice is occupied by another process before starting. If a port is occupied, you need to change the corresponding microservice port in the conf/linkis-env.sh file.

    2. Missing required configuration parameters: Some microservices must load certain user-defined parameters before they can start normally. For example, the linkis-cg-engineplugin microservice loads the configuration related to wds.linkis.engineconn.* from conf/linkis.properties when it starts. If the user changes the Linkis path after installation but does not update the configuration accordingly, an error will be reported when the linkis-cg-engineplugin microservice is started.

    3. Incompatible system environment: When deploying and installing, users are advised to follow the recommended system and application versions in the official documents as much as possible and to install the necessary system plug-ins, such as expect, yum, etc. Incompatible application versions may cause application-related errors. For example, the incompatibility of SQL statements in MySQL 5.7 may cause errors in the linkis.ddl and linkis.dml files when initializing the db during installation; you need to refer to the "Q&A Problem Summary" or the deployment documentation to make the corresponding settings.

  • Errors during the microservice execution period

    Errors during the execution of microservices are more complicated, and the situations encountered differ depending on the environment, but the troubleshooting methods are basically the same. Starting from the corresponding microservice's error directory, we can roughly divide them into three situations:

    1. Errors from manually installed and deployed microservices: The logs of these microservices are unified under the log/ directory. After locating the microservice, enter the corresponding directory to view them.

    2. Engine start failure (insufficient resources, engine request failure): When this type of error occurs, it is not necessarily due to insufficient resources, because the front end can only grab the logs after the Spring project has started; errors before the engine starts cannot be fetched well. Three kinds of high-frequency problems have been found in the actual use of internal test users:

      a. The engine cannot be created because there is no permission on the engine directory: The log is printed to the linkis.out file under the cg-engineconnmanager microservice; you need to open that file to find the specific reason.

      b. A dependency conflict in the engine lib package, or the server cannot start normally because of insufficient memory resources: Since the engine directory has already been created, the log is printed to the stdout file under the engine; for the engine path, refer to item c.

      c. Errors reported during engine execution: Each started engine is a microservice that is dynamically loaded and started at runtime. If an error occurs when the engine starts, you need to find the engine's log in the corresponding startup user directory. The corresponding root path is the ENGINECONN_ROOT_PATH filled in linkis-env before installation. If you need to modify the path after installation, modify wds.linkis.engineconn.root.dir in linkis.properties.

    Ⅴ. Community user group consultation and communication#

    For problems that cannot be located and resolved through the above process during installation and deployment, you can send the error message to our community group. To make it easier for community partners and developers to help, and to improve efficiency, when you ask a question please describe the problem, attach the related log information and the places you have already checked, and, if you suspect an environment problem, also list the corresponding application versions. We provide two online groups: a WeChat group and a QQ group. The communication channels and specific contact information can be found at the bottom of the Linkis github homepage.

    Ⅵ. Locate the source code by remote debugging#

    Under normal circumstances, remote debugging of the source code is the most effective way to locate problems, but compared with reviewing documents, it requires users to have a certain understanding of the source code structure. It is recommended to check the detailed structure of the Linkis source code in the Linkis WIKI before remote debugging; after gaining a certain degree of familiarity with the project's source code structure, you can refer to How to Debug Linkis.

    - + \ No newline at end of file diff --git a/docs/1.0.3/tuning_and_troubleshooting/tuning/index.html b/docs/1.0.3/tuning_and_troubleshooting/tuning/index.html index 839ee879fb0..635bbf78716 100644 --- a/docs/1.0.3/tuning_and_troubleshooting/tuning/index.html +++ b/docs/1.0.3/tuning_and_troubleshooting/tuning/index.html @@ -7,7 +7,7 @@ Tuning | Apache Linkis - + @@ -16,7 +16,7 @@ override def getOrCreateGroup(groupName: String): Group = { if (!groupNameToGroups.containsKey(groupName)) synchronized { val initCapacity = 100 val maxCapacity = 100 // other codes... } }

    4. Resource settings related to task runtime#

    When a task is submitted to run on Yarn, Yarn provides a configurable interface. As a highly scalable framework, Linkis can also be configured to set the resource allocation.

    The related configurations of Spark and Hive are as follows:

    Part of the Spark configuration is in linkis-engineconn-plugins/engineconn-plugins; you can adjust the configuration to change the runtime environment of tasks submitted to Yarn. Due to limited space, for more details, such as Hive and Yarn configuration, users should refer to the source code and the parameters documentation.

        "spark.driver.memory" = 2 //Unit is G    "wds.linkis.driver.cores" = 1    "spark.executor.memory" = 4 //Unit is G    "spark.executor.cores" = 2    "spark.executor.instances" = 3    "wds.linkis.rm.yarnqueue" = "default"
    - + \ No newline at end of file diff --git a/docs/1.0.3/upgrade/overview/index.html b/docs/1.0.3/upgrade/overview/index.html index 2e20934af0b..c4c7fa54b7f 100644 --- a/docs/1.0.3/upgrade/overview/index.html +++ b/docs/1.0.3/upgrade/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Overview

    The architecture of Linkis1.0 is very different from Linkis0.x, and there are some changes to the configuration of the deployment package and database tables. Before you install Linkis1.0, please read the following instructions carefully:

    1. If you are installing Linkis for the first time, or reinstalling Linkis, you do not need to pay attention to the Linkis Upgrade Guide.

    2. If you are upgrading from Linkis0.x to Linkis1.0, be sure to read the Linkis Upgrade from 0.x to 1.0 guide carefully.

    - + \ No newline at end of file diff --git a/docs/1.0.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html b/docs/1.0.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html index e45e02cc323..b8635cfbc2a 100644 --- a/docs/1.0.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html +++ b/docs/1.0.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html @@ -7,7 +7,7 @@ Upgrade From 0.X To 1.0 Guide | Apache Linkis - + @@ -16,7 +16,7 @@ Please input the choice: ## choice 1

    3. Database upgrade#

         After the service is installed, the database structure needs to be modified, including table structure changes and new tables and data:

    3.1 Table structure modification part:#

         linkis_task: The submit_user and label_json fields are added to the table. The update statement is:

    ALTER TABLE linkis_task ADD submit_user varchar(50) DEFAULT NULL COMMENT 'submitUser name';
    ALTER TABLE linkis_task ADD `label_json` varchar(200) DEFAULT NULL COMMENT 'label json';

    3.2 SQL that needs to be newly executed:#

    cd db/module
    ## Add the tables that the enginePlugin service depends on:
    source linkis_ecp.sql
    ## Add the table that the public service-instanceLabel service depends on
    source linkis_instance_label.sql
    ## Add the tables that the linkis-manager service depends on
    source linkis_manager.sql

    3.3 Publicservice-Configuration table modification#

         In order to support the full labeling capability of Linkis 1.X, all the data tables related to the configuration module have been upgraded to labeling, which is completely different from the 0.X Configuration tables. The table creation statements and initialization statements need to be re-executed.

         This means that Linkis0.X users' existing engine configuration parameters can no longer be migrated to Linkis1.0 (it is recommended that users reconfigure the engine parameters once).

         The execution of the table building statement is as follows:

    source linkis_configuration.sql

         Because Linkis 1.0 supports multiple engine versions, you need to modify the engine versions when executing the initialization statements, as shown below:

    vim linkis_configuration_dml.sql
    ## Modify the default version of the corresponding engine
    SET @SPARK_LABEL="spark-2.4.3";
    SET @HIVE_LABEL="hive-1.2.1";
    ## Execute the initialization statement
    source linkis_configuration_dml.sql

    4. Install and start Linkis 1.0#

         Start Linkis 1.0 and verify that the services have started normally and can provide external services. For details, please refer to: Quick Deployment of Linkis 1.0

    - + \ No newline at end of file diff --git a/docs/1.0.3/user_guide/console_manual/index.html b/docs/1.0.3/user_guide/console_manual/index.html index 86c593f5a5f..549ace43b32 100644 --- a/docs/1.0.3/user_guide/console_manual/index.html +++ b/docs/1.0.3/user_guide/console_manual/index.html @@ -7,7 +7,7 @@ Console User Manual | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Console User Manual

    Linkis 1.0 has added a new Computation Governance Console page, which provides users with an interactive UI for viewing the execution of Linkis tasks, custom parameter configuration, engine health status, resource surplus, and so on, thereby simplifying user development and management work.

    1. Structure of the Computation Governance Console#

    The Computation Governance Console is mainly composed of the following functional pages:

    • Global History
    • Resource Management
    • Parameter Configuration
    • Global Variables
    • ECM Management (Only visible to linkis computing management console administrators)
    • Microservice Management (Only visible to linkis computing management console administrators)

    Global history, resource management, parameter configuration, and global variables are visible to all users, while ECM management and microservice management are only visible to linkis computing management console administrators.

    The administrator of the Linkis computing management console can be configured through the following parameter in linkis.properties:

    wds.linkis.governance.station.admin=hadoop (multiple administrator usernames are separated by ',')

    2. Global history#

    The global history interface provides the user's own Linkis task submission records. The execution status of each task is displayed here, and the reason for a task's failure can also be queried by clicking the view button on the left side of the task.

    ./media/image2.png

    ./media/image3.png

    For linkis computing management console administrators, the administrator can view the historical tasks of all users by clicking the switch administrator view on the page.

    ./media/image4.png

    3. Resource management#

    In the resource management interface, the user can see the status of the engine currently started and the status of resource occupation, and can also stop the engine through the page.

    ./media/image5.png

    4. Parameter configuration#

    The parameter configuration interface provides the function of user-defined parameter management. The user can manage the related configuration of the engine in this interface, and the administrator can add application types and engines here.

    ./media/image6.png

    Users can expand all the configuration information in the directory by clicking the application type at the top, then select the engine type within the application, modify the configuration information, and click "Save" to take effect.

    Editing the catalog and creating new application types are only visible to the administrator. Click the edit button to delete an existing application or engine configuration (note: deleting an application directly will delete all engine configurations under it and cannot be restored), to add an engine, or click "New Application" to add a new application type.

    ./media/image7.png

    ./media/image8.png

    5. Global variable#

    In the global variable interface, users can customize variables for code writing, just click the edit button to add parameters.

    ./media/image9.png

    6. ECM management#

    The ECM management interface is used by the administrator to manage the ECMs and all engines. On this interface you can view the status information of an ECM, modify its label information, modify its status, and query all engine information under each ECM. Only the administrator can see this page; the administrator's configuration method is described in the second chapter of this article.

    ./media/image10.png

    Click the edit button to edit the label information of the ECM (only some of the labels are allowed to be edited) and modify the status of the ECM.

    ./media/image11.png

    Click the instance name of the ECM to view all engine information under the ECM.

    Similarly, you can stop the engine on this interface, and edit the label information of the engine.

    7. Microservice management#

    The microservice management interface can view all microservice information under Linkis, and this interface is only visible to the administrator. Linkis's own microservices can be viewed by clicking the Eureka registration center; the microservices associated with Linkis are listed directly on this interface.

    - + \ No newline at end of file diff --git a/docs/1.0.3/user_guide/how_to_use/index.html b/docs/1.0.3/user_guide/how_to_use/index.html index 103a45e80bf..8ce2aa773f6 100644 --- a/docs/1.0.3/user_guide/how_to_use/index.html +++ b/docs/1.0.3/user_guide/how_to_use/index.html @@ -7,7 +7,7 @@ How to Use | Apache Linkis - + @@ -18,7 +18,7 @@ DSS Run Workflow

    - + \ No newline at end of file diff --git a/docs/1.0.3/user_guide/linkiscli_manual/index.html b/docs/1.0.3/user_guide/linkiscli_manual/index.html index 938d52dd52a..a4f70354730 100644 --- a/docs/1.0.3/user_guide/linkiscli_manual/index.html +++ b/docs/1.0.3/user_guide/linkiscli_manual/index.html @@ -7,7 +7,7 @@ Linkis-Cli Manual | Apache Linkis - + @@ -16,7 +16,7 @@

    Note:

    1. variableMap does not support configuration

    2. When there is a conflict between the configured key and the key entered in the command parameter, the priority is as follows:

      Instruction Parameters > Key in Instruction Map Type Parameters > User Configuration > Default Configuration

    Example:

    Configure engine startup parameters:

       wds.linkis.client.param.conf.spark.executor.instances=3
       wds.linkis.client.param.conf.wds.linkis.yarnqueue=q02

    Configure labelMap parameters:

       wds.linkis.client.label.myLabel=label123

    Six, output result set to file#

    Use the -outPath parameter to specify an output directory; linkis-cli will then output the result sets to files, with each result set automatically creating its own file. The output format is as follows:

        task-[taskId]-result-[idx].txt    

    For example:

        task-906-result-1.txt
        task-906-result-2.txt
        task-906-result-3.txt
    - + \ No newline at end of file diff --git a/docs/1.0.3/user_guide/overview/index.html b/docs/1.0.3/user_guide/overview/index.html index d6e5e20512c..8dcd3acfedd 100644 --- a/docs/1.0.3/user_guide/overview/index.html +++ b/docs/1.0.3/user_guide/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.0.3/user_guide/sdk_manual/index.html b/docs/1.0.3/user_guide/sdk_manual/index.html index 5107ff1e34c..c1078d1a406 100644 --- a/docs/1.0.3/user_guide/sdk_manual/index.html +++ b/docs/1.0.3/user_guide/sdk_manual/index.html @@ -7,7 +7,7 @@ JAVA SDK Manual | Apache Linkis - + @@ -42,7 +42,7 @@ }
    - + \ No newline at end of file diff --git a/docs/1.1.0/api/http/data-source-manager-api/index.html b/docs/1.1.0/api/http/data-source-manager-api/index.html index 7a514eebafd..e0e1878d497 100644 --- a/docs/1.1.0/api/http/data-source-manager-api/index.html +++ b/docs/1.1.0/api/http/data-source-manager-api/index.html @@ -7,7 +7,7 @@ DataSourceAdminRestfulApi | Apache Linkis - + @@ -20,7 +20,7 @@ Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    dataSourceId | dataSourceId | path | true | integer(int64)

    Response parameters:

    parameter name | parameter description | type | schema
    data | | object
    message | | string
    method | | string
    status | | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/data-source-manager/3/connect-params",    "status": 0,    "message": "OK",    "data": {        "connectParams": {            "host": "127.0.0.1",            "password": "xxxxx",            "port": "9600",            "username": "linkis"        }    }}

    getVersionList#

    Interface address: /api/rest_j/v1/data-source-manager/{dataSourceId}/versions

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    dataSourceId | dataSourceId | path | true | integer(int64)

    Response parameters:

    parameter name | parameter description | type | schema
    data | | object
    message | | string
    method | | string
    status | | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/data-source-manager/1/versions",    "status": 0,    "message": "OK",    "data": {        "versions": [            {                "versionId": 1,                "datasourceId": 1,                "connectParams": {                    "host": "127.0.0.1",                    "password": "xxxxx",                    "port": "9600",                    "username": "linkis"                },                "parameter": "{\"host\":\"127.0.0.1\",\"port\":\"9600\",\"username\":\"linkis\",\"password\": \"rO0ABXQACUFiY2RAMjAyMg==\"}",                "comment": "Initialization Version",                "createUser": "hadoop"            }        ]    }}

    connectDataSource#

    Interface address: /api/rest_j/v1/data-source-manager/{dataSourceId}/{version}/op/connect

    Request method: PUT

    Request data type: application/json

    Response data type: application/json

    Interface description:

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    dataSourceId | dataSourceId | path | true | integer(int64)
    version | version | path | true | integer(int64)

    Response parameters:

    parameter name | parameter description | type | schema
    data | | object
    message | | string
    method | | string
    status | | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/data-source-manager/1/1/op/connect",    "status": 0,    "message": "OK",    "data": {        "ok": true    }}

    data-source-operate-restful-api

    connect#

    Interface address:/api/rest_j/v1/data-source-manager/op/connect/json

    Request method: POST

    Request data type: application/json

    Response data type: application/json

    Interface description:

    Request example:

    {  "connectParams": {},  "createIdentify": "",  "createSystem": "",  "createTime": "",  "createUser": "",  "dataSourceDesc": "",  "dataSourceEnv": {    "connectParams": {},    "createTime": "",    "createUser": "",    "dataSourceType": {      "classifier": "",      "description": "",      "icon": "",      "id": "",      "layers": 0,      "name": "",      "option": ""    },    "dataSourceTypeId": 0,    "envDesc": "",    "envName": "",    "id": 0,    "modifyTime": "",    "modifyUser": ""  },  "dataSourceEnvId": 0,  "dataSourceName": "",  "dataSourceType": {    "classifier": "",    "description": "",    "icon": "",    "id": "",    "layers": 0,    "name": "",    "option": ""  },  "dataSourceTypeId": 0,  "expire": true,  "id": 0,  "labels": "",  "modifyTime": "",  "modifyUser": "",  "publishedVersionId": 0,  "versionId": 0,  "versions": [    {      "comment": "",      "connectParams": {},      "createTime": "",      "createUser": "",      "datasourceId": 0,      "parameter": "",      "versionId": 0    }  ]}

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    dataSource | dataSource | body | true | DataSource | DataSource
    connectParams | | | false | object
    createIdentify | | | false | string
    createSystem | | | false | string
    createTime | | | false | string(date-time)
    createUser | | | false | string
    dataSourceDesc | | | false | string
    dataSourceEnv | | | false | DataSourceEnv | DataSourceEnv
    connectParams | | | false | object
    createTime | | | false | string
    createUser | | | false | string
    dataSourceType | | | false | DataSourceType | DataSourceType
    classifier | | | false | string
    description | | | false | string
    icon | | | false | string
    id | | | false | string
    layers | | | false | integer
    name | | | false | string
    option | | | false | string
    dataSourceTypeId | | | false | integer
    envDesc | | | false | string
    envName | | | false | string
    id | | | false | integer
    modifyTime | | | false | string
    modifyUser | | | false | string
    dataSourceEnvId | | | false | integer(int64)
    dataSourceName | | | false | string
    dataSourceType | | | false | DataSourceType | DataSourceType
    classifier | | | false | string
    description | | | false | string
    icon | | | false | string
    id | | | false | string
    layers | | | false | integer
    name | | | false | string
    option | | | false | string
    dataSourceTypeId | | | false | integer(int64)
    expire | | | false | boolean
    id | | | false | integer(int64)
    labels | | | false | string
    modifyTime | | | false | string(date-time)
    modifyUser | | | false | string
    publishedVersionId | | | false | integer(int64)
    versionId | | | false | integer(int64)
    versions | | | false | array | DatasourceVersion
    comment | | | false | string
    connectParams | | | false | object
    createTime | | | false | string
    createUser | | | false | string
    datasourceId | | | false | integer
    parameter | | | false | string
    versionId | | | false | integer

    Response parameters:

    parameter name | parameter description | type | schema
    data | | object
    message | | string
    method | | string
    status | | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}
    - + \ No newline at end of file diff --git a/docs/1.1.0/api/http/metadatamanager-api/index.html b/docs/1.1.0/api/http/metadatamanager-api/index.html index 9ad5956d584..e758470bb94 100644 --- a/docs/1.1.0/api/http/metadatamanager-api/index.html +++ b/docs/1.1.0/api/http/metadatamanager-api/index.html @@ -7,7 +7,7 @@ MetadataCoreRestful | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    MetadataCoreRestful

    getColumns#

    Interface address: /api/rest_j/v1/metadatamanager/columns/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description: Get the column information of the data table

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    dataSourceId | dataSourceId | path | true | string
    database | database | path | true | string
    system | system | query | true | string
    table | table | path | true | string

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "columns": [            {                "index": 1,                "primaryKey": true,                "name": "id",                "type": "INT"            },            {                "index": 2,                "primaryKey": false,                "name": "datasource_name",                "type": "VARCHAR"            },            {                "index": 3,                "primaryKey": false,                "name": "datasource_desc",                "type": "VARCHAR"            },            {                "index": 4,                "primaryKey": false,                "name": "datasource_type_id",                "type": "INT"            },            {                "index": 5,                "primaryKey": false,                "name": "create_identify",                "type": "VARCHAR"            },            {                "index": 6,                "primaryKey": false,                "name": "create_system",                "type": "VARCHAR"            },            {                "index": 7,                "primaryKey": false,                "name": "parameter",                "type": "VARCHAR"            },            {                "index": 8,                "primaryKey": false,                "name": "create_time",                "type": "DATETIME"            },            {                "index": 9,                "primaryKey": false,                "name": "modify_time",                "type": "DATETIME"            },            {                "index": 10,                "primaryKey": false,                "name": "create_user",                "type": "VARCHAR"            },            {                "index": 11,                "primaryKey": false,                "name": "modify_user",                "type": "VARCHAR"            },            {                "index": 12,                "primaryKey": false,                "name": "labels",                "type": "VARCHAR"            },            {                "index": 13,                "primaryKey": false,                "name": "version_id",                "type": "INT"            },            {                "index": 14,                "primaryKey": false,                "name": "expire",                "type": "TINYINT"            },            {                "index": 15,                "primaryKey": false,                "name": "published_version_id",                "type": "INT"            }        ]    }}

    getDatabases#

    Interface address:/api/rest_j/v1/metadatamanager/dbs/{dataSourceId}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description: Get the list of database names of the data source

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    dataSourceId | dataSourceId | path | true | string
    system | system | query | true | string

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "dbs": [            "information_schema",            "linkis",            "linkis_sit"        ]    }}

    getPartitions#

    Interface address:/api/rest_j/v1/metadatamanager/partitions/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    dataSourceId | dataSourceId | path | true | string
    database | database | path | true | string
    system | system | query | true | string
    table | table | path | true | string
    traverse | traverse | query | false | boolean

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "props": {            "partKeys": [                "ds"            ],            "root": {}        }    }}

    getTableProps#

    Interface address:/api/rest_j/v1/metadatamanager/props/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type
    dataSourceId | dataSourceId | path | true | string
    database | database | path | true | string
    system | system | query | true | string
    table | table | path | true | string

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "props": {            "skip.header.line.count": "1",            "columns.types": "int:int:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string",            "columns": "id,age,job,marital,education,default,balance,housing,loan,contact,day,month,duration,campaign,pdays,previous,poutcome,y",            "field.delim": ",",            "transient_lastDdlTime": "1646732554",            "partition_columns.types": "string",            "columns.comments": "\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000",            "bucket_count": "-1",            "serialization.ddl": "struct demo_data { i32 id, i32 age, string job, string marital, string education, string default, string balance, string housing, string loan, string contact, string day, string month, string duration, string campaign, string pdays, string previous, string poutcome, string y}",            "file.outputformat": "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat",            "partition_columns": "ds",            "colelction.delim": "-",            "serialization.lib": "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",            "name": "dss_autotest.demo_data",            "location": "hdfs://bdpdev01/user/hive/warehouse/hadoop/dss_autotest.db/demo_data",            "mapkey.delim": ":",            "file.inputformat": "org.apache.hadoop.mapred.TextInputFormat",            "serialization.format": ",",            "column.name.delimiter": ","        }    }}
    Task Submission And Execution Of JDBC API

    //3. Create statement and execute query
    Statement st = connection.createStatement();
    ResultSet rs = st.executeQuery("show tables");
    //4. Process the returned results of the database (using the ResultSet class)
    while (rs.next()) {
        ResultSetMetaData metaData = rs.getMetaData();
        for (int i = 1; i <= metaData.getColumnCount(); i++) {
            System.out.print(metaData.getColumnName(i) + ":" + metaData.getColumnTypeName(i) + ": " + rs.getObject(i) + "  ");
        }
        System.out.println();
    }
    // close resources
    rs.close();
    st.close();
    connection.close();
    Version: 1.1.0

    Linkis Task submission and execution Rest API document

    • The Linkis Restful interfaces follow this standard return format:
    { "method": "", "status": 0, "message": "", "data": {}}

    Convention:

    • method: the Restful API URI of the request, mainly used in WebSocket mode.
    • status: the returned status: -1 means not logged in, 0 means success, 1 means error, 2 means validation failed, 3 means no access to the interface.
    • data: the specific returned data.
    • message: the prompt message of the request. If status is not 0, message is an error message, and data may contain a stack field with the specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification

    1. Submit for Execution#

    • Interface /api/rest_j/v1/entrance/execute

    • Submission method POST

    {    "executeApplicationName": "hive", //Engine type    "requestApplicationName": "dss", //Client service type    "executionCode": "show tables",    "params": {"variable": {}, "configuration": {}},    "runType": "hql", //The type of script to run    "source": {"scriptPath":"file:///tmp/hadoop/1.hql"}}
    • Interface /api/rest_j/v1/entrance/submit

    • Submission method POST

    {    "executionContent": {"code": "show tables", "runType": "sql"},    "params": {"variable": {}, "configuration": {}},    "source": {"scriptPath": "file:///mnt/bdp/hadoop/1.hql"},    "labels": {        "engineType": "spark-2.4.3",        "userCreator": "hadoop-IDE"    }}

    • Response example

    { "method": "/api/rest_j/v1/entrance/execute", "status": 0, "message": "Request executed successfully", "data": {   "execID": "030418IDEhivebdpdwc010004:10087IDE_hadoop_21",   "taskID": "123" }}
    • execID is the unique execution ID generated for a task after it is submitted to Linkis. It is of type String and is only useful while the task is running, similar to the concept of a PID. The ExecID is composed as (requestApplicationName length)(executeAppName length)(Instance length)${requestApplicationName}${executeApplicationName}${entranceInstance information ip+port}${requestApplicationName}_${umUser}_${index}

    • taskID is the unique ID of the task submitted by the user. It is generated by database auto-increment and is of type Long
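
    For illustration, here is a minimal Java sketch that submits the sample request above through the submit interface. The gateway address and session cookie are placeholder assumptions.

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class SubmitExample {
        public static void main(String[] args) throws Exception {
            // Request body taken from the sample above; the engineType and
            // userCreator labels must match an engine deployed in your cluster.
            String job = "{"
                    + "\"executionContent\": {\"code\": \"show tables\", \"runType\": \"sql\"},"
                    + "\"params\": {\"variable\": {}, \"configuration\": {}},"
                    + "\"labels\": {\"engineType\": \"spark-2.4.3\", \"userCreator\": \"hadoop-IDE\"}"
                    + "}";
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create("http://127.0.0.1:9001/api/rest_j/v1/entrance/submit")) // placeholder gateway
                    .header("Content-Type", "application/json")
                    .header("Cookie", "...") // session cookie obtained from the login interface
                    .POST(HttpRequest.BodyPublishers.ofString(job))
                    .build();
            HttpResponse<String> resp = HttpClient.newHttpClient()
                    .send(request, HttpResponse.BodyHandlers.ofString());
            // On success, data.execID and data.taskID in the body are used by
            // the status, log, progress and kill interfaces below.
            System.out.println(resp.body());
        }
    }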

    2. Get Status#

    • Interface /api/rest_j/v1/entrance/${execID}/status

    • Submission method GET

    • Response example

    { "method": "/api/rest_j/v1/entrance/{execID}/status", "status": 0, "message": "Get status successful", "data": {   "execID": "${execID}",   "status": "Running" }}

    3. Get Logs#

    • Interface /api/rest_j/v1/entrance/${execID}/log?fromLine=${fromLine}&size=${size}

    • Submission method GET

    • The request parameter fromLine refers to the line number from which to start fetching, and size refers to the number of log lines fetched by this request

    • Response example, where the returned fromLine needs to be used as the parameter of the next request to this interface

    {  "method": "/api/rest_j/v1/entrance/${execID}/log",  "status": 0,  "message": "Return log information",  "data": {    "execID": "${execID}",  "log": ["error log","warn log","info log", "all log"],  "fromLine": 56  }}

    4. Get Progress#

    • Interface /api/rest_j/v1/entrance/${execID}/progress

    • Submission method GET

    • Response example

    {  "method": "/api/rest_j/v1/entrance/{execID}/progress",  "status": 0,  "message": "Return progress information",  "data": {    "execID": "${execID}",    "progress": 0.2,    "progressInfo": [        {        "id": "job-1",        "succeedTasks": 2,        "failedTasks": 0,        "runningTasks": 5,        "totalTasks": 10        },        {        "id": "job-2",        "succeedTasks": 5,        "failedTasks": 0,        "runningTasks": 5,        "totalTasks": 10        }    ]  }}

    5. Kill Task#

    • Interface /api/rest_j/v1/entrance/${execID}/kill

    • Submission method POST

    { "method": "/api/rest_j/v1/entrance/{execID}/kill", "status": 0, "message": "OK", "data": {   "execID":"${execID}"  }}
    Version: 1.1.0

    Login Document

    1. Docking With LDAP Service#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Add LDAP related configuration:

        wds.linkis.ldap.proxy.url=ldap://127.0.0.1:389/    # LDAP service URL
        wds.linkis.ldap.proxy.baseDN=dc=webank,dc=com      # LDAP service configuration

    2. How To Open The Test Mode To Achieve Login-Free#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Turn on the test mode and the parameters are as follows:

        wds.linkis.test.mode=true    # Enable test mode
        wds.linkis.test.user=hadoop  # Specify which user to delegate all requests to in test mode

    3. Login Interface Summary#

    We provide the following login-related interfaces:

    • Login

    • Logout

    • Heartbeat

    4. Interface details#

    • The Linkis Restful interfaces follow this standard return format:
    { "method": "", "status": 0, "message": "", "data": {}}

    Protocol

    • method: the Restful API URI of the request, mainly used in WebSocket mode.
    • status: the returned status: -1 means not logged in, 0 means success, 1 means error, 2 means validation failed, 3 means no access to the interface.
    • data: the specific returned data.
    • message: the prompt message of the request. If status is not 0, message is an error message, and data may contain a stack field with the specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification

    1). Login#

    • Interface /api/rest_j/v1/user/login

    • Submission method POST

          {        "userName": "",        "password": ""      }
    • Response example
        {        "method": null,        "status": 0,        "message": "login successful(登录成功)!",        "data": {            "isAdmin": false,            "userName": ""        }     }

    Among them:

    • isAdmin: Linkis has only admin and non-admin users. The only privilege of an admin user is viewing the historical tasks of all users in the Linkis management console.
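
    For illustration, a minimal Java login sketch follows. The gateway address and credentials are placeholders; a CookieManager is attached so that the session cookie returned on login is automatically carried by subsequent requests made with the same client.

    import java.net.CookieManager;
    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class LoginExample {
        public static void main(String[] args) throws Exception {
            // The CookieManager stores the session cookie set by the login
            // interface, so later calls through this client stay logged in.
            HttpClient client = HttpClient.newBuilder()
                    .cookieHandler(new CookieManager())
                    .build();
            String body = "{\"userName\": \"hadoop\", \"password\": \"***\"}"; // placeholder credentials
            HttpRequest login = HttpRequest.newBuilder()
                    .uri(URI.create("http://127.0.0.1:9001/api/rest_j/v1/user/login")) // placeholder gateway
                    .header("Content-Type", "application/json")
                    .POST(HttpRequest.BodyPublishers.ofString(body))
                    .build();
            HttpResponse<String> response = client.send(login, HttpResponse.BodyHandlers.ofString());
            System.out.println(response.body()); // expect "status": 0 on success
        }
    }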

    2). Logout#

    • Interface /api/rest_j/v1/user/logout

    • Submission method POST

      No parameters

    • Response example

        {        "method": "/api/rest_j/v1/user/logout",        "status": 0,        "message": "Logout successful(退出登录成功)!"    }

    3). Heartbeat#

    • Interface /api/rest_j/v1/user/heartbeat

    • Submission method POST

      No parameters

    • Response example

        {         "method": "/api/rest_j/v1/user/heartbeat",         "status": 0,         "message": "Maintain heartbeat success(维系心跳成功)!"    }
    Version: 1.1.0

    Overview

    1. Document description#

    Linkis 1.0 has been refactored and optimized on the basis of Linkis 0.x, and it is also compatible with the 0.x interfaces. However, in order to prevent compatibility problems when using version 1.0, please read the following documents carefully:

    1. When using Linkis 1.0 for customized development, you need to use Linkis's authorization and authentication interface. Please read the Login API Document carefully.

    2. Linkis 1.0 provides a JDBC interface. If you need to access Linkis via JDBC, please read the Task Submit and Execute JDBC API Document.

    3. Linkis 1.0 provides a Rest interface. If you need to develop upper-layer applications on top of Linkis, please read the Task Submit and Execute Rest API Document.

    Version: 1.1.0

    How to add an EngineConn

    Adding an EngineConn is one of the core processes of the computing task preparation phase of Linkis computing governance. It mainly includes the following steps: first, the client side (Entrance or a user client) initiates a request for a new EngineConn to LinkisManager; then LinkisManager asks an EngineConnManager to start the EngineConn based on the demands and label rules; finally, LinkisManager returns the usable EngineConn to the client side.

    Based on the figure below, let's explain the whole process in detail:

    Process of adding an EngineConn

    1. LinkisManager receives the request from the client side#

    Glossary:

    • LinkisManager: The management center of Linkis computing governance capabilities. Its main responsibilities are:

      1. Based on multi-level combined tags, provide users with available EngineConn after complex routing, resource management and load balancing.

      2. Provide EC and ECM full life cycle management capabilities.

      3. Provide users with multi-Yarn cluster resource management functions based on multi-level combined tags. It is mainly divided into three modules: AppManager, ResourceManager and LabelManager, which support multi-active deployment and have the characteristics of high availability and easy expansion.

    After the AM module receives the client's new EngineConn request, it first checks the validity of the request parameters. Second, it selects the most suitable EngineConnManager (ECM) through complex rules for the subsequent EngineConn startup. Next, it applies to RM for the resources needed to start the EngineConn. Finally, it requests the ECM to create the EngineConn.

    The four steps will be described in detail below.

    1. Request parameter verification#

    After the AM module receives the engine creation request, it checks the parameters. First, it checks the permissions of the requesting user and the creating user, and then checks the Labels attached to the request. Since the subsequent creation process in AM uses Labels to find the ECM and to record resource information, you need to ensure that the necessary Labels are present. At this stage, the request must carry the UserCreatorLabel (for example: hadoop-IDE) and the EngineTypeLabel (for example: spark-2.4.3).

    2. Select an EngineConnManager (ECM)#

    ECM selection uses the Labels passed by the client to pick a suitable ECM service to start the EngineConn. In this step, the LabelManager first searches the registered ECMs with the Labels passed by the client and returns them ordered by label matching degree. After the registered ECM list is obtained, selection rules are applied to these ECMs; at this stage, rules such as availability check, resource surplus, and machine load have been implemented. After rule selection, the ECM with the best label match, the most idle resources, and the lowest load is returned.
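
    As an illustration only, the selection rules described above can be sketched as follows; the class names and rule ordering here are illustrative stand-ins, not the actual Linkis implementation.

    import java.util.Comparator;
    import java.util.List;

    // Illustrative-only model of an ECM candidate and the selection rules.
    record EcmCandidate(String instance, int labelMatchDegree, long idleResource,
                        double load, boolean available) {}

    class EcmSelector {
        // Drop unavailable ECMs, then prefer the highest label match degree,
        // the largest resource surplus, and the lowest machine load.
        static EcmCandidate select(List<EcmCandidate> registered) {
            return registered.stream()
                    .filter(EcmCandidate::available)
                    .sorted(Comparator
                            .comparingInt(EcmCandidate::labelMatchDegree).reversed()
                            .thenComparing(Comparator.comparingLong(EcmCandidate::idleResource).reversed())
                            .thenComparingDouble(EcmCandidate::load))
                    .findFirst()
                    .orElseThrow(() -> new IllegalStateException("no usable ECM"));
        }
    }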

    3. Apply resources required for EngineConn#

    1. After obtaining the assigned ECM, AM asks the EngineConnPluginServer service how many resources the client's engine creation request will use. Here the resource request is encapsulated, mainly including the Labels, the EngineConn startup parameters passed by the client, and the user configuration parameters obtained from the Configuration module. The resource information is obtained by calling the ECP service through RPC.

    2. After the EngineConnPluginServer service receives the resource request, it finds the corresponding engine tag through the passed tags and selects the EngineConnPlugin of the corresponding engine through the engine tag. It then uses the EngineConnPlugin's resource generator to calculate, from the engine startup parameters passed in by the client, the resources required to apply for a new EngineConn this time, and returns the result to LinkisManager.

      Glossary:

    • EngineConnPlugin: The interface that Linkis must implement when connecting a new computing storage engine. This interface mainly includes several capabilities that the EngineConn must provide during the startup process, including the EngineConn resource generator, the EngineConn startup command generator, and the EngineConn connector. Please refer to the Spark engine implementation class for a concrete implementation: SparkEngineConnPlugin.
    • EngineConnPluginServer: A microservice that loads all the EngineConnPlugins and externally provides EngineConn's resource generation and startup command generation capabilities.
    • EngineConnResourceFactory: Calculate the total resources needed when EngineConn starts this time through the parameters passed in.
    • EngineConnLaunchBuilder: Through the incoming parameters, a startup command of the EngineConn is generated to provide the ECM to start the engine.
    3. After AM obtains the engine resources, it calls the RM service to apply for resources. The RM service uses the incoming Labels, the ECM, and the resources applied for this time to make resource judgments. First, it judges whether the resources of the client corresponding to the Labels are sufficient, and then whether the resources of the ECM service are sufficient. If so, the resource application is approved and the resources of the corresponding Labels are added or subtracted.

    4. Request ECM for engine creation#

    1. After completing the resource application for the engine, AM will encapsulate the engine startup request, send it to the corresponding ECM via RPC for service startup, and obtain the instance object of EngineConn.
    2. AM then determines whether the EngineConn is successfully started and becomes available through the information reported by the EngineConn. If so, the result is returned and the process of adding an engine ends.

    2. ECM initiates EngineConn#

    Glossary:

    • EngineConnManager: EngineConn's manager. Provides engine life-cycle management, and at the same time reports load information and its own health status to RM.
    • EngineConnBuildRequest: The start engine command passed by LinkisManager to ECM, which encapsulates all tag information, required resources and some parameter configuration information of the engine.
    • EngineConnLaunchRequest: Contains the BML materials, environment variables, ECM required local environment variables, startup commands and other information required to start an EngineConn, so that ECM can build a complete EngineConn startup script based on this.

    After ECM receives the EngineConnBuildRequest command passed by LinkisManager, it is mainly divided into three steps to start EngineConn:

    1. Request EngineConnPluginServer to obtain EngineConnLaunchRequest encapsulated by EngineConnPluginServer.
    2. Parse EngineConnLaunchRequest and encapsulate it into EngineConn startup script.
    3. Execute startup script to start EngineConn.

    2.1 EngineConnPluginServer encapsulates EngineConnLaunchRequest#

    Get the EngineConn type and corresponding version that actually need to be started from the label information of the EngineConnBuildRequest, get the EngineConnPlugin of that EngineConn type from the memory of EngineConnPluginServer, and convert the EngineConnBuildRequest into an EngineConnLaunchRequest through the EngineConnLaunchBuilder of that EngineConnPlugin.

    2.2 Encapsulate EngineConn startup script#

    After the ECM obtains the EngineConnLaunchRequest, it downloads the BML materials in the EngineConnLaunchRequest to the local, and checks whether the local necessary environment variables required by the EngineConnLaunchRequest exist. After the verification is passed, the EngineConnLaunchRequest is encapsulated into an EngineConn startup script.

    2.3 Execute startup script#

    Currently, ECM only supports Bash startup scripts for Unix-like systems; that is, only Linux systems can execute the startup script.

    Before startup, the sudo command is used to switch to the corresponding requesting user to execute the script, ensuring that the startup user (i.e., the JVM user) is the requesting user on the client side.

    After the startup script is executed, ECM monitors the execution status and execution log of the script in real time. Once the exit status is non-zero, it immediately reports an EngineConn startup failure to LinkisManager and the entire process is complete; otherwise, it keeps monitoring the log and status of the startup script until the script execution completes.

    3. EngineConn initialization#

    After ECM executes EngineConn's startup script, the EngineConn microservice is officially launched.

    Glossary:

    • EngineConn microservice: Refers to the actual microservices that include an EngineConn and one or more Executors to provide computing power for computing tasks. When we talk about adding an EngineConn, we actually mean adding an EngineConn microservice.
    • EngineConn: The engine connector is the actual connection unit with the underlying computing storage engine, and contains the session information with the actual engine. The difference between it and Executor is that EngineConn only acts as a connection and a client, and does not actually perform calculations. For example, SparkEngineConn, its session information is SparkSession.
    • Executor: As a real computing storage scenario executor, it is the actual computing storage logic execution unit. It abstracts the various capabilities of EngineConn and provides multiple different architectural capabilities such as interactive execution, subscription execution, and responsive execution.

    The initialization of EngineConn microservices is generally divided into three stages:

    1. Initialize the EngineConn of the specific engine. First, use the command-line parameters of the Java main method to encapsulate an EngineCreationContext that contains the relevant label information, startup information, and parameter information, and initialize the EngineConn through the EngineCreationContext to establish the connection between the EngineConn and the underlying engine. For example, SparkEngineConn initializes a SparkSession at this stage to establish a connection with a Spark application.
    2. Initialize the Executor. After the EngineConn is initialized, the corresponding Executor is initialized according to the actual usage scenario to provide service capabilities for subsequent users. For example, the SparkEngineConn in the interactive computing scenario initializes a series of Executors that can be used to submit and execute SQL, PySpark, and Scala code, supporting the client to submit and execute SQL, PySpark, Scala and other code to the SparkEngineConn.
    3. Report the heartbeat to LinkisManager regularly, and wait for the EngineConn to exit. When the underlying engine corresponding to the EngineConn is abnormal, or the maximum idle time is exceeded, or the Executor finishes execution, or the user manually kills it, the EngineConn automatically ends and exits.

    At this point, the process of adding a new EngineConn is basically over. Finally, let's make a summary:

    • The client initiates a request for adding EngineConn to LinkisManager.
    • LinkisManager checks the legitimacy of the parameters, first selects the appropriate ECM according to the label, then confirms the resources required for this new EngineConn according to the user's request, applies for resources from the RM module of LinkisManager, and requires ECM to start a new EngineConn as required after the application is passed.
    • ECM first requests EngineConnPluginServer to obtain an EngineConnLaunchRequest containing BML materials, environment variables, ECM required local environment variables, startup commands and other information needed to start an EngineConn, and then encapsulates the startup script of EngineConn, and finally executes the startup script to start the EngineConn.
    • EngineConn initializes the EngineConn of a specific engine, and then initializes the corresponding Executor according to the actual usage scenario, and provides service capabilities for subsequent users. Finally, report the heartbeat to LinkisManager regularly, and wait for the normal end or termination by the user.
    Version: 1.1.0

    Message Scheduler Module

    1 Overview#

    Linkis-RPC can realize communication between microservices. In order to simplify the use of RPC, Linkis provides the Message-Scheduler module, which analyzes, identifies and invokes methods annotated with @Receiver. At the same time, it also unifies the use of RPC and Restful interfaces, which gives better scalability. (A sketch of the @Receiver pattern follows the module description below.)

    2. Architecture description#

    2.1. Architecture design diagram#

    Module Design Drawing

    2.2. Module description#

    • ServiceParser: Parse the (Object) object of the Service module, and encapsulate the @Receiver annotated method into the ServiceMethod object.
    • ServiceRegistry: Register the corresponding Service module, and store the ServiceMethod parsed by the Service in the Map container.
    • ImplicitParser: parse the object of the Implicit module, and the method annotated with @Implicit will be encapsulated into the ImplicitMethod object.
    • ImplicitRegistry: Register the corresponding Implicit module, and store the resolved ImplicitMethod in a Map container.
    • Converter: Scans the non-interface, non-abstract subclasses of RequestMethod and stores them in a Map; parses Restful requests and matches the related RequestProtocol.
    • Publisher: Realize the publishing scheduling function, find the ServiceMethod matching the RequestProtocol in the Registry, and encapsulate it as a Job for submission scheduling.
    • Scheduler: Scheduling implementation, using Linkis-Scheduler to execute the job and return the MessageJob object.
    • TxManager: Complete transaction management, perform transaction management on job execution, and judge whether to commit or rollback after the job execution ends.
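
    As an illustration of how these modules fit together, the following sketch shows the @Receiver usage pattern; all class, package and method names here are simplified stand-ins rather than the actual Linkis source.

    // A stand-in annotation so this sketch is self-contained; the real
    // annotation lives in the Linkis message-scheduler module.
    @interface Receiver {}

    class EngineAskRequest { /* a RequestProtocol implementation, simplified */ }
    class EngineAskResponse { /* a simplified response type */ }

    class EngineAskService {
        // ServiceParser wraps this annotated method into a ServiceMethod and
        // ServiceRegistry stores it; when a matching RequestProtocol arrives,
        // Publisher encapsulates the call as a Job and Scheduler executes it.
        @Receiver
        public EngineAskResponse dealEngineAskRequest(EngineAskRequest request) {
            return new EngineAskResponse();
        }
    }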
    RPC Module

    At the same time, because Feign only supports simple service selection rules, it cannot forward a request to a specified microservice instance, and cannot broadcast a request to all instances of the recipient microservice.

    2. Architecture description#

    2.1. Architecture design diagram#

    Linkis RPC architecture diagram

    2.2. Module description#

    The functions of the main modules are introduced as follows:

    • Eureka: service registration center, user management service, service discovery.
    • Sender: Service request interface, the sender uses Sender to request service from the receiver.
    • Receiver: The service request receives the corresponding interface, and the receiver responds to the service through this interface.
    • Interceptor: The Sender passes the user's request to the interceptor, which intercepts the request and performs additional functional processing: the broadcast interceptor broadcasts operations on the request, the retry interceptor retries failed requests, the cache interceptor reads and caches simple and unchanging requests, and the default interceptor provides the default implementation.
    • Decoder, Encoder: used for request encoding and decoding.
    • Feign: A lightweight framework for HTTP request calls, a declarative web service client used for the underlying communication of Linkis-RPC.
    • Listener: monitor module, mainly used to monitor broadcast requests.
    Version: 1.1.0

    EngineConn architecture design

    EngineConn: the engine connector, used to create a connection session with the underlying computing storage engine. It contains the session information between the engine and the specific cluster and is the client that communicates with the specific engine.

    EngineConn architecture diagram

    EngineConn

    Introduction to the second-level module:

    linkis-computation-engineconn interactive engine connector#

    Provides the ability to execute interactive computing tasks.

    Core class | Core function
    EngineConnTask | Defines the interactive computing tasks submitted to EngineConn
    ComputationExecutor | Defines the interactive Executor, with interactive capabilities such as status query and task kill
    TaskExecutionService | Provides management functions for interactive computing tasks

    linkis-engineconn-common engine connector common module#

    Define the most basic entity classes and interfaces in the engine connector. EngineConn is used to create a connection session for the underlying computing storage engine, which contains the session information between the engine and the specific cluster, and is the client that communicates with the specific engine.

    Core service | Core function
    EngineCreationContext | Contains the context information of EngineConn during startup
    EngineConn | Contains the specific information of EngineConn, such as type and the specific connection information with the underlying computing storage engine
    EngineExecution | Provides the Executor creation logic
    EngineConnHook | Defines the operations before and after each phase of engine startup

    The core logic of linkis-engineconn-core engine connector#

    Defines the interfaces involved in the core logic of EngineConn.

    Core class | Core function
    EngineConnManager | Provides related interfaces for creating and obtaining EngineConn
    ExecutorManager | Provides related interfaces for creating and obtaining Executor
    ShutdownHook | Defines the operation of the engine shutdown phase

    linkis-engineconn-launch engine connector startup module#

    Defines the logic of how to start EngineConn.

    Core class | Core function
    EngineConnServer | EngineConn microservice startup class

    The core logic of the linkis-executor-core executor#

    Defines the core classes related to the executor. The executor is the actual executor in real computing scenarios, responsible for submitting user code to EngineConn.

    Core class | Core function
    Executor | The actual computational logic execution unit; provides a top-level abstraction of the various capabilities of the engine
    EngineConnAsyncEvent | Defines EngineConn-related asynchronous events
    EngineConnSyncEvent | Defines EngineConn-related synchronization events
    EngineConnAsyncListener | Defines the EngineConn-related asynchronous event listener
    EngineConnSyncListener | Defines the EngineConn-related synchronization event listener
    EngineConnAsyncListenerBus | Defines the listener bus for EngineConn asynchronous events
    EngineConnSyncListenerBus | Defines the listener bus for EngineConn synchronization events
    ExecutorListenerBusContext | Defines the context of the EngineConn event listener
    LabelService | Provides the label reporting function
    ManagerService | Provides the function of information transfer with LinkisManager

    linkis-callback-service callback logic#

    Core class | Core function
    EngineConnCallback | Defines EngineConn's callback logic

    linkis-accessible-executor accessible executor#

    An Executor that can be accessed. You can interact with it through RPC requests to get its status, load, concurrency and other basic metrics.

    Core class | Core function
    LogCache | Provides the log cache function
    AccessibleExecutor | The Executor that can be accessed; you can interact with it through RPC requests
    NodeHealthyInfoManager | Manages the health information of the Executor
    NodeHeartbeatMsgManager | Manages the heartbeat information of the Executor
    NodeOverLoadInfoManager | Manages the load information of the Executor
    Listener | Provides events related to the Executor and the corresponding listener definitions
    EngineConnTimedLock | Defines the Executor-level lock
    AccessibleService | Provides the start-stop and status acquisition functions of the Executor
    ExecutorHeartbeatService | Provides heartbeat-related functions of the Executor
    LockService | Provides the lock management function
    LogService | Provides log management functions
    EngineConnManager Design

    The core services and features of the module are as follows:

    Core service | Core function
    EngineConnLaunchService | Contains the core methods for generating EngineConn and starting the process
    BmlResourceLocallizationService | Used to download BML engine-related resources and generate the localized file directory
    ECMHealthService | Reports its own healthy heartbeat to AM regularly
    ECMMetricsService | Reports its own metrics status to AM regularly
    EngineConnKillService | Provides related functions to stop the engine
    EngineConnListService | Provides caching and management functions for engines
    EngineConnCallBackService | Provides the callback function for engines
    EngineConnPlugin (ECP) Design

    Other services such as the Manager call the logic of the corresponding plug-in in Plugin Server through RPC requests.

    Core class | Core function
    EngineConnLaunchService | Responsible for building the engine connector launch request
    EngineConnResourceFactoryService | Responsible for generating engine resources
    EngineConnResourceService | Responsible for downloading the resource files used by the engine connector from BML

    EngineConn-Plugin-Loader Engine Connector Plugin Loader#

    The engine connector plug-in loader is used to dynamically load engine connector plug-ins according to the request parameters, and it has caching characteristics. The loading process consists of two parts: 1) plug-in resources, such as the main program package and program dependency packages, are loaded locally (not open); 2) plug-in resources are dynamically loaded from the local file system into the service process environment, for example, loaded into the JVM through a class loader.

    Core class | Core function
    EngineConnPluginsResourceLoader | Loads engine connector plug-in resources
    EngineConnPluginsLoader | Loads the engine connector plug-in instance, or loads an existing one from the cache
    EngineConnPluginClassLoader | Dynamically instantiates an engine connector instance from a jar

    EngineConn-Plugin-Cache engine plug-in cache module#

    The engine connector plug-in cache is a cache service specially used to cache loaded engine connectors, supporting read, update, and remove operations. A plug-in that has been loaded into the service process is cached together with its class loader to prevent repeated loading from affecting efficiency. At the same time, the cache module periodically notifies the loader to update the plug-in resources; if changes are found, the plug-in is reloaded and the cache is refreshed automatically.

    Core class | Core function
    EngineConnPluginCache | Caches loaded engine connector instances
    RefreshPluginCacheContainer | Refreshes the cached engine connectors regularly

    EngineConn-Plugin-Core: Engine connector plug-in core module#

    The engine connector plug-in core module is the core module of the engine connector plug-in. Contains the implementation of the basic functions of the engine plug-in, such as the construction of the engine connector start command, the construction of the engine resource factory and the implementation of the core interface of the engine connector plug-in.

    Core class | Core function
    EngineConnLaunchBuilder | Builds the engine connector launch request
    EngineConnFactory | Creates the engine connector
    EngineConnPlugin | The engine connector plug-in interface, including resources, commands, and instance construction methods
    EngineResourceFactory | The engine resource creation factory

    EngineConn-Plugins: Engine connection plugin collection#

    The engine connection plug-in collection is used to place the default engine connector plug-in library that has been implemented based on the plug-in interface defined by us. Provides the default engine connector implementation, such as jdbc, spark, python, shell, etc. Users can refer to the implemented cases based on their own needs to implement more engine connectors.

    Core class | Core function
    engineplugin-jdbc | jdbc engine connector
    engineplugin-shell | Shell engine connector
    engineplugin-spark | spark engine connector
    engineplugin-python | python engine connector
    Version: 1.1.0

    Entrance Architecture Design

    The Linkis task submission portal is used to receive, schedule, and forward execution requests and to manage the life cycle of computing tasks, and it can return calculation results, logs, and progress to the caller. It is split out of the native capabilities of Entrance in Linkis 0.x.

    1. Entrance architecture diagram

    Introduction to the second-level module:

    EntranceServer#

    EntranceServer computing task submission portal service is the core service of Entrance, responsible for the reception, scheduling, execution status tracking, and job life cycle management of Linkis execution tasks. It mainly realizes the conversion of task execution requests into schedulable Jobs, scheduling, applying for Executor execution, job status management, result set management, log management, etc.

    Core class | Core function
    EntranceInterceptor | The Entrance interceptor supplements the information of the incoming task, making the content of the task more complete. The supplementary information includes database information, custom variable replacement, code inspection, limit restrictions, etc.
    EntranceParser | The Entrance parser parses the request parameter Map into a Task; it can also convert a Task into a schedulable Job, or a Job into a storable Task
    EntranceExecutorManager | Creates an Executor for the execution of an EntranceJob, maintains the relationship between Job and Executor, and supports the labeling capabilities requested by the Job
    PersistenceManager | Responsible for job-related persistence operations, such as storing the result set path, job status changes, progress, etc. in the database
    ResultSetEngine | Responsible for storing the result set after the job runs; it is saved as a file to HDFS or a local storage directory
    LogManager | Responsible for storing job logs and managing log error codes
    Scheduler | The job scheduler is responsible for the scheduling and execution of all jobs, mainly through scheduling job queues
    Version: 1.1.0

    Linkis-Client Architecture Design

    Provide users with a lightweight client that submits tasks to Linkis for execution.

    Linkis-Client architecture diagram#


    Second-level module introduction#

    Linkis-Computation-Client#

    Provides an interface for users to submit execution tasks to Linkis in the form of SDK.

    Core class | Core function
    Action | Defines the attributes, methods and parameters included in a request
    Result | Defines the properties, methods and parameters of a returned result
    UJESClient | Responsible for request submission, execution, status, results and related parameters acquisition
    Linkis-Cli#

    Provides a way for users to submit tasks to Linkis in the form of a shell command terminal.

    Core class | Core function
    Common | Defines the instruction template parent class, the instruction analysis entity class, and the parent classes and interfaces of the task submission and execution links
    Core | Responsible for parsing input, executing tasks and defining output methods
    Application | Calls linkis-computation-client to perform tasks, and pulls logs and final results in real time
    App Manager

    Engine manager: The engine manager is responsible for managing the basic information and metadata information of all engines.

    Label Manager

    We set that the higher the proportion of candidate nodes associated with irrelevant labels among the total associated nodes, the more significant the impact on the score, which further accumulates onto the initial node score obtained in the first step.
  • Normalize the standard deviation of the scores of the candidate nodes and sort them.
    LinkisManager Overview

    ResourceManager

    4. Monitoring module linkis-manager-monitor#

            Monitor provides the function of node status monitoring.

    Resource Manager

    (... url, Hadoop version and other information) are maintained in the linkis_external_resource_provider table.

  • For each resource type, there is an implementation of the ExternalResourceProviderParser interface, which parses the attributes of external resources, converts the information that can be matched to the Label into the corresponding Label, and converts the information that can be used as a parameter to request the resource interface into params . Finally, an ExternalResourceProvider instance that can be used as a basis for querying external resource information is constructed.

  • According to the resource type and label information in the parameters of the ExternalResourceService method, find the matching ExternalResourceProvider, generate an ExternalResourceRequest based on the information in it, and formally call the API provided by the external resource to initiate a resource information request.



    Version: 1.1.0

    Job submission, preparation and execution process

    The submission and execution of computing tasks (Jobs) is the core capability provided by Linkis. It runs through almost all modules in the Linkis computing governance architecture and occupies a core position in Linkis.

    The whole process, starting with the submission of a user's computing task from the client and ending with the return of the final result, is divided into three stages: submission -> preparation -> execution. The details are shown in the following figure.

    The overall flow chart of computing tasks

    Among them:

    • Entrance, as the entrance to the submission stage, provides task reception, scheduling and job information forwarding capabilities. It is the unified entrance for all computing tasks. It will forward computing tasks to Orchestrator for scheduling and execution.

    • Orchestrator, as the entrance to the preparation phase, mainly provides job analysis, orchestration and execution capabilities.

    • Linkis Manager: The management center of computing governance capabilities. Its main responsibilities are as follows:

      1. ResourceManager: not only has the resource management capabilities of Yarn and Linkis EngineConnManager, but also provides tag-based multi-level resource allocation and recycling, giving ResourceManager full resource management capabilities across clusters and across computing resource types;
      2. AppManager: coordinates and manages all EngineConnManagers and EngineConns, handing over the life cycle of EngineConn application, reuse, creation, switching, and destruction to AppManager for management;
      3. LabelManager: based on multi-level combined labels, provides label support for the routing and management capabilities of EngineConn and EngineConnManager across IDCs and across clusters;
      4. EngineConnPluginServer: externally provides the resource generation capabilities required to start an EngineConn and the EngineConn startup command generation capabilities.
    • EngineConnManager: It is the manager of EngineConn, which provides engine life-cycle management, and at the same time reports load information and its own health status to RM.

    • EngineConn: It is the actual connector between Linkis and the underlying computing storage engines. All user computing and storage tasks will eventually be submitted to the underlying computing storage engine by EngineConn. According to different user scenarios, EngineConn provides full-stack computing capability framework support for interactive computing, streaming computing, off-line computing, and data storage tasks.

    1. Submission Stage#

    The submission phase is mainly the interaction of Client -> Linkis Gateway -> Entrance, and the process is as follows:

    Flow chart of submission phase

    1. First, the Client (such as the front end or the client) initiates a Job request, and the job request information is simplified as follows (for the specific usage of Linkis, please refer to How to use Linkis):
    POST /api/rest_j/v1/entrance/submit
    {     "executionContent": {"code": "show tables", "runType": "sql"},     "params": {"variable": {}, "configuration": {}}, //not required     "source": {"scriptPath": "file:///1.hql"}, //not required, only used to record code source     "labels": {         "engineType": "spark-2.4.3", //Specify engine         "userCreator": "username-IDE" // Specify the submission user and submission system     }}
    2. After Linkis-Gateway receives the request, according to the serviceName in the URI /api/rest_j/v1/${serviceName}/.+, it confirms the microservice name for routing and forwarding. Here Linkis-Gateway parses out the name as entrance, and the Job is forwarded to the Entrance microservice. Note that if the user specifies a routing label, the Entrance microservice instance with the corresponding label is selected for forwarding according to the routing label instead of random forwarding.
    3. After Entrance receives the Job request, it first simply verifies the legitimacy of the request, then uses RPC to call JobHistory to persist the job information, encapsulates the Job request as a computing task, puts it in the scheduling queue, and waits for it to be consumed by a consumer thread.
    4. The scheduling queue opens up a consumption queue and a consumer thread for each group. The consumption queue stores the preliminarily encapsulated user computing tasks, and the consumer thread keeps taking computing tasks from the consumption queue for consumption in a FIFO manner. The current default grouping is Creator + User (that is, submission system + user). Therefore, even for the same user, as long as the computing tasks are submitted by different systems, the actual consumption queues and consumer threads are completely different and completely isolated from each other. (Reminder: users can modify the grouping algorithm as needed; a simplified sketch follows this list.)
    5. After the consumer thread takes out a computing task, it submits the task to Orchestrator, which officially enters the preparation phase.
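
    The grouping behavior described in step 4 can be pictured with the following simplified Java sketch; the real Scheduler has pluggable grouping and consumer implementations, and the names here are illustrative.

    import java.util.Map;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.LinkedBlockingQueue;

    // Simplified model of the Entrance scheduling queue: one FIFO consumption
    // queue per "creator_user" group, each drained by its own consumer thread.
    class GroupedScheduler {
        private final Map<String, BlockingQueue<Runnable>> groups = new ConcurrentHashMap<>();

        void submit(String creator, String user, Runnable task) {
            String groupKey = creator + "_" + user; // default grouping: submission system + user
            groups.computeIfAbsent(groupKey, key -> {
                LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();
                Thread consumer = new Thread(() -> {
                    try {
                        while (true) queue.take().run(); // FIFO consumption
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                }, "consumer-" + key);
                consumer.setDaemon(true);
                consumer.start();
                return queue;
            }).offer(task);
        }
    }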

    2. Preparation Stage#

    There are two main processes in the preparation phase. One is to apply to LinkisManager for an available EngineConn for submitting and executing the subsequent computing task. The other is for Orchestrator to orchestrate the computing task submitted by Entrance and convert the user's computing request into a physical execution tree, which is handed over to the execution phase, where the computing task is actually executed.

    2.1 Apply to LinkisManager for available EngineConn#

    If the user has a reusable EngineConn in LinkisManager, the EngineConn is directly locked and returned to Orchestrator, and the entire application process ends.

    How is a reusable EngineConn defined? It is one that matches all the label requirements of the computing task and whose own health status is Healthy (the load is low and the actual status is Idle). All EngineConns that meet the conditions are then sorted and selected according to the rules, and the best one is finally locked.

    If the user does not have a reusable EngineConn, a process to request a new EngineConn will be triggered at this time. Regarding the process, please refer to: How to add an EngineConn.

    2.2 Orchestrate a computing task#

    Orchestrator is mainly responsible for arranging a computing task (JobReq) into a physical execution tree (PhysicalTree) that can be actually executed, and providing the execution capabilities of the Physical tree.

    Here we first focus on Orchestrator's computing task scheduling capabilities. A flow chart is shown below:

    Orchestration flow chart

    The main process is as follows:

    • Converter: Complete the conversion of the JobReq (task request) submitted by the user to Orchestrator's ASTJob. This step will perform parameter check and information supplementation on the calculation task submitted by the user, such as variable replacement, etc.
    • Parser: Complete the analysis of ASTJob. Split ASTJob into an AST tree composed of ASTJob and ASTStage.
    • Validator: Complete the inspection and information supplement of ASTJob and ASTStage, such as code inspection, necessary Label information supplement, etc.
    • Planner: Convert an AST tree into a Logical tree. The Logical tree at this time has been composed of LogicalTask, which contains all the execution logic of the entire computing task.
    • Optimizer: Convert a Logical tree to a Physical tree and optimize the Physical tree.

    In a physical tree, the majority of nodes are computing strategy logic. Only the middle ExecTask truly encapsulates the execution logic which will be further submitted to and executed at EngineConn. As shown below:

    Physical Tree

    Different computing strategies have different execution logics encapsulated by JobExecTask and StageExecTask in the Physical tree.

    For example, under the multi-active computing strategy, for a computing task submitted by a user, the execution logic submitted to the EngineConns of different clusters is encapsulated in two ExecTasks, and the related strategy logic is reflected in their parent node, StageExecTask(End).

    Here, we take the multi-reading scenario under the multi-active computing strategy as an example.

    In the multi-reading scenario, only one ExecTask needs to return a result; once that result is returned, the Physical tree can be marked as successful. However, the Physical tree can only execute sequentially according to dependencies and cannot terminate the execution of individual nodes: once a node is canceled or fails, the entire Physical tree would be marked as failed. StageExecTask(End) is therefore needed to ensure that the Physical tree can cancel the ExecTask that failed, continue to upload the result set generated by the successful ExecTask, and let the Physical tree continue executing. This is the computing-strategy execution logic represented by StageExecTask.
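
    A minimal sketch of this multi-reading behavior, using a plain Java thread pool as a stand-in for the Physical tree; the names and structure are illustrative assumptions, not Orchestrator code:

    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    // Hypothetical sketch of StageExecTask(End) semantics in the multi-reading scenario:
    // the first ExecTask result wins, the rest are cancelled, and the stage still succeeds.
    public class MultiReadStage {
        public static void main(String[] args) throws Exception {
            ExecutorService pool = Executors.newFixedThreadPool(2);
            // Two ExecTasks submitting the same computation to EngineConns in different clusters.
            List<Callable<String>> execTasks = List.of(
                    () -> { TimeUnit.SECONDS.sleep(2); return "result-from-cluster-A"; },
                    () -> { TimeUnit.SECONDS.sleep(1); return "result-from-cluster-B"; });
            // invokeAny returns the first successfully completed task and cancels the others,
            // mirroring "one result is enough; cancel the rest and let the tree continue".
            System.out.println(pool.invokeAny(execTasks));
            pool.shutdown();
        }
    }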

    The orchestration process of Linkis Orchestrator is similar to that of many SQL parsing engines (such as the SQL parsers of Spark and Hive). But in fact, the orchestration capability of Linkis Orchestrator is built for the computing governance field, to serve users' different computing governance needs, whereas a SQL parsing engine performs parsing and orchestration oriented to the SQL language. Here is a simple distinction:

    1. What Linkis Orchestrator mainly solves is the orchestration requirements that different computing strategies impose on computing tasks. For example, to be multi-active, Orchestrator will, based on the "multi-active" computing strategy requirements, compile a Physical tree for a computing task submitted by a user, so that the task is submitted to multiple clusters for execution. In constructing the entire Physical tree, various possible abnormal scenarios are fully considered and reflected in the tree.
    2. The orchestration ability of Linkis Orchestrator has nothing to do with the programming language. In theory, as long as an engine has been adapted to Linkis, all the programming languages it supports can be orchestrated, whereas a SQL parsing engine only cares about the analysis and execution of SQL: it is only responsible for parsing a piece of SQL into an executable Physical tree and finally computing the result.
    3. Linkis Orchestrator also has the ability to parse SQL, but SQL parsing is just one of Orchestrator Parser's analytic implementations for the SQL programming language. The Parser of Linkis Orchestrator is also considering introducing Apache Calcite to parse SQL: this would support splitting a user SQL that spans multiple computing engines (engines that Linkis has already docked) into multiple sub-SQLs, submitting each to its corresponding engine during the execution phase, and finally selecting a suitable computing engine for summary calculation.

    After being analyzed and orchestrated by Linkis Orchestrator, the computing task has been transformed into an executable Physical tree. Orchestrator then submits the Physical tree to its Execution module, entering the final execution stage.

    3. Execution Stage#

    The execution stage is mainly divided into the following two steps, which are the last two capabilities provided by Linkis Orchestrator:

    Flow chart of the execution stage

    The main process is as follows:

    • Execution: Analyzes the dependencies of the Physical tree and executes the nodes sequentially from the leaves upward according to those dependencies.
    • Reheater: Once a node of the Physical tree finishes executing, it triggers a reheat. Reheating allows the Physical tree to be dynamically adjusted according to real-time execution. For example: if a leaf node is detected to have failed and it supports retry (the failure was caused by throwing a retry exception), the Physical tree is automatically adjusted and a retry parent node with exactly the same content is added above the leaf node.

    Let us go back to the Execution stage and focus on the execution logic of the ExecTask node, which encapsulates the user computing task submitted to EngineConn.

    1. As mentioned earlier, the first step of the preparation stage is to obtain a usable EngineConn from LinkisManager. After ExecTask gets this EngineConn, it submits the user's computing task to the EngineConn through an RPC request.
    2. After EngineConn receives the computing task, it asynchronously submits it to the underlying computing storage engine through a thread pool, and then immediately returns an execution ID.
    3. After ExecTask gets this execution ID, it can use the ID to asynchronously pull the execution information of the computing task (status, progress, logs, result sets, etc.); steps 2 and 3 are sketched after this list.
    4. At the same time, EngineConn monitors the execution of the underlying computing storage engine in real time through multiple registered Listeners. If the computing storage engine does not support registering Listeners, EngineConn starts a daemon thread for the computing task and periodically pulls the execution status from the engine.
    5. EngineConn pushes the execution status back to the microservice where Orchestrator is located in real time through RPC requests.
    6. After the Receiver of that microservice receives the execution status, it broadcasts it through the ListenerBus; Orchestrator's Execution consumes the event and dynamically updates the execution status of the Physical tree.
    7. The result set generated by the computing task is written to storage media such as HDFS on the EngineConn side; EngineConn returns only the result set path through RPC. Execution consumes the event and broadcasts the obtained result set path through the ListenerBus, so that the Listener registered by Entrance with Orchestrator can consume the path and persist it to JobHistory.
    8. After the computing task on the EngineConn side finishes executing, the same logic triggers Execution to update the state of the corresponding ExecTask node of the Physical tree, so that the tree continues executing until the entire tree has finished. At this time, Execution broadcasts the completion status of the computing task through the ListenerBus.
    9. After the Listener registered by Entrance with Orchestrator consumes the state event, it updates the job state in JobHistory, and the entire task execution is complete.
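
    Steps 2 and 3 above, the asynchronous submission that immediately returns an execution ID and the subsequent status pulling, can be sketched as follows; the class and method names are illustrative assumptions, not the real EngineConn API:

    import java.util.UUID;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    // Hypothetical sketch: submit asynchronously, return an execution ID at once,
    // and let the caller pull the status by that ID.
    public class AsyncEngineConn {
        private final ExecutorService enginePool = Executors.newFixedThreadPool(4);
        private final ConcurrentMap<String, Future<String>> running = new ConcurrentHashMap<>();

        // Step 2: submit to the underlying engine via a thread pool; return an execution ID immediately.
        public String submit(Callable<String> computingTask) {
            String execId = UUID.randomUUID().toString();
            running.put(execId, enginePool.submit(computingTask));
            return execId;
        }

        // Step 3: the caller uses the execution ID to pull the current status asynchronously.
        public String status(String execId) {
            Future<String> f = running.get(execId);
            if (f == null) return "UNKNOWN";
            if (f.isCancelled()) return "CANCELLED";
            return f.isDone() ? "SUCCEED" : "RUNNING";
        }
    }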

    Finally, let's take a look at how the client side knows the state of the calculation task and obtains the calculation result in time, as shown in the following figure:

    Results acquisition process

    The specific process is as follows:

    1. The client periodically polls Entrance for the status of the computing task.
    2. Once the status flips to success, the client requests the job information from JobHistory and gets all the result set paths.
    3. The client initiates a query-file-content request to PublicService with each result set path and obtains the content of the result set (see the sketch after this list).
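
    A minimal sketch of this client-side flow, assuming a gateway address and simplified endpoint paths rather than the exact Linkis REST contract:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    // Hypothetical polling client: endpoint paths and JSON handling are simplified assumptions.
    public class ResultPoller {
        public static void main(String[] args) throws Exception {
            HttpClient http = HttpClient.newHttpClient();
            String base = "http://linkis-gateway:9001/api/rest_j/v1"; // assumed gateway address
            String taskId = args[0];

            // 1. Poll Entrance until the task status flips to a terminal state.
            String status;
            do {
                Thread.sleep(3000);
                status = get(http, base + "/entrance/" + taskId + "/status");
            } while (!status.contains("Succeed") && !status.contains("Failed"));

            // 2. On success, fetch job info (including result set paths) from JobHistory,
            //    then 3. read each result set's content through PublicService.
            String jobInfo = get(http, base + "/jobhistory/" + taskId + "/get");
            System.out.println(jobInfo);
        }

        private static String get(HttpClient http, String url) throws Exception {
            HttpRequest req = HttpRequest.newBuilder(URI.create(url)).GET().build();
            return http.send(req, HttpResponse.BodyHandlers.ofString()).body();
        }
    }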

    At this point, the entire process of job submission -> preparation -> execution has been completed.

    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/microservice_governance_services/gateway/index.html b/docs/1.1.0/architecture/microservice_governance_services/gateway/index.html index cceb4393369..56fa8bbd349 100644 --- a/docs/1.1.0/architecture/microservice_governance_services/gateway/index.html +++ b/docs/1.1.0/architecture/microservice_governance_services/gateway/index.html @@ -7,7 +7,7 @@ Gateway Design | Apache Linkis - + @@ -26,7 +26,7 @@ Gateway WebSocket Forwarding

    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/microservice_governance_services/overview/index.html b/docs/1.1.0/architecture/microservice_governance_services/overview/index.html index 0d9b279dfda..0756c63db78 100644 --- a/docs/1.1.0/architecture/microservice_governance_services/overview/index.html +++ b/docs/1.1.0/architecture/microservice_governance_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -31,7 +31,7 @@

    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/overview/index.html b/docs/1.1.0/architecture/overview/index.html index 501424001e0..0ce6801991f 100644 --- a/docs/1.1.0/architecture/overview/index.html +++ b/docs/1.1.0/architecture/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    Overview

    Linkis 1.0 divides all microservices into three categories: public enhancement services, computing governance services, and microservice governance services. The following figure shows the architecture of Linkis 1.0.

    Linkis1.0 Architecture Figure

    The specific responsibilities of each category are as follows:

    1. Public enhancement services are the material library service, context service, data source service, and public services that Linkis 0.X already provided.
    2. The microservice governance services are Spring Cloud Gateway, Eureka, and OpenFeign, already provided by Linkis 0.X; Linkis 1.0 will also provide support for Nacos.
    3. Computing governance services are the core focus of Linkis 1.0: the three stages of submission, preparation, and execution are comprehensively upgraded to strengthen Linkis's control over user tasks.

    The following is a directory listing of Linkis1.0 architecture documents:

    1. For the characteristics of the Linkis 1.0 architecture, please read The difference between Linkis1.0 and Linkis0.x.
    2. For documents related to Linkis 1.0's public enhancement services, please read Public Enhancement Service.
    3. For documents related to Linkis 1.0's microservice governance, please read Microservice Governance.
    4. For documents related to Linkis 1.0's computing governance services, please read Computation Governance Service.
    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html b/docs/1.1.0/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html index f8ef6fba4d8..030c53d0731 100644 --- a/docs/1.1.0/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html +++ b/docs/1.1.0/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html @@ -7,7 +7,7 @@ Analysis of engin BML | Apache Linkis - + @@ -17,7 +17,7 @@ taskDao.updateState(resourceTask.getId(), TaskState.RUNNING.getValue(), new Date());

    3) The actual writing of material files into the material library is completed by the upload method in the ResourceServiceImpl class. Inside the upload method, the byte streams corresponding to a set of List<MultipartFile> files are persisted to the material library's file storage system, and the properties data of the material file is stored in the resource record table (linkis_ps_bml_resources) and the resource version record table (linkis_ps_bml_resources_version).

    MultipartFile p = files[0];
    String resourceId = (String) properties.get("resourceId");
    String fileName = new String(p.getOriginalFilename().getBytes(Constant.ISO_ENCODE), Constant.UTF8_ENCODE);
    fileName = resourceId;
    String path = resourceHelper.generatePath(user, fileName, properties);
    // generatePath currently supports Local and HDFS paths; the composition rules of the paths are
    // determined by the generatePath implementation in LocalResourceHelper or HdfsResourceHelper
    StringBuilder sb = new StringBuilder();
    long size = resourceHelper.upload(path, user, inputStream, sb, true);
    // The file size calculation and the writing of the file byte stream are implemented by the
    // upload method in LocalResourceHelper or HdfsResourceHelper
    Resource resource = Resource.createNewResource(resourceId, user, fileName, properties);
    // Insert a record into the resource table linkis_ps_bml_resources
    long id = resourceDao.uploadResource(resource);
    // Add a new record to the resource version table linkis_ps_bml_resources_version; the version
    // number at this time is Constant.FIRST_VERSION
    // In addition to the metadata of this version, the most important thing recorded is the storage
    // location of this version's file: the file path, start position, and end position
    String clientIp = (String) properties.get("clientIp");
    ResourceVersion resourceVersion = ResourceVersion.createNewResourceVersion(
            resourceId, path, md5String, clientIp, size, Constant.FIRST_VERSION, 1);
    versionDao.insertNewVersion(resourceVersion);

    After the above process executes successfully, the material data is truly complete: the UploadResult is returned to the client and the status of this ResourceTask is marked as completed; if an exception occurs, the task status is marked as failed and the exception information is recorded.

    resource-task

    4.2.2 Engine material update process#

    Engine material update process sequence diagram

    Engine material update process sequence diagram

    If the table linkis_cg_engine_conn_plugin_bml_resources has material data matching the local material data, you need to use the data in EngineConnLocalizeResource to construct an EngineConnBmlResource object and update metadata such as the version number, file size, and modification time of the original material file in the linkis_cg_engine_conn_plugin_bml_resources table. Before updating, you need to complete the update-and-upload operation of the material file, that is, execute the uploadToBml(localizeResource, engineConnBmlResource.getBmlResourceId) method.

    Inside the uploadToBml(localizeResource, resourceId) method, a bmlClient is constructed to request the material resource update interface, that is:

    private val bmlClient = BmlClientFactory.createBmlClient()
    bmlClient.updateResource(Utils.getJvmUser, resourceId, localizeResource.fileName, localizeResource.getFileInputStream)

    In BML Server, the interface for material update is located in the updateVersion interface method in the BmlRestfulApi class. The main process is as follows:

    First, complete the validity check of the resourceId, that is, check whether the incoming resourceId exists in the linkis_ps_bml_resources table. If the resourceId does not exist, an exception is thrown to the client and the material update operation fails at the interface level.

    Therefore, the correspondence of resource data between the tables linkis_cg_engine_conn_plugin_bml_resources and linkis_ps_bml_resources must be complete; otherwise the material file cannot be updated.

    resourceService.checkResourceId(resourceId)

    If resourceId exists in the linkis_ps_bml_resources table, it will continue to execute:

    StringUtils.isEmpty(versionService.getNewestVersion(resourceId))

    The getNewestVersion method obtains the maximum version number of the resourceId from the table linkis_ps_bml_resources_version. If the maximum version corresponding to the resourceId is empty, the material update will also fail, so the integrity of this data correspondence must likewise be strictly guaranteed.

    After the above two checks are passed, a ResourceUpdateTask will be created to complete the final file writing and record update saving.

    ResourceTask resourceTask = null;
    synchronized (resourceId.intern()) {
        resourceTask = taskService.createUpdateTask(resourceId, user, file, properties);
    }

    Inside the createUpdateTask method, the main functions implemented are:

    // Generate a new version for the material resource
    String lastVersion = getResourceLastVersion(resourceId);
    String newVersion = generateNewVersion(lastVersion);
    // Then construct the ResourceTask and maintain its state
    ResourceTask resourceTask = ResourceTask.createUpdateTask(resourceId, newVersion, user, system, properties);
    // The upload logic of the material update is completed by the versionService.updateVersion method
    versionService.updateVersion(resourceTask.getResourceId(), user, file, properties);

    Inside the versionService.updateVersion method, the main functions implemented are:

    ResourceHelper resourceHelper = ResourceHelperFactory.getResourceHelper();
    InputStream inputStream = file.getInputStream();
    // Get the path of the resource
    String newVersion = params.get("newVersion").toString();
    String path = versionDao.getResourcePath(resourceId) + "_" + newVersion;
    // getResourcePath fetches one record of the original path, and newVersion is then appended with "_"
    // select resource from linkis_ps_bml_resources_version WHERE resource_id = #{resourceId} limit 1
    // Upload the resource to HDFS or the local file system
    StringBuilder stringBuilder = new StringBuilder();
    long size = resourceHelper.upload(path, user, inputStream, stringBuilder, OVER_WRITE);
    // Finally insert a new resource version record into the linkis_ps_bml_resources_version table
    ResourceVersion resourceVersion = ResourceVersion.createNewResourceVersion(resourceId, path, md5String, clientIp, size, newVersion, 1);
    versionDao.insertNewVersion(resourceVersion);
    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/public_enhancement_services/bml/overview/index.html b/docs/1.1.0/architecture/public_enhancement_services/bml/overview/index.html index bb479ccbcbf..0aca325cb38 100644 --- a/docs/1.1.0/architecture/public_enhancement_services/bml/overview/index.html +++ b/docs/1.1.0/architecture/public_enhancement_services/bml/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -18,7 +18,7 @@ The number of bytes. After the reading is successful, the stream information is returned to the user.

  • Insert a successful download record in resource_download_history

  Database Design#

    1. Resource information table (resource)

    Field name | Function | Remarks
    resource_id | A string that uniquely identifies a resource globally | UUID can be used for identification
    resource_location | The location where resources are stored | For example, hdfs:///tmp/bdp/${USERNAME}/
    owner | The owner of the resource | e.g. zhangsan
    create_time | Record creation time |
    is_share | Whether to share | 0 means not to share, 1 means to share
    update_time | Last update time of the resource |
    is_expire | Whether the record resource expires |
    expire_time | Record resource expiration time |

    2. Resource version information table (resource_version)

    Field name | Function | Remarks
    resource_id | Uniquely identifies the resource | Joint primary key
    version | The version of the resource file |
    start_byte | Start byte count of the resource file |
    end_byte | End byte count of the resource file |
    size | Resource file size |
    resource_location | Resource file placement location |
    start_time | Record upload start time |
    end_time | Record upload end time |
    updater | Record update user |

    3. Resource download history table (resource_download_history)

    Field | Function | Remarks
    resource_id | Record the resource_id of the downloaded resource |
    version | Record the version of the downloaded resource |
    downloader | Record the user who downloaded |
    start_time | Record download time |
    end_time | Record end time |
    status | Whether the record is successful | 0 means success, 1 means failure
    err_msg | Log failure reason | null means success, otherwise logs the failure reason
    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service/index.html b/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service/index.html index d20b9b0581e..42a0c52b532 100644 --- a/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service/index.html +++ b/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service/index.html @@ -7,7 +7,7 @@ CS Architecture | Apache Linkis - + @@ -17,7 +17,7 @@

    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_cache/index.html b/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_cache/index.html index bc7c99cbc72..0244b60f360 100644 --- a/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_cache/index.html +++ b/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_cache/index.html @@ -7,7 +7,7 @@ CS Cache Architecture | Apache Linkis - + @@ -16,7 +16,7 @@

    Note: The ContextIDValueGenerator will go to the persistence layer to pull the Array[ContextKeyValue] of the ContextID, and parse the ContextKeyValue key storage index and content through ContextKeyValueParser.

    The other interface processes provided by ContextCacheService are similar, so I won't repeat them here.

    KeyWord parsing logic#

    The specific entity bean of ContextValue needs to use the annotation @keywordMethod on the get methods that can serve as keywords. For example, the getTableName method of Table must be annotated with @keywordMethod.

    When ContextKeyValueParser parses a ContextKeyValue, it scans all the methods of the passed-in object annotated with @keywordMethod, calls each get method, and takes toString of the returned object; the result is parsed through user-selectable rules and stored in the keyword collection. The rules include separators and regular expressions.

    Precautions:

    1. The annotation will be defined in the core module of cs

    2. The annotated get method cannot take parameters

    3. The toString method of the object returned by the get method must return the keyword
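
    A minimal sketch of how such an annotated bean and its keyword parsing might look. The annotation and parser below are modeled on the description above and are illustrative assumptions, not the actual cs module code:

    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;
    import java.lang.reflect.Method;
    import java.util.HashSet;
    import java.util.Set;

    public class KeywordDemo {
        // Assumed shape of the annotation described above (defined in the cs core module).
        @Retention(RetentionPolicy.RUNTIME)
        @Target(ElementType.METHOD)
        @interface KeywordMethod {}

        static class Table {
            @KeywordMethod                      // marks getTableName as a keyword source
            public String getTableName() { return "dws_user_log"; }
            public int getColumnCount() { return 12; } // not annotated: ignored by the parser
        }

        // Scan annotated no-arg get methods, call them, and collect toString of the results.
        static Set<String> parseKeywords(Object bean) throws Exception {
            Set<String> keywords = new HashSet<>();
            for (Method m : bean.getClass().getMethods()) {
                if (m.isAnnotationPresent(KeywordMethod.class) && m.getParameterCount() == 0) {
                    keywords.add(String.valueOf(m.invoke(bean)));
                }
            }
            return keywords;
        }

        public static void main(String[] args) throws Exception {
            System.out.println(parseKeywords(new Table())); // [dws_user_log]
        }
    }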

    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_client/index.html b/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_client/index.html index d518fdba88c..b11fc084b1e 100644 --- a/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_client/index.html +++ b/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_client/index.html @@ -7,7 +7,7 @@ CS Client Design | Apache Linkis - + @@ -17,7 +17,7 @@ The second case is that the content of the ContextID is carried. We need to parse the csid. The way of parsing is to obtain the information of each instance through the method of string cutting, and then use eureka to determine whether this micro-channel still exists through the instance information. Service, if it exists, send it to this microservice instance

    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html b/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html index 1bec4e88e1e..cbb93d51421 100644 --- a/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html +++ b/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html @@ -7,7 +7,7 @@ CS HA Design | Apache Linkis - + @@ -18,7 +18,7 @@ The client sends a request, and the Gateway forwards it to any server. The HA module generates the HAID, including the main instance, the backup instance and the CSID, and completes the binding of the workflow and the HAID.

    When the client sends a change request and the Gateway determines that the main Instance is invalid, it forwards the request to the standby Instance for processing. After the service on the standby Instance verifies that the HAID is valid, it loads the instance and processes the request.

    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_listener/index.html b/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_listener/index.html index 5d0a66ffe78..c4cf9820013 100644 --- a/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_listener/index.html +++ b/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_listener/index.html @@ -7,7 +7,7 @@ CS Listener Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    CS Listener Architecture

    Listener Architecture#

    In DSS, when a node changes its metadata information, the context information of the entire workflow changes. We expect all nodes to perceive the change and automatically update their metadata. We use the listener mode to achieve this, together with a heartbeat mechanism that polls to keep the metadata of the context information consistent.

    Client registration itself, CSKey registration and CSKey update process#

    The main process is as follows:

    1. Registration operation: the clients client1, client2, client3, and client4 register themselves and the CSKeys they want to monitor with the CSServer through HTTP requests. The Service obtains the callback engine instance through the external interface and registers the clients and their corresponding CSKeys.

    2. Update operation: if the ClientX node updates the content of a CSKey, the Service updates the CSKey cached by the ContextCache, and the ContextCache delivers the update operation to the ListenerBus. The ListenerBus notifies the specific listener to consume it (that is, the ContextKeyCallbackEngine updates the CSKeys corresponding to the client). A consumed event is automatically removed.

    3. Heartbeat mechanism:

    All clients use heartbeat information to detect whether the values of the CSKeys in ContextKeyCallbackEngine have changed.

    ContextKeyCallbackEngine returns the updated CSKey values to all registered clients through the heartbeat mechanism. If a client's heartbeat times out, the client is removed.

    Listener UML class diagram#

    Interface: ListenerManager

    External: Provides a ListenerBus for event delivery.

    Internal: Provides a callback engine for specific event registration, access, update, and heartbeat processing logic.

    Listener callbackengine timing diagram#

    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_persistence/index.html b/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_persistence/index.html index 13bfb636414..7e5bba966d8 100644 --- a/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_persistence/index.html +++ b/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_persistence/index.html @@ -7,7 +7,7 @@ CS Persistence Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_search/index.html b/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_search/index.html index 802c6fd6b15..b011f72af78 100644 --- a/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_search/index.html +++ b/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_search/index.html @@ -7,7 +7,7 @@ CS Search Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    CS Search Architecture

    CSSearch Architecture#

    Overall architecture#

    As shown below:

    1. ContextSearch: The query entry. It accepts query conditions defined in Map form and returns the corresponding results according to the conditions.

    2. Building module: Each condition type corresponds to a Parser, which converts the condition in Map form into a Condition object by calling the logic of ConditionBuilder. Conditions with complex logical relationships use ConditionOptimizer to optimize the query plan with cost-based algorithms.

    3. Execution module: Filters out the results that match the conditions from the Cache. Depending on the query target, there are three execution modes: Ruler, Fetcher, and Matcher. The specific logic is described later.

    4. Evaluation module: Responsible for calculating the execution cost of conditions and for statistics on historical execution status.

    Query Condition Definition (ContextSearchCondition)#

    A query condition specifies how to filter out the part of a ContextKeyValue collection that meets the condition. Query conditions can be composed through logical operations to form more complex query conditions.

    1. Support ContextType, ContextScope, KeyWord matching

      1. Corresponding to a Condition type

      2. In Cache, these should have corresponding indexes

    2. Support contains/regex matching mode for key

      1. ContainsContextSearchCondition: contains a string

      2. RegexContextSearchCondition: match a regular expression

    3. Support logical operations of or, and and not

      1. Unary operation UnaryContextSearchCondition: supports logical operations with a single parameter, such as NotContextSearchCondition

      2. Binary operation BinaryContextSearchCondition: supports logical operations with two parameters, defined as LeftCondition and RightCondition, such as OrContextSearchCondition and AndContextSearchCondition

    4. Each logical operation corresponds to an implementation class of the above subclasses

    5. The UML class diagram of this part is as follows:

    Construction of query conditions#

    1. Supports construction through ContextSearchConditionBuilder: if multiple ContextType, ContextScope, KeyWord, or contains/regex matches are declared at the same time during construction, they are automatically connected with the And logical operation

    2. Supports logical operations between Conditions that return new Conditions: And, Or and Not (considering the form condition1.or(condition2), the top-level Condition interface must define the logical operation methods; see the sketch after this list)

    3. Supports building from a Map through the ContextSearchParser corresponding to each underlying implementation class
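
    A minimal sketch of such composable conditions, with the logical operations defined on the top-level interface as described in point 2; the condition bodies are illustrative assumptions:

    // Composable search conditions: the top-level interface defines the logical operations
    // so that condition1.or(condition2) reads naturally. Class names follow the document.
    public class ConditionDemo {
        interface Condition {
            boolean matches(String key);
            default Condition and(Condition other) { return k -> matches(k) && other.matches(k); }
            default Condition or(Condition other) { return k -> matches(k) || other.matches(k); }
            default Condition not() { return k -> !matches(k); }
        }

        public static void main(String[] args) {
            Condition contains = k -> k.contains("user");   // like ContainsContextSearchCondition
            Condition regex = k -> k.matches("dws_.*_log"); // like RegexContextSearchCondition
            Condition combined = contains.or(regex).not();  // a Not over an Or

            System.out.println(combined.matches("dws_user_log")); // false: both sub-conditions match
            System.out.println(combined.matches("ads_order"));    // true: neither matches
        }
    }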

    Execution of query conditions#

    1. Three function modes of query conditions:

      1. Ruler: Filter out eligible ContextKeyValue sub-Arrays from an Array

      2. Matcher: Determine whether a single ContextKeyValue meets the conditions

      3. Fetcher: Filter out an Array of eligible ContextKeyValue from ContextCache

    2. Each bottom-level Condition has a corresponding Execution, responsible for maintaining the corresponding Ruler, Matcher, and Fetcher.

    Query entry ContextSearch#

    Provide a search interface, receive Map as a parameter, and filter out the corresponding data from the Cache.

    1. Use Parser to convert the condition in the form of Map into a Condition object

    2. Obtain cost information through Optimizer, and determine the order of query according to the cost information

    3. After executing the corresponding Ruler/Fetcher/Matcher logic through the corresponding Execution, the search result is obtained

    Query Optimization#

    1. OptimizedContextSearchCondition maintains the Cost and Statistics information of the condition:

      1. Cost information: CostCalculator is responsible for judging whether a certain Condition can calculate Cost, and if it can be calculated, it returns the corresponding Cost object

      2. Statistics information: start/end/execution time, number of input lines, number of output lines

    2. Implement a CostContextSearchOptimizer, whose optimize method converts a Condition into an OptimizedContextSearchCondition object based on the Condition's cost. The specific logic is as follows:

      1. Disassemble a complex Condition into a tree structure according to the combination of logical operations. Each leaf node is a basic, simple Condition; each non-leaf node is a logical operation.

    Tree A, shown in the figure below, is a complex condition composed of the five simple conditions A, B, C, D, and E through various logical operations.

    (Tree A)
      2. The execution of these Conditions is in fact depth-first, traversing the tree from left to right. Moreover, according to the commutativity of the logical operations, the left-right order of a node's children in the Condition tree can be swapped, so all possible trees for all possible execution orders can be enumerated.

    Tree B, shown in the figure below, is another possible ordering of tree A above; its execution result is exactly the same as tree A's, only the execution order of its parts is adjusted.

    (Tree B)
      3. For each tree, the cost is calculated from the leaf nodes upward and aggregated at the root node, giving the final cost of the tree; the tree with the smallest cost is chosen as the optimal execution order.

    The rules for calculating node cost are as follows:

    1. For leaf nodes, each node has two attributes: Cost and Weight. Cost is the cost calculated by CostCalculator. Weight is assigned according to the execution order of the nodes; the current default is 1 for the left child and 0.5 for the right child (how to tune this is left for later). The reason for the weight is that, in some cases, the left-hand condition alone can already determine whether the entire combination matches, so the right-hand condition is not executed in every case and its actual cost must be discounted by a certain percentage.

    2. For non-leaf nodes, Cost = the sum of Cost × Weight over all child nodes; the weight assignment logic is the same as for leaf nodes.

    Taking tree A and tree B as examples, the costs of the two trees are calculated as shown in the figure below, where the number in each node is Cost|Weight, and the costs of the five simple conditions A, B, C, D, and E are assumed to be 10, 100, 50, 10, and 100 respectively. It can be concluded that the cost of tree B is less than that of tree A, making it the better plan. A minimal sketch of this cost aggregation follows.
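
    The sketch below applies the cost rules above (leaf costs from a CostCalculator stand-in, non-leaf cost = sum of child Cost × Weight with weights 1 and 0.5); the tree model itself is an illustrative assumption:

    // Cost aggregation over a condition tree: left weight 1, right weight 0.5.
    public class CostDemo {
        sealed interface Node permits Leaf, Op {}
        record Leaf(String name, double cost) implements Node {}   // cost as given by a CostCalculator
        record Op(Node left, Node right) implements Node {}        // a binary logical operation

        static double cost(Node n) {
            if (n instanceof Leaf leaf) return leaf.cost();
            Op op = (Op) n;
            return 1.0 * cost(op.left()) + 0.5 * cost(op.right()); // sum of child Cost x Weight
        }

        public static void main(String[] args) {
            // Two orderings of the same condition: putting the cheaper condition on the left wins.
            Node treeA = new Op(new Leaf("B", 100), new Leaf("A", 10)); // 100*1 + 10*0.5  = 105
            Node treeB = new Op(new Leaf("A", 10), new Leaf("B", 100)); // 10*1  + 100*0.5 = 60
            System.out.println(cost(treeA) + " vs " + cost(treeB));     // tree B is the better plan
        }
    }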

      4. Use CostCalculator to measure the cost of simple conditions:

        1. A condition acting on an index: the cost is determined by the distribution of the index values. For example, if the length of the Array obtained from the Cache by condition A is 100 and that of condition B is 200, then the cost of condition A is less than that of B.

        2. Conditions that need to be traversed:

          1. An initial Cost is given according to the matching mode of the condition itself: for example, Regex is 100 and Contains is 10 (the specific values will be adjusted according to the situation during implementation)

          2. Based on the efficiency of historical queries (throughput per unit time), the real-time Cost is obtained through continuous adjustment on the basis of the initial Cost.

    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/public_enhancement_services/context_service/overview/index.html b/docs/1.1.0/architecture/public_enhancement_services/context_service/overview/index.html index 0f454f779ab..0d4da3801ff 100644 --- a/docs/1.1.0/architecture/public_enhancement_services/context_service/overview/index.html +++ b/docs/1.1.0/architecture/public_enhancement_services/context_service/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -22,7 +22,7 @@ Enter Persistence architecture design

    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/public_enhancement_services/datasource_manager/index.html b/docs/1.1.0/architecture/public_enhancement_services/datasource_manager/index.html index 23557af6f67..7f0324c2096 100644 --- a/docs/1.1.0/architecture/public_enhancement_services/datasource_manager/index.html +++ b/docs/1.1.0/architecture/public_enhancement_services/datasource_manager/index.html @@ -7,7 +7,7 @@ Data Source Management Service Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    Data Source Management Service Architecture

    Background#

    Exchangis 0.x and Linkis 0.x both had integrated data source modules in earlier versions. In order to reuse the data source management capability, Linkis reconstructed the data source module based on linkis-datasource (refer to the related documents) and split data source management into a data source management service and a metadata management service.

    This article mainly covers the DataSource Manager Server, the data source management service, which provides the following functions:

    1) Linkis unified management of service startup and deployment does not increase operation and maintenance costs, and reuses the Linkis service capabilities;

    2) Provides management services with a graphical interface through Linkis Web. The interface provides management services such as creating data sources, data source queries, data source updates, connectivity tests, and so on;

    3) The service is stateless and can be deployed with multiple instances for high availability. When the system is deployed, multiple instances can be deployed; each instance serves the outside world independently without interfering with the others, and all information is stored in the database for sharing;

    4) Provides full life cycle management of data sources, including creation, query, update, test, and expiration management;

    5) Multi-version data source management: historical data sources are saved in the database, and data source expiration management is provided;

    6) The Restful interface provides the functions; a detailed list: data source type query, data source detailed information query, data source information query based on version, data source version query, get data source parameter list, multi-dimensional data source search, data source environment query and update, add data source, data source parameter configuration, data source expiration setting, data source connectivity test.

    Architecture Diagram#

    datasource Architecture diagram

    Architecture Description#

    1. The service is registered with the Linkis-Eureka-Service service and managed in a unified manner with the other Linkis microservices. Clients can reach the data source management service by connecting to the Linkis-GateWay-Service service with the service name data-source-manager.

    2. The interface layer serves other applications through the Restful interface, providing addition, deletion, and modification of data sources and data source environments, data source link and dual-link tests, data source version management, and expiration operations;

    3. The Service layer mainly manages the database and the material library, permanently retaining the relevant information of the data sources;

    4. The link test of a data source is done through the linkis metastore server service, which currently supports the mysql/es/kafka/hive services.

    Core Process#

    1. To create a new data source, the user creating the data source is first obtained from the request to determine whether the user is valid. The next step verifies the relevant field information of the data source: the data source name and data source type cannot be empty. The data source name is used to confirm whether the data source already exists; if it does not exist, it is inserted into the database and the data source ID is returned.

    2. To update a data source, the requesting user is first obtained from the request to determine whether the user is valid. The next step verifies the relevant field information of the new data source: the data source name and data source type cannot be empty. Whether the data source exists is confirmed by the data source ID; if it does not exist, an exception is returned. If it exists, it is further judged whether the user has update permission for the data source: only the administrator or the owner of the data source has permission to update. If the user has permission, the data source is updated and the data source ID is returned.

    3. To update data source parameters, the requesting user is first obtained from the request to determine whether the user is valid, and the detailed data source information is obtained from the passed-in data source ID. It is then determined whether the user is the owner of the data source being changed or an administrator. If so, the modified parameters are further verified, and after passing verification the parameters are updated and the versionId is returned.

    Entity Object#

    Class Name | Describe
    DataSourceType | Indicates the type of data source
    DataSourceParamKeyDefinition | Declares data source property configuration definitions
    DataSource | Data source object entity class, including permission tags and attribute configuration definitions
    DataSourceEnv | Data source environment object entity class, which also contains attribute configuration definitions
    DataSourceParameter | Data source specific parameter configuration
    DatasourceVersion | Data source version details

    Database Design#

    Database Diagram:#

    Data Table Definition:#

    Table: linkis_ps_dm_datasource <--> Object: DataSource

    Serial Number | Column | Describe
    1 | id | Data source ID
    2 | datasource_name | Data source name
    3 | datasource_desc | Data source detailed description
    4 | datasource_type_id | Data source type ID
    5 | create_identify | Create identify
    6 | create_system | System for creating the data source
    7 | parameter | Data source parameters
    8 | create_time | Data source creation time
    9 | modify_time | Data source modification time
    10 | create_user | Data source create user
    11 | modify_user | Data source modify user
    12 | labels | Data source label
    13 | version_id | Data source version ID
    14 | expire | Whether the data source is out of date
    15 | published_version_id | Data source release version number

    Table: linkis_ps_dm_datasource_type <--> Object: DataSourceType

    Serial Number | Column | Describe
    1 | id | Data source type ID
    2 | name | Data source type name
    3 | description | Data source type description
    4 | option | Type of data source
    5 | classifier | Data source type classifier
    6 | icon | Data source image display path
    7 | layers | Data source type hierarchy

    Table: linkis_ps_dm_datasource_env <--> Object: DataSourceEnv

    Serial Number | Column | Describe
    1 | id | Data source environment ID
    2 | env_name | Data source environment name
    3 | env_desc | Data source environment description
    4 | datasource_type_id | Data source type ID
    5 | parameter | Data source environment parameters
    6 | create_time | Data source environment creation time
    7 | create_user | Data source environment create user
    8 | modify_time | Data source modification time
    9 | modify_user | Data source modify user

    Table: linkis_ps_dm_datasource_type_key <--> Object: DataSourceParamKeyDefinition

    Serial Number | Column | Describe
    1 | id | Key-value type ID
    2 | data_source_type_id | Data source type ID
    3 | key | Data source parameter key
    4 | name | Data source parameter name
    5 | default_value | Data source parameter default value
    6 | value_type | Data source parameter type
    7 | scope | Data source parameter scope
    8 | require | Whether the data source parameter is required
    9 | description | Data source parameter description
    10 | value_regex | Data source parameter regex
    11 | ref_id | Data source parameter association ID
    12 | ref_value | Data source parameter associated value
    13 | data_source | Data source
    14 | update_time | Update time
    15 | create_time | Create time

    Table: linkis_ps_dm_datasource_version <--> Object: DatasourceVersion

    Serial Number | Column | Describe
    1 | version_id | Data source version ID
    2 | datasource_id | Data source ID
    3 | parameter | The version parameter of the data source
    4 | comment | Comment
    5 | create_time | Create time
    6 | create_user | Create user
    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/public_enhancement_services/metadata_manager/index.html b/docs/1.1.0/architecture/public_enhancement_services/metadata_manager/index.html index 3a9c439c717..c4abcdf8e02 100644 --- a/docs/1.1.0/architecture/public_enhancement_services/metadata_manager/index.html +++ b/docs/1.1.0/architecture/public_enhancement_services/metadata_manager/index.html @@ -7,7 +7,7 @@ Data Source Management Service Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    Data Source Management Service Architecture

    Background#

    Exchangis 0.x and Linkis 0.x both had integrated data source modules in earlier versions. In order to reuse the data source management capability, Linkis reconstructed the data source module based on linkis-datasource (refer to the related documents) and split data source management into a data source management service and a metadata management service.

    This article mainly covers the MetaData Manager Server, the metadata management service, which provides the following functions:

    1) Linkis unified management of service startup and deployment does not increase operation and maintenance costs, and reuses the Linkis service capabilities;

    2) The service is stateless and deployed in multiple instances for high availability. When the system is deployed, multiple instances can be deployed; each instance serves the outside world independently without interfering with the others, and all information is stored in the database for sharing;

    3) Provides full life cycle management of data sources, including creation, query, update, test, and expiration management;

    4) Multi-version data source management: historical data sources are saved in the database, and data source expiration management is provided;

    5) The Restful interface provides the functions; a detailed list: database information query, database table information query, database table parameter information query, and data partition information query.

    Architecture Diagram#

    Data Source Architecture Diagram

    Architecture Description#

    1. The service is registered with the Linkis-Eureka-Service service and managed in a unified manner with the other Linkis microservices. Clients can reach the metadata management service by connecting to the Linkis-GateWay-Service service with the service name metamanager.

    2. The interface layer provides database/table/partition information queries to other applications through the Restful interface;

    3. In the Service layer, the data source type is obtained from the data source management service by the data source ID, and the concrete supported service is obtained by that type. The initially supported services are mysql/es/kafka/hive;

    Core Process#

    1. The client passes in the specified data source ID and obtains information through the restful interface. For example, to query the database list for the data source with ID 1, the url is http://<meta-server-url>/metadatamanager/dbs/1 (see the sketch after this list);

    2. According to the data source ID, the data source service <data-source-manager> is accessed through RPC to obtain the data source type;

    3. According to the data source type, the corresponding Service [hive/es/kafka/mysql] is loaded, the corresponding operation is performed, and the result is returned;
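
    A minimal sketch of step 1, querying the database list of data source ID 1 through the documented REST path; the gateway address stands in for <meta-server-url> and is a deployment-specific assumption:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class MetadataQueryDemo {
        public static void main(String[] args) throws Exception {
            HttpClient http = HttpClient.newHttpClient();
            String metaServerUrl = "http://linkis-gateway:9001"; // assumed <meta-server-url>
            HttpRequest req = HttpRequest.newBuilder(
                            URI.create(metaServerUrl + "/metadatamanager/dbs/1")) // data source ID = 1
                    .GET()
                    .build();
            HttpResponse<String> resp = http.send(req, HttpResponse.BodyHandlers.ofString());
            System.out.println(resp.statusCode() + ": " + resp.body()); // database list of the data source
        }
    }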

    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/public_enhancement_services/overview/index.html b/docs/1.1.0/architecture/public_enhancement_services/overview/index.html index e190516a7b6..3978c7132d1 100644 --- a/docs/1.1.0/architecture/public_enhancement_services/overview/index.html +++ b/docs/1.1.0/architecture/public_enhancement_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    PublicEnhencementService (PS) architecture design

    PublicEnhancementService (PS): public enhancement service, a module that provides functions such as unified configuration management, context service, material library, data source management, microservice management, and historical task query for the other microservice modules.

    Introduction to the second-level module:

    BML material library#

    It is the linkis material management system, which is mainly used to store various file data of users, including user scripts, resource files, third-party Jar packages, etc., and can also store class libraries that need to be used when the engine runs.

    Core Class | Core Function
    UploadService | Provides resource upload service
    DownloadService | Provides resource download service
    ResourceManager | Provides a unified management entry for uploading and downloading resources
    VersionManager | Provides resource version marking and version management functions
    ProjectManager | Provides project-level resource management and control capabilities

    Unified configuration management#

    Configuration provides a "user-engine-application" three-level configuration management solution, which provides users with the function of configuring custom engine parameters under various access applications.

    Core Class | Core Function
    CategoryService | Provides management services for application and engine catalogs
    ConfigurationService | Provides a unified management service for user configuration

    ContextService context service#

    ContextService is used to solve the problem of data and information sharing across multiple systems in a data application development process.

    Core Class | Core Function
    ContextCacheService | Provides a cache service for context information
    ContextClient | Provides the ability for other microservices to interact with the CSServer group
    ContextHAManager | Provides high-availability capabilities for ContextService
    ListenerManager | Provides the ability of a message bus
    ContextSearch | Provides the query entry
    ContextService | Implements the overall execution logic of the context service

    Datasource data source management#

    Datasource provides the ability to connect to different data sources for other microservices.

    Core Class | Core Function
    datasource-server | Provides the ability to connect to different data sources

    InstanceLabel microservice management#

    InstanceLabel provides registration and labeling functions for other microservices connected to linkis.

    Core Class | Core Function
    InsLabelService | Provides microservice registration and label management functions

    Jobhistory historical task management#

    Jobhistory provides users with linkis historical task query, progress, log display related functions, and provides a unified historical task view for administrators.

    Core Class | Core Function
    JobHistoryQueryService | Provides the historical task query service

    Variable user-defined variable management#

    Variable provides users with functions related to the storage and use of custom variables.

    Core Class | Core Function
    VariableService | Provides functions related to the storage and use of custom variables

    UDF user-defined function management#

    UDF provides users with the function of custom functions, which can be introduced by users when writing code.

    Core Class | Core Function
    UDFService | Provides the user-defined function service
    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/public_enhancement_services/public_service/index.html b/docs/1.1.0/architecture/public_enhancement_services/public_service/index.html index b33691346df..3dfbee38845 100644 --- a/docs/1.1.0/architecture/public_enhancement_services/public_service/index.html +++ b/docs/1.1.0/architecture/public_enhancement_services/public_service/index.html @@ -7,7 +7,7 @@ Public Service | Apache Linkis - + @@ -20,7 +20,7 @@ The main functions are as follows:

    • Provides resource management capabilities for some specific labels to assist RM in more refined resource management.

    • Provides labeling capabilities for users. The user label is automatically added for judgment when applying for an engine.

    • Provides the label analysis module, which can parse a user's request into a set of labels.

    • Provides node label management capabilities, mainly used to provide CRUD of node labels and label resource management, which manages the resources of certain labels by marking the maximum, minimum, and used resources of a label.

    - + \ No newline at end of file diff --git a/docs/1.1.0/deployment/cluster_deployment/index.html b/docs/1.1.0/deployment/cluster_deployment/index.html index 109244078f3..3115f0ae450 100644 --- a/docs/1.1.0/deployment/cluster_deployment/index.html +++ b/docs/1.1.0/deployment/cluster_deployment/index.html @@ -7,7 +7,7 @@ Cluster Deployment | Apache Linkis - + @@ -21,7 +21,7 @@ Replicas will also display the replica nodes adjacent to the cluster.

    - + \ No newline at end of file diff --git a/docs/1.1.0/deployment/engine_conn_plugin_installation/index.html b/docs/1.1.0/deployment/engine_conn_plugin_installation/index.html index 785c41deebd..5a30bb28f47 100644 --- a/docs/1.1.0/deployment/engine_conn_plugin_installation/index.html +++ b/docs/1.1.0/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ EngineConnPlugin Installation | Apache Linkis - + @@ -17,7 +17,7 @@ wds.linkis.engineconn.plugin.loader.store.path, which is used by EngineConnPluginServer to read the actual implementation Jar of the engine.

    It is highly recommended to specify wds.linkis.engineconn.home and wds.linkis.engineconn.plugin.loader.store.path as the same directory, so that you can directly unzip the engine ZIP package exported by maven into this directory, for example: place it in the ${LINKIS_HOME}/lib/linkis-engineconn-plugins directory.

    ${LINKIS_HOME}/lib/linkis-engineconn-plugins:
    └── hive
        └── dist
        └── plugin
    └── spark
        └── dist
        └── plugin

    If the two parameters do not point to the same directory, you need to place the dist and plugin directories separately, as shown in the following example:

    ## dist directory
    ${LINKIS_HOME}/lib/linkis-engineconn-plugins/dist:
    └── hive
        └── dist
    └── spark
        └── dist

    ## plugin directory
    ${LINKIS_HOME}/lib/linkis-engineconn-plugins/plugin:
    └── hive
        └── plugin
    └── spark
        └── plugin

    2.2 Configuration modification of management console (optional)#

    The configuration of the Linkis 1.0 management console is managed according to engine labels. If the new engine has configuration parameters, you need to insert the corresponding configuration parameters in the Configuration module, into the following tables:

    linkis_configuration_config_key: Insert the key and default value of the engine's configuration parameters
    linkis_manager_label: Insert the engine label, such as hive-1.2.1
    linkis_configuration_category: Insert the catalog relationship of the engine
    linkis_configuration_config_value: Insert the configuration that the engine needs to display

    If it is an existing engine with a new version added, you can modify the version of the corresponding engine in the linkis_configuration_dml.sql file and execute it

    2.3 Engine refresh#

    1. The engine supports real-time refresh. After the engine is placed in the corresponding directory, Linkis 1.0 provides a way to load the engine without shutting down the server: just send a request to the linkis-engineconn-plugin-server service through the restful interface, that is, to the actual IP and port of the deployed service. The request interface is http://ip:port/api/rest_j/v1/rpc/receiveAndReply, the request method is POST, and the request body is {"method":"/enginePlugin/engineConn/refreshAll"}. (A sketch of this request follows this list.)

    2. Restart refresh: the engine catalog can be forced to refresh by restarting

    ### cd to the sbin directory, restart linkis-engineconn-plugin-server
    cd /Linkis1.0.0/sbin
    ## Execute the linkis-daemon script
    sh linkis-daemon.sh restart linkis-engine-plugin-server

    3. Check whether the engine refresh succeeded: if you encounter problems during the refresh and need to confirm whether it succeeded, you can check whether the last_update_time of the linkis_engine_conn_plugin_bml_resources table in the database is the time at which the refresh was triggered.
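
    As referenced in step 1 above, here is a minimal sketch of the real-time refresh request; the host and port are deployment-specific assumptions, while the path and request body follow the text:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class EnginePluginRefresh {
        public static void main(String[] args) throws Exception {
            HttpClient http = HttpClient.newHttpClient();
            HttpRequest req = HttpRequest.newBuilder(
                            // assumed ip:port of the deployed linkis-engineconn-plugin-server
                            URI.create("http://127.0.0.1:9103/api/rest_j/v1/rpc/receiveAndReply"))
                    .header("Content-Type", "application/json")
                    .POST(HttpRequest.BodyPublishers.ofString(
                            "{\"method\":\"/enginePlugin/engineConn/refreshAll\"}"))
                    .build();
            HttpResponse<String> resp = http.send(req, HttpResponse.BodyHandlers.ofString());
            System.out.println(resp.statusCode() + ": " + resp.body());
        }
    }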

    - + \ No newline at end of file diff --git a/docs/1.1.0/deployment/installation_hierarchical_structure/index.html b/docs/1.1.0/deployment/installation_hierarchical_structure/index.html index 277858c3959..fb65c37f0ec 100644 --- a/docs/1.1.0/deployment/installation_hierarchical_structure/index.html +++ b/docs/1.1.0/deployment/installation_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Installation Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    Installation directory structure

    The directory structure of Linkis 1.0 is very different from that of the 0.X version. In 0.X, each microservice had its own independent root directory. The main advantage of that structure is that it is easy to distinguish microservices and manage them individually, but it has some obvious problems:

    1. The microservice directories are too complicated, and switching between them for management is inconvenient
    2. There is no unified startup script, which makes starting and stopping microservices troublesome
    3. There are a large number of duplicated service configurations, and the same configuration often needs to be modified in many places
    4. There are a large number of repeated lib dependencies, which increases the size of the installation package and the risk of dependency conflicts

    Therefore, in Linkis 1.0 we have greatly optimized and adjusted the installation directory structure: reducing the number of microservice directories, deduplicating repeated jar dependencies, and reusing configuration files and microservice management scripts as much as possible. This is mainly reflected in the following aspects:

    1. The bin folder is no longer provided for each microservice; it is now shared by all microservices.

    The bin folder now serves as the installation directory, mainly used to install Linkis 1.0 and check the environment status. The new sbin directory provides one-click start and stop of Linkis, as well as independent start and stop of each microservice by changing parameters.

    2. A separate conf directory is no longer provided for each microservice; it is now shared by all microservices.

    The conf folder contains two kinds of content: on the one hand, configuration shared by all microservices, which users can customize according to their own environment; on the other hand, the special configuration of each microservice, which users normally do not need to change.

    3. The lib folder is no longer provided for each microservice; it is now shared by all microservices.

    The lib folder likewise contains two kinds of content: the common dependencies required by all microservices, and the special dependencies required by each microservice.

    4. The log directory is no longer provided for each microservice; it is now shared by all microservices.

    The log directory contains the log files of all microservices.

    The simplified directory structure of Linkis 1.0 is as follows.

    ├── bin ──installation directory
    │   ├── checkEnv.sh ── Environment variable detection
    │   ├── checkServices.sh ── Microservice status check
    │   ├── common.sh ── Some public shell functions
    │   ├── install-io.sh ── Used for dependency replacement during installation
    │   └── install.sh ── Main script of the Linkis installation
    ├── conf ──configuration directory
    │   ├── application-eureka.yml
    │   ├── application-linkis.yml ── Microservice general yml
    │   ├── linkis-cg-engineconnmanager-io.properties
    │   ├── linkis-cg-engineconnmanager.properties
    │   ├── linkis-cg-engineplugin.properties
    │   ├── linkis-cg-entrance.properties
    │   ├── linkis-cg-linkismanager.properties
    │   ├── linkis-computation-governance
    │   │   └── linkis-client
    │   │       └── linkis-cli
    │   │           ├── linkis-cli.properties
    │   │           └── log4j2.xml
    │   ├── linkis-env.sh ── linkis environment properties
    │   ├── linkis-et-validator.properties
    │   ├── linkis-mg-gateway.properties
    │   ├── linkis.properties ── linkis global properties
    │   ├── linkis-ps-bml.properties
    │   ├── linkis-ps-cs.properties
    │   ├── linkis-ps-datasource.properties
    │   ├── linkis-ps-publicservice.properties
    │   ├── log4j2.xml
    │   ├── proxy.properties (optional)
    │   └── token.properties (optional)
    ├── db ──database DML and DDL file directory
    │   ├── linkis_ddl.sql ── Database table definition SQL
    │   ├── linkis_dml.sql ── Database table initialization SQL
    │   └── module ── Contains the DML and DDL files of each microservice
    ├── lib ──lib directory
    │   ├── linkis-commons ── Common dependency packages
    │   ├── linkis-computation-governance ── lib directory of the computation governance module
    │   ├── linkis-engineconn-plugins ── lib directory of all EngineConnPlugins
    │   ├── linkis-public-enhancements ── lib directory of the public enhancement services
    │   └── linkis-spring-cloud-services ── SpringCloud lib directory
    ├── logs ──log directory
    │   ├── linkis-cg-engineconnmanager-gc.log
    │   ├── linkis-cg-engineconnmanager.log
    │   ├── linkis-cg-engineconnmanager.out
    │   ├── linkis-cg-engineplugin-gc.log
    │   ├── linkis-cg-engineplugin.log
    │   ├── linkis-cg-engineplugin.out
    │   ├── linkis-cg-entrance-gc.log
    │   ├── linkis-cg-entrance.log
    │   ├── linkis-cg-entrance.out
    │   ├── linkis-cg-linkismanager-gc.log
    │   ├── linkis-cg-linkismanager.log
    │   ├── linkis-cg-linkismanager.out
    │   ├── linkis-et-validator-gc.log
    │   ├── linkis-et-validator.log
    │   ├── linkis-et-validator.out
    │   ├── linkis-mg-eureka-gc.log
    │   ├── linkis-mg-eureka.log
    │   ├── linkis-mg-eureka.out
    │   ├── linkis-mg-gateway-gc.log
    │   ├── linkis-mg-gateway.log
    │   ├── linkis-mg-gateway.out
    │   ├── linkis-ps-bml-gc.log
    │   ├── linkis-ps-bml.log
    │   ├── linkis-ps-bml.out
    │   ├── linkis-ps-cs-gc.log
    │   ├── linkis-ps-cs.log
    │   ├── linkis-ps-cs.out
    │   ├── linkis-ps-datasource-gc.log
    │   ├── linkis-ps-datasource.log
    │   ├── linkis-ps-datasource.out
    │   ├── linkis-ps-publicservice-gc.log
    │   ├── linkis-ps-publicservice.log
    │   └── linkis-ps-publicservice.out
    ├── pid ──Process IDs of all microservices
    │   ├── linkis_cg-engineconnmanager.pid ── EngineConnManager microservice
    │   ├── linkis_cg-engineconnplugin.pid ── EngineConnPlugin microservice
    │   ├── linkis_cg-entrance.pid ── Engine entrance microservice
    │   ├── linkis_cg-linkismanager.pid ── linkis manager microservice
    │   ├── linkis_mg-eureka.pid ── eureka microservice
    │   ├── linkis_mg-gateway.pid ── gateway microservice
    │   ├── linkis_ps-bml.pid ── material library microservice
    │   ├── linkis_ps-cs.pid ── Context microservice
    │   ├── linkis_ps-datasource.pid ── Data source microservice
    │   └── linkis_ps-publicservice.pid ── public microservice
    └── sbin ──microservice start and stop script directory
        ├── ext ── Start and stop scripts of each microservice
        ├── linkis-daemon.sh ── Quickly start, stop, or restart a single microservice
        ├── linkis-start-all.sh ── Start all microservices with one click
        └── linkis-stop-all.sh ── Stop all microservices with one click

    Configuration item modification

    After executing install.sh in the bin directory to complete the Linkis installation, you need to modify the configuration items. All configuration items are located in the conf directory. Normally you need to modify the three configuration files db.sh, linkis.properties, and linkis-env.sh. For project installation and configuration, please refer to the article "Linkis1.0 Installation".

    Microservice start and stop

    After modifying the configuration items, you can start the microservice in the sbin directory. The names of all microservices are as follows:

    ├── linkis-cg-engineconnmanager ──engine management service
    ├── linkis-cg-engineplugin ──EngineConnPlugin management service
    ├── linkis-cg-entrance ──computation governance entrance service
    ├── linkis-cg-linkismanager ──computation governance management service
    ├── linkis-mg-eureka ──microservice registry service
    ├── linkis-mg-gateway ──Linkis gateway service
    ├── linkis-ps-bml ──material library service
    ├── linkis-ps-cs ──context service
    ├── linkis-ps-datasource ──data source service
    └── linkis-ps-publicservice ──public service

    Microservice abbreviation:

    | Abbreviation | Full English Name | Full Chinese Name |
    | --- | --- | --- |
    | cg | Computation Governance | Computing Governance |
    | mg | Microservice Governance | Microservice Governance |
    | ps | Public Enhancement Service | Public Enhancement Service |

    In the past, to start or stop a single microservice you had to enter the bin directory of that microservice and execute its start/stop script. With many microservices, starting and stopping was troublesome and required a lot of extra directory switching. Linkis 1.0 places all microservice start/stop scripts in the sbin directory, and only a single entry script needs to be executed.

    Under the Linkis/sbin directory:

    1.Start all microservices at once:

    sh linkis-start-all.sh

    2.Shut down all microservices at once

    sh linkis-stop-all.sh

    3.Start a single microservice (the linkis- prefix needs to be removed from the service name, such as mg-eureka)

    sh linkis-daemon.sh start service-name

    For example:

    sh linkis-daemon.sh start mg-eureka

    4.Shut down a single microservice

    sh linkis-daemon.sh stop service-name

    For example:

    sh linkis-daemon.sh stop mg-eureka

    5.Restart a single microservice

    sh linkis-daemon.sh restart service-name

    For example:

    sh linkis-daemon.sh restart mg-eureka

    6.View the status of a single microservice

    sh linkis-daemon.sh status service-name

    For example:

    sh linkis-daemon.sh status mg-eureka
    - + \ No newline at end of file diff --git a/docs/1.1.0/deployment/involve_skywalking_into_linkis/index.html b/docs/1.1.0/deployment/involve_skywalking_into_linkis/index.html index c886b266024..07003f95958 100644 --- a/docs/1.1.0/deployment/involve_skywalking_into_linkis/index.html +++ b/docs/1.1.0/deployment/involve_skywalking_into_linkis/index.html @@ -7,7 +7,7 @@ Involve SkyWaling into Linkis | Apache Linkis - + @@ -20,7 +20,7 @@

    Modify the configuration item SKYWALKING_AGENT_PATH in linkis-env.sh of Linkis. Set it to the path to skywalking-agent.jar.

    SKYWALKING_AGENT_PATH=/path/to/skywalking-agent.jar

    Then start Linkis.

    $ bash linkis-start-all.sh

    4. Result display#

    The Linkis UI port defaults to 8080. After enabling SkyWalking in Linkis and opening the SkyWalking UI, if you can see the picture below, the integration was successful.

    - + \ No newline at end of file diff --git a/docs/1.1.0/deployment/quick_deploy/index.html b/docs/1.1.0/deployment/quick_deploy/index.html index e16371e233e..e767802b0a7 100644 --- a/docs/1.1.0/deployment/quick_deploy/index.html +++ b/docs/1.1.0/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ Quick Deployment | Apache Linkis - + @@ -21,7 +21,7 @@ ##:If your hive version is not 1.2.1, you need to modify the following parameter: #HIVE_VERSION=2.3.3

    f. Modify the database configuration#

    vi deploy-config/db.sh 
    # set the connection information of the database
    # including ip address, database name, username and port
    # Mainly used to store user's customized variables, configuration parameters, UDFs, and small functions, and to provide the underlying storage of JobHistory
    MYSQL_HOST=
    MYSQL_PORT=
    MYSQL_DB=
    MYSQL_USER=
    MYSQL_PASSWORD=

    3. Installation and Startup#

    1. Execute the installation script:#

    sh bin/install.sh

    2. Installation steps#

    • The install.sh script will ask you whether to initialize the database and import the metadata.

    A user might run the install.sh script repeatedly and thereby clear all data in the database. Therefore, each time install.sh is executed, the user is asked whether to initialize the database and import the metadata.

    Please select yes on the first installation.

    Please note: if you are upgrading an existing Linkis environment from 0.X to 1.0, do not choose yes directly; refer to the Linkis1.0 Upgrade Guide first.

    3. Whether install successfully#

    You can check whether the installation is successful or not by viewing the logs printed on the console.

    If there is an error message, check the specific reason for that error or refer to FAQ for help.

    4. Add mysql driver package#

    Note

    Because the mysql-connector-java driver is licensed under GPL 2.0, which does not meet the license policy of the Apache open source project, the official Apache release package does not include the mysql-connector-java-x.x.x.jar dependency by default starting from version 1.0.3. You need to add it to the corresponding lib directories during installation and deployment.

    Download the MySQL driver, taking version 5.1.49 as an example, from: https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.49/mysql-connector-java-5.1.49.jar
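    For example, with wget:

    wget https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.49/mysql-connector-java-5.1.49.jar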

    Copy the MySQL driver package to the lib directories:

    cp mysql-connector-java-5.1.49.jar {LINKIS_HOME}/lib/linkis-spring-cloud-services/linkis-mg-gateway/
    cp mysql-connector-java-5.1.49.jar {LINKIS_HOME}/lib/linkis-commons/public-module/

    5. Linkis quick startup#

    (1). Start services

    Run the following command in the installation directory to start all services.

    sh sbin/linkis-start-all.sh

    (2). Check if start successfully

    You can check the startup status of the services on Eureka. Here is how to check:

    Open http://${EUREKA_INSTALL_IP}:${EUREKA_PORT} on the browser and check if services have registered successfully.

    If you have not specified EUREKA_INSTALL_IP and EUREKA_PORT in config.sh, the HTTP address is http://127.0.0.1:20303
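    If you prefer the command line, the standard Eureka REST interface can be queried as well (a sketch assuming the default port and no gateway authentication in front of Eureka):

    # List the application names currently registered in Eureka
    curl -s -H "Accept: application/json" http://127.0.0.1:20303/eureka/apps | grep -o '"app":"[^"]*"'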

    As shown in the figure below, if all of the following microservices are registered in Eureka, it means they have started successfully and are able to work.

    Linkis1.0_Eureka

    - + \ No newline at end of file diff --git a/docs/1.1.0/deployment/sourcecode_hierarchical_structure/index.html b/docs/1.1.0/deployment/sourcecode_hierarchical_structure/index.html index 9716df55bf8..27de10e358d 100644 --- a/docs/1.1.0/deployment/sourcecode_hierarchical_structure/index.html +++ b/docs/1.1.0/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Source Code Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    Source Code Directory Structure

    Description of the Linkis source code hierarchical directory structure. If you want to learn more about each Linkis module, please check the related Linkis architecture design documents.

    |-- assembly-combined-package // Module for compiling the entire project
    |        |-- assembly-combined
    |        |-- bin
    |        |-- deploy-config
    |        |-- src
    |-- linkis-commons // Core abstraction, which contains all common modules
    |        |-- linkis-common // Common module, built-in many common tools
    |        |-- linkis-hadoop-common
    |        |-- linkis-httpclient // Java SDK top-level interface
    |        |-- linkis-message-scheduler
    |        |-- linkis-module
    |        |-- linkis-mybatis // SpringCloud's Mybatis module
    |        |-- linkis-protocol
    |        |-- linkis-rpc // RPC module, complex two-way communication based on Feign
    |        |-- linkis-scheduler // General scheduling module
    |        |-- linkis-storage
    |        |
    |-- linkis-computation-governance // Computation governance services
    |        |-- linkis-client // Java SDK, users can directly access Linkis through the Client
    |        |-- linkis-computation-governance-common
    |        |-- linkis-engineconn
    |        |-- linkis-engineconn-manager
    |        |-- linkis-entrance // General low-level entrance module
    |        |-- linkis-entrance-client
    |        |-- linkis-jdbc-driver
    |        |-- linkis-manager
    |
    |-- linkis-engineconn-plugins
    |        |-- engineconn-plugins
    |        |-- linkis-engineconn-plugin-framework
    |
    |-- linkis-extensions
    |        |-- linkis-io-file-client
    |-- linkis-orchestrator
    |        |-- linkis-code-orchestrator
    |        |-- linkis-computation-orchestrator
    |        |-- linkis-orchestrator-core
    |        |-- plugin
    |-- linkis-public-enhancements // Public enhancement services
    |        |-- linkis-bml // Material library
    |        |-- linkis-context-service // Unified context
    |        |-- linkis-datasource // Data source service
    |        |-- linkis-publicservice // Public service
    |-- linkis-spring-cloud-services // Microservice governance
    |        |-- linkis-service-discovery
    |        |-- linkis-service-gateway // Gateway
    |-- db // Database information
    |-- license-doc // License details
    |        |-- license // The license of the backend project
    |        |-- ui-license // License of the Linkis management console
    |-- tool // Tool scripts
    |        |-- check.sh
    |        |-- dependencies
    |
    |-- web // Management console code of Linkis
    |
    |-- scalastyle-config.xml // Scala code format check configuration file
    |-- CONTRIBUTING.md
    |-- CONTRIBUTING_CN.md
    |-- DISCLAIMER-WIP
    |-- LICENSE // LICENSE of the project source code
    |-- LICENSE-binary // LICENSE of the binary package
    |-- LICENSE-binary-ui // LICENSE of the front-end compiled package
    |-- NOTICE // NOTICE of the project source code
    |-- NOTICE-binary // NOTICE of the binary package
    |-- NOTICE-binary-ui // NOTICE of the front-end binary package
    |-- licenses-binary // Detailed dependency licenses of the binary package
    |-- licenses-binary-ui // Detailed dependency licenses of the front-end compiled package
    |-- README.md
    |-- README_CN.md
    - + \ No newline at end of file diff --git a/docs/1.1.0/deployment/start_metadatasource/index.html b/docs/1.1.0/deployment/start_metadatasource/index.html index 6ae467ba6cf..b8aae6daa11 100644 --- a/docs/1.1.0/deployment/start_metadatasource/index.html +++ b/docs/1.1.0/deployment/start_metadatasource/index.html @@ -7,7 +7,7 @@ DataSource | Apache Linkis - + @@ -71,7 +71,7 @@ }}
    - + \ No newline at end of file diff --git a/docs/1.1.0/deployment/web_install/index.html b/docs/1.1.0/deployment/web_install/index.html index 645e354e29a..81f73708270 100644 --- a/docs/1.1.0/deployment/web_install/index.html +++ b/docs/1.1.0/deployment/web_install/index.html @@ -7,7 +7,7 @@ Linkis Console Deployment | Apache Linkis - + @@ -21,7 +21,7 @@
    1. Copy the front-end package to the corresponding directory: /appcom/Install/linkis/dist; # The directory where the front-end package is decompressed

    2. Start the service: sudo systemctl restart nginx

    3. After execution, you can access it directly in the Chrome browser: http://nginx_ip:nginx_port

    3. Common problems#

    (1) Upload file size limit

    sudo vi /etc/nginx/nginx.conf

    Change upload size

    client_max_body_size 200m

    (2) Interface timeout

    sudo vi /etc/nginx/conf.d/linkis.conf

    Change interface timeout

    proxy_read_timeout 600s
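    Putting the two changes together, the relevant parts of the nginx configuration look roughly like this (a sketch; the actual server and location block layout depends on your linkis.conf). Remember to restart nginx afterwards, e.g. sudo systemctl restart nginx:

    # /etc/nginx/nginx.conf -- inside the http block: raise the upload size limit
    client_max_body_size 200m;

    # /etc/nginx/conf.d/linkis.conf -- inside the location block that proxies the Linkis API: raise the timeout
    proxy_read_timeout 600s;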
    - + \ No newline at end of file diff --git a/docs/1.1.0/development/linkis_compile_and_package/index.html b/docs/1.1.0/development/linkis_compile_and_package/index.html index 0bccf4a1d6f..a9fce9e4f55 100644 --- a/docs/1.1.0/development/linkis_compile_and_package/index.html +++ b/docs/1.1.0/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Compile And Package | Apache Linkis - + @@ -20,7 +20,7 @@ Modify the dependency hadoop-hdfs to hadoop-hdfs-client:

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId> <!-- Just replace this line with <artifactId>hadoop-hdfs-client</artifactId> -->
        <version>${hadoop.version}</version>
    </dependency>

    Modify hadoop-hdfs to:

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs-client</artifactId>
        <version>${hadoop.version}</version>
    </dependency>

    5.2 How to modify the Spark and Hive versions that Linkis depends on#

    Here is an example of changing the Spark version. Go to the directory where the Spark engine is located and manually modify the Spark version information in the pom.xml file as follows:

    cd incubator-linkis-x.x.x/linkis-engineconn-plugins/engineconn-plugins/spark
    vim pom.xml

    <properties>
        <spark.version>2.4.3</spark.version> <!-- Modify the Spark version number here -->
    </properties>

    Modifying the version of other engines is similar to modifying the Spark version. First, enter the directory where the relevant engine is located, and manually modify the engine version information in the pom.xml file.

    Then please refer to 4. Compile an engine

    - + \ No newline at end of file diff --git a/docs/1.1.0/development/linkis_config/index.html b/docs/1.1.0/development/linkis_config/index.html index c16fe6a91b3..811254158a9 100644 --- a/docs/1.1.0/development/linkis_config/index.html +++ b/docs/1.1.0/development/linkis_config/index.html @@ -7,7 +7,7 @@ Introduction to Linkis Configuration Parameters | Apache Linkis - + @@ -27,7 +27,7 @@ It mainly specifies the startup parameters and runtime parameters of the engine. These parameters can be set on the client side. It is recommended to use the client side for personalized submission settings. Only the default values ​​are set on the page.

    - + \ No newline at end of file diff --git a/docs/1.1.0/development/linkis_debug/index.html b/docs/1.1.0/development/linkis_debug/index.html index 73a9d938956..79bd63bc3d8 100644 --- a/docs/1.1.0/development/linkis_debug/index.html +++ b/docs/1.1.0/development/linkis_debug/index.html @@ -7,7 +7,7 @@ Linkis Debug | Apache Linkis - + @@ -44,7 +44,7 @@ [linkis-cg-engineplugin]nohup java -DserviceName=linkis-cg-engineplugin -Xmx512M -XX:+UseG1GC -Xloggc:/data/LinkisInstallDir/logs/linkis-cg-engineplugin-gc.log -cp /data/LinkisInstallDir/conf/:/data/LinkisInstallDir /lib/linkis-commons/public-module/*:/data/LinkisInstallDir/lib/linkis-computation-governance/linkis-cg-engineplugin/* org.apache.linkis.engineplugin.server.LinkisEngineConnPluginServer 2>&1> /data /LinkisInstallDir/logs/linkis-cg-engineplugin.out &

    Remote debugging service steps#

    todo

    - + \ No newline at end of file diff --git a/docs/1.1.0/development/linkis_debug_in_mac/index.html b/docs/1.1.0/development/linkis_debug_in_mac/index.html index 2e3c525937f..c2224c18265 100644 --- a/docs/1.1.0/development/linkis_debug_in_mac/index.html +++ b/docs/1.1.0/development/linkis_debug_in_mac/index.html @@ -7,7 +7,7 @@ Linkis Debug In Mac | Apache Linkis - + @@ -51,7 +51,7 @@ wds.linkis.engineconn.plugin.loader.store.path=/Users/leojie/other_project/apache/linkis/incubator-linkis/linkis-engineconn-plugins/shell/target/out

    These two configurations mainly specify the root directory of engine storage. Pointing them at target/out means that after engine-related code or configuration changes, the engineplugin service only needs to be restarted for the changes to take effect.

    3.12 Set sudo password-free for the current user#

    When the engine is started, sudo is required to execute the shell command that starts the engine process. On macOS, the current user generally needs to enter a password when using sudo, so passwordless sudo needs to be configured for the current user. The setting method is as follows:

    sudo chmod u-w /etc/sudoers
    sudo visudo
    Replace #%admin ALL=(ALL) ALL with %admin ALL=(ALL) NOPASSWD: ALL
    Save the file and exit

    3.13 Service Testing#

    Make sure that all of the above services have started successfully, then test by submitting a shell script job in Postman.

    First visit the login interface to generate a cookie:

    login

    Then submit the shell code for execution

    POST: http://127.0.0.1:9001/api/rest_j/v1/entrance/submit

    body parameter:

    {  "executionContent": {    "code": "echo 'hello'",    "runType": "shell"  },  "params": {    "variable": {      "testvar": "hello"    },    "configuration": {      "runtime": {},      "startup": {}    }  },  "source": {    "scriptPath": "file:///tmp/hadoop/test.sql"  },  "labels": {    "engineType": "shell-1",    "userCreator": "leojie-IDE"  }}

    Execution result:

    {    "method": "/api/entrance/submit",    "status": 0,    "message": "OK",    "data": {        "taskID": 1,        "execID": "exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0"    }}

    Finally, check the running status of the task and get the running result set:

    GET http://127.0.0.1:9001/api/rest_j/v1/entrance/exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0/progress

    {    "method": "/api/entrance/exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0/progress",    "status": 0,    "message": "OK",    "data": {        "progress": 1,        "progressInfo": [],        "execID": "exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0"    }}

    GET http://127.0.0.1:9001/api/rest_j/v1/jobhistory/1/get

    GET http://127.0.0.1:9001/api/rest_j/v1/filesystem/openFile?path=file:///Users/leojie/software/linkis/data/resultSetDir/leojie/linkis/2022-07-16/214859/IDE/1/1_0.dolphin

    {    "method": "/api/filesystem/openFile",    "status": 0,    "message": "OK",    "data": {        "metadata": "NULL",        "totalPage": 0,        "totalLine": 1,        "page": 1,        "type": "1",        "fileContent": [            [                "hello"            ]        ]    }}
    - + \ No newline at end of file diff --git a/docs/1.1.0/development/new_engine_conn/index.html b/docs/1.1.0/development/new_engine_conn/index.html index fffaa375f02..4cba923c7f1 100644 --- a/docs/1.1.0/development/new_engine_conn/index.html +++ b/docs/1.1.0/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ How To Quickly Implement A New Engine | Apache Linkis - + @@ -53,7 +53,7 @@ const NODEICON = { [NODETYPE.JDBC]: { icon: jdbc, class: {'jdbc': true} },}

    Add the icon of the new engine in the web/src/apps/workflows/module/process/images/newIcon/ directory

    web/src/apps/workflows/module/process/images/newIcon/jdbc

    Also, when contributing to the community, please consider the license or copyright of the svg file.

    3. Chapter Summary#

    The above content records the implementation process of a new engine, as well as some additional engine configuration that needs to be done. At present, the process of adding a new engine is still relatively cumbersome; we hope the expansion and installation of new engines can be optimized in subsequent versions.

    - + \ No newline at end of file diff --git a/docs/1.1.0/development/web_build/index.html b/docs/1.1.0/development/web_build/index.html index 710a457cff2..d928a29ed55 100644 --- a/docs/1.1.0/development/web_build/index.html +++ b/docs/1.1.0/development/web_build/index.html @@ -7,7 +7,7 @@ Linkis Console Compile | Apache Linkis - + @@ -17,7 +17,7 @@ When you run the project in this way, the effect of your code changes will be dynamically reflected in the browser.

    Note: because the project's front end and back end are developed separately, when running in a local browser the browser needs to be set to allow cross-domain requests in order to access the back-end interface. For specific settings, please refer to solving the Chrome cross-domain problem.

    6. Common problem#

    6.1 npm install cannot succeed#

    If you encounter this situation, you can use the domestic Taobao npm mirror:

    npm install -g cnpm --registry=https://registry.npm.taobao.org

    Then, replace the npm install command by executing the following command

    cnpm install

    Note that when the project is started and packaged, you can still use the npm run build and npm run serve commands

    - + \ No newline at end of file diff --git a/docs/1.1.0/engine_usage/flink/index.html b/docs/1.1.0/engine_usage/flink/index.html index 0990712ce11..03fa9220ec7 100644 --- a/docs/1.1.0/engine_usage/flink/index.html +++ b/docs/1.1.0/engine_usage/flink/index.html @@ -7,7 +7,7 @@ Flink Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ EngineConnPlugin Installation

    2.3 Flink engine tags#

    Linkis 1.0 manages engines through labels, so we need to insert label data into our database. The way of inserting is shown below.

    EngineConnPlugin Installation > 2.2 Configuration modification of management console (optional)

    3. The use of Flink engine#

    Preparation: queue setting#

    The Flink engine of Linkis 1.0 is started via Flink on YARN, so the queue used by the user needs to be specified. How to specify the queue is shown in Figure 3-1.

    Figure 3-1 Queue settings

    Background knowledge: two ways to use the Flink engine#

    Linkis' Flink engine has two execution modes. One is the ComputationEngineConn mode, which is mainly used in DSS-Scriptis or Streamis-Datasource for debugging, sampling, and verifying the correctness of Flink code; the other is the OnceEngineConn mode, which is mainly used in the Streamis production center to start a streaming application.

    Background knowledge: FlinkSQL Connector plugins#

    FlinkSQL supports a variety of data sources, such as binlog, kafka, hive, etc. If you want to use these data sources in Flink code, you need to put the plugin jar packages of these connectors into the lib directory of the flink engine and restart the Linkis EnginePlugin service. For example, if you want to use binlog as a data source in your FlinkSQL, you need to put flink-connector-mysql-cdc-1.1.1.jar into the lib of the flink engine.

    cd ${LINKIS_HOME}/sbin
    sh linkis-daemon.sh restart cg-engineplugin

    3.1 ComputationEngineConn method#

    To facilitate sampling and debugging, we have added an fql script type to Scriptis, which is specifically used to execute FlinkSQL. You need to ensure that your DSS has been upgraded to DSS 1.0.0; after that, you can directly enter Scriptis and create a new fql script for editing and execution.

    A FlinkSQL writing example, taking binlog as the data source:

    CREATE TABLE mysql_binlog (
     id INT NOT NULL,
     name STRING,
     age INT
    ) WITH (
     'connector' = 'mysql-cdc',
     'hostname' = 'ip',
     'port' = 'port',
     'username' = 'username',
     'password' = 'password',
     'database-name' = 'dbname',
     'table-name' = 'tablename',
     'debezium.snapshot.locking.mode' = 'none'  -- recommended to add, otherwise the table will be locked
    );
    select * from mysql_binlog where id > 10;

    When debugging with select syntax in Scriptis, the Flink engine has an automatic cancel mechanism: when the specified time is reached or the number of sampled rows reaches the specified limit, the Flink engine actively cancels the task and persists the result set obtained so far; the front end then calls the result-set-opening interface to display the result set.

    3.2 Task submission via Linkis-cli#

    After Linkis 1.0, a cli method is provided to submit tasks. We only need to specify the corresponding EngineConn and CodeType tag types. The use of Flink is as follows:

    sh ./bin/linkis-cli -engineType flink-1.12.2 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to: Linkis CLI Manual.

    3.3 OnceEngineConn method#

    The OnceEngineConn mode is used to formally start Flink streaming applications. Specifically, it calls LinkisManager's createEngineConn interface through LinkisManagerClient and sends the code to the created Flink engine, which then starts executing. This mode can be called by other systems, such as Streamis. Using the Client is also very simple: first create a new maven project, or introduce the following dependency into your project:

    <dependency>
        <groupId>com.webank.wedatasphere.linkis</groupId>
        <artifactId>linkis-computation-client</artifactId>
        <version>${linkis.version}</version>
    </dependency>

    Then create a new Scala test file and click Execute; it completes the analysis of reading from one binlog data source and inserting into another MySQL database table. Note that you must create a resources directory in the maven project and place a linkis.properties file in it, specifying the gateway address and API version of Linkis, such as:

    wds.linkis.server.version=v1
    wds.linkis.gateway.url=http://ip:9001/

    object OnceJobTest {
      def main(args: Array[String]): Unit = {
        val sql = """CREATE TABLE mysql_binlog (
                    | id INT NOT NULL,
                    | name STRING,
                    | age INT
                    |) WITH (
                    | 'connector' = 'mysql-cdc',
                    | 'hostname' = 'ip',
                    | 'port' = 'port',
                    | 'username' = '${username}',
                    | 'password' = '${password}',
                    | 'database-name' = '${database}',
                    | 'table-name' = '${tablename}',
                    | 'debezium.snapshot.locking.mode' = 'none'
                    |);
                    |CREATE TABLE sink_table (
                    | id INT NOT NULL,
                    | name STRING,
                    | age INT,
                    | primary key(id) not enforced
                    |) WITH (
                    |  'connector' = 'jdbc',
                    |  'url' = 'jdbc:mysql://${ip}:port/${database}',
                    |  'table-name' = '${tablename}',
                    |  'driver' = 'com.mysql.jdbc.Driver',
                    |  'username' = '${username}',
                    |  'password' = '${password}'
                    |);
                    |INSERT INTO sink_table SELECT id, name, age FROM mysql_binlog;
                    |""".stripMargin
        val onceJob = SimpleOnceJob.builder().setCreateService("Flink-Test")
          .addLabel(LabelKeyUtils.ENGINE_TYPE_LABEL_KEY, "flink-1.12.2")
          .addLabel(LabelKeyUtils.USER_CREATOR_LABEL_KEY, "hadoop-Streamis")
          .addLabel(LabelKeyUtils.ENGINE_CONN_MODE_LABEL_KEY, "once")
          .addStartupParam(Configuration.IS_TEST_MODE.key, true)
          // .addStartupParam("label." + LabelKeyConstant.CODE_TYPE_KEY, "sql")
          .setMaxSubmitTime(300000)
          .addExecuteUser("hadoop")
          .addJobContent("runType", "sql")
          .addJobContent("code", sql)
          .addSource("jobName", "OnceJobTest")
          .build()
        onceJob.submit()
        println(onceJob.getId)
        onceJob.waitForCompleted()
        System.exit(0)
      }
    }
    - + \ No newline at end of file diff --git a/docs/1.1.0/engine_usage/hive/index.html b/docs/1.1.0/engine_usage/hive/index.html index 9d98c5f084d..25acaf9da60 100644 --- a/docs/1.1.0/engine_usage/hive/index.html +++ b/docs/1.1.0/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive Engine Usage | Apache Linkis - + @@ -26,7 +26,7 @@ </loggers></configuration>
    - + \ No newline at end of file diff --git a/docs/1.1.0/engine_usage/jdbc/index.html b/docs/1.1.0/engine_usage/jdbc/index.html index 41969782e67..0cea1f63da5 100644 --- a/docs/1.1.0/engine_usage/jdbc/index.html +++ b/docs/1.1.0/engine_usage/jdbc/index.html @@ -7,7 +7,7 @@ JDBC Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "jdbc-4"); // required engineType Label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "jdbc"); // required codeType

    3.2 How to use Linkis-cli#

    After Linkis 1.0, you can submit tasks through cli. We only need to specify the corresponding EngineConn and CodeType tag types. The use of JDBC is as follows:

    sh ./bin/linkis-cli -engineType jdbc-4 -codeType jdbc -code "show tables"  -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to the Linkis CLI Manual.

    3.3 How to use Scriptis#

    The way to use Scriptis is the simplest. You can go directly to Scriptis, right-click the directory and create a new JDBC script, write JDBC code and click Execute.

    The execution principle of JDBC is to load the JDBC Driver, submit the SQL to the SQL server for execution, and then obtain the result set and return it.

    Figure 3-2 Screenshot of the execution effect of JDBC

    4. JDBC EngineConn user settings#

    JDBC user settings are mainly the JDBC connection information. It is recommended that users encrypt and manage the password and other sensitive information.
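    The connection information is typically a small set of properties like the following (a sketch; parameter names such as wds.linkis.jdbc.connect.url may vary between Linkis versions, so check the JDBC engine documentation of your release):

    # Hedged example of JDBC connection settings; names may differ per Linkis version
    wds.linkis.jdbc.connect.url=jdbc:mysql://127.0.0.1:3306/test
    wds.linkis.jdbc.username=test_user
    wds.linkis.jdbc.password=******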

    - + \ No newline at end of file diff --git a/docs/1.1.0/engine_usage/overview/index.html b/docs/1.1.0/engine_usage/overview/index.html index a8d8b2de629..80cdb57f7e7 100644 --- a/docs/1.1.0/engine_usage/overview/index.html +++ b/docs/1.1.0/engine_usage/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -16,7 +16,7 @@         The engine is a component that provides users with data processing and analysis capabilities. Currently, it has been connected to Linkis's engine, including mainstream big data computing engines Spark, Hive, Presto, etc. , There are also engines with the ability to process data in scripts such as python and Shell. DataSphereStudio is a one-stop data operation platform docked with Linkis. Users can conveniently use the engine supported by Linkis in DataSphereStudio to complete interactive data analysis tasks and workflow tasks.

    | Engine | Whether to support Scriptis | Whether to support workflow |
    | --- | --- | --- |
    | Spark | Support | Support |
    | Hive | Support | Support |
    | Presto | Support | Support |
    | ElasticSearch | Support | Support |
    | Python | Support | Support |
    | Shell | Support | Support |
    | JDBC | Support | Support |
    | MySQL | Support | Support |
    | Flink | Support | Support |

    2. Document structure#

    You can refer to the following documents for the engines that have already been integrated.

    - + \ No newline at end of file diff --git a/docs/1.1.0/engine_usage/python/index.html b/docs/1.1.0/engine_usage/python/index.html index 0f9efc8b9fd..fcf8c6a3413 100644 --- a/docs/1.1.0/engine_usage/python/index.html +++ b/docs/1.1.0/engine_usage/python/index.html @@ -7,7 +7,7 @@ Python Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Gateway, and then the Python EngineConn submits the code to the python executor for execution.

    Figure 3-1 Screenshot of the execution effect of python

    4. Python EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the Python version and the modules that Python needs to load.
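    For example, the Python version can be switched via the pythonVersion parameter listed in the configuration reference, and extra module paths can be added via python.path (the second path below is purely illustrative):

    # Point the engine at an Anaconda python and add a shared module path (illustrative path)
    pythonVersion=/appcom/Install/anaconda3/bin/python
    python.path=/appcom/Install/shared-python-modules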

    Figure 4-1 User-defined configuration management console of python

    - + \ No newline at end of file diff --git a/docs/1.1.0/engine_usage/shell/index.html b/docs/1.1.0/engine_usage/shell/index.html index dc00e4113d3..d296877ed9e 100644 --- a/docs/1.1.0/engine_usage/shell/index.html +++ b/docs/1.1.0/engine_usage/shell/index.html @@ -7,7 +7,7 @@ Shell Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "shell-1"); // required engineType Label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "shell"); // required codeType

    3.2 How to use Linkis-cli#

    After Linkis 1.0, you can submit tasks through cli. We only need to specify the corresponding EngineConn and CodeType tag types. The use of shell is as follows:

    sh ./bin/linkis-cli -engineType shell-1 -codeType shell -code "echo \"hello\" "  -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to the Linkis CLI Manual.

    3.3 How to use Scriptis#

    The use of Scriptis is the simplest. You can directly enter Scriptis, right-click the directory and create a new shell script, write shell code and click Execute.

    The execution principle of the shell engine is that the shell EngineConn starts a system process via Java's built-in ProcessBuilder, redirects the process output to the EngineConn, and writes it to the log.

    Figure 3-1 Screenshot of shell execution effect

    4. Shell EngineConn user settings#

    The shell EngineConn can generally set the maximum memory of the EngineConn JVM.

    - + \ No newline at end of file diff --git a/docs/1.1.0/engine_usage/spark/index.html b/docs/1.1.0/engine_usage/spark/index.html index 88660f24ede..8112dfec52b 100644 --- a/docs/1.1.0/engine_usage/spark/index.html +++ b/docs/1.1.0/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Figure 3-4 pyspark execution mode

    4. Spark EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the number of executors of the Spark session and the executor memory. These parameters are provided so that users can set their own Spark parameters more freely; other Spark parameters can also be modified, such as the Python version for pyspark.
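    When submitting through the entrance REST interface, such Spark parameters can be passed in the startup section of the request body. A sketch (the two spark.* values are illustrative only):

    {
      "executionContent": { "code": "show tables", "runType": "sql" },
      "params": {
        "configuration": {
          "startup": {
            "spark.executor.instances": 2,
            "spark.executor.memory": "4g"
          }
        }
      },
      "labels": { "engineType": "spark-2.4.3", "userCreator": "hadoop-IDE" }
    }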

    Figure 4-1 Spark user-defined configuration management console

    - + \ No newline at end of file diff --git a/docs/1.1.0/introduction/index.html b/docs/1.1.0/introduction/index.html index 94f29a0e4cb..e05d2d55024 100644 --- a/docs/1.1.0/introduction/index.html +++ b/docs/1.1.0/introduction/index.html @@ -7,7 +7,7 @@ Introduction | Apache Linkis - + @@ -20,7 +20,7 @@ Since the first release of Linkis in 2019, it has accumulated more than 700 trial companies and 1000+ sandbox trial users, which involving diverse industries, from finance, banking, tele-communication, to manufactory, internet companies and so on.

    - + \ No newline at end of file diff --git a/docs/1.1.0/release/index.html b/docs/1.1.0/release/index.html index 49b002b452d..058d85444ff 100644 --- a/docs/1.1.0/release/index.html +++ b/docs/1.1.0/release/index.html @@ -7,7 +7,7 @@ Version Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    Version Overview

    Configuration Item#

    | Module Name (Service Name) | Type | Parameter Name | Default Value | Description |
    | --- | --- | --- | --- | --- |
    | ps-metadatamanager | New | wds.linkis.server.mdm.service.lib.dir | /lib/linkis-public-enhancements/linkis-ps-metadatamanager/service | Set the relative path to load the data source jar packages, which will be loaded by reflection |
    | ps-metadatamanager | New | wds.linkis.server.mdm.service.instance.expire-in-seconds | 60 | Set the expiration time for loading sub-services, after which the service will not be loaded |
    | ps-metadatamanager | New | wds.linkis.server.dsm.app.name | linkis-ps-data-source-manager | Set the name of the service that provides data source information |
    | ps-metadatamanager | New | wds.linkis.server.mdm.service.app.name | linkis-ps-metadatamanager | Service name for getting metadata information |
    | ps-metadatamanager | New | wds.linkis.server.mdm.service.kerberos.principle | hadoop/HOST@EXAMPLE.COM | Set the kerberos principle for the linkis-metadata hive service |
    | ps-metadatamanager | New | wds.linkis.server.mdm.service.user | hadoop | Set the access user of the hive service |
    | ps-metadatamanager | New | wds.linkis.server.mdm.service.kerberos.krb5.path | "" | Set the kerberos krb5 path used by the hive service |
    | ps-metadatamanager | New | wds.linkis.server.mdm.service.temp.location | classpath:/tmp | Set the temporary path for kafka and hive |
    | ps-metadatamanager | New | wds.linkis.server.mdm.service.sql.driver | com.mysql.jdbc.Driver | Set the driver of the mysql service |
    | ps-metadatamanager | New | wds.linkis.server.mdm.service.sql.url | jdbc:mysql://%s:%s/%s | Set the url format of the mysql service |
    | ps-metadatamanager | New | wds.linkis.server.mdm.service.sql.connect.timeout | 3000 | Set the connection timeout for connecting to the mysql service |
    | ps-metadatamanager | New | wds.linkis.server.mdm.service.sql.socket.timeout | 6000 | Set the socket timeout for opening the mysql service |
    | ps-metadatamanager | New | wds.linkis.server.mdm.service.temp.location | /tmp/keytab | Set the local temporary storage path of the service, mainly to store authentication files downloaded from the bml material service |
    | ps-data-source-manager | New | wds.linkis.server.dsm.auth.admin | hadoop | User for permission authentication of part of the datasourcemanager interfaces |
    | cg-engineconnmanager | Modified | wds.linkis.engineconn.max.free.time | 1h -> 0.5h | Maximum idle time of EngineConn changed from 1h to 0.5h |

    DB Table Changes#

    For details, see the upgrade schema file db/upgrade/1.1.0_schema in the corresponding branch of the code repository (https://github.com/apache/incubator-linkis).

    - + \ No newline at end of file diff --git a/docs/1.1.0/tags/index.html b/docs/1.1.0/tags/index.html index b508346967b..2d74937a04d 100644 --- a/docs/1.1.0/tags/index.html +++ b/docs/1.1.0/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -15,7 +15,7 @@

    Tags

    - + \ No newline at end of file diff --git a/docs/1.1.0/tuning_and_troubleshooting/configuration/index.html b/docs/1.1.0/tuning_and_troubleshooting/configuration/index.html index 2208bb07c47..361307f4598 100644 --- a/docs/1.1.0/tuning_and_troubleshooting/configuration/index.html +++ b/docs/1.1.0/tuning_and_troubleshooting/configuration/index.html @@ -7,7 +7,7 @@ Configurations | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    Linkis1.0 Configurations

    The configuration of Linkis 1.0 is simplified relative to Linkis 0.x. A public configuration file linkis.properties is provided in the conf directory to avoid having to configure common parameters in multiple microservices at the same time. This document lists the parameters of Linkis 1.0 by module.

            Please note: this article only lists the Linkis configuration parameters that affect operating performance or depend on the environment. Configuration parameters that users do not need to care about have been omitted; interested users can browse the source code.

    1. General configuration#

            The general configuration can be set in the global linkis.properties; set once, it takes effect for every microservice.

    1.1 Global configurations#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.encoding | utf-8 | Linkis default encoding format |
    | wds.linkis.date.pattern | yyyy-MM-dd'T'HH:mm:ssZ | Default date format |
    | wds.linkis.test.mode | false | Whether to enable debugging mode; if set to true, all microservices support password-free login and all EngineConns open remote debugging ports |
    | wds.linkis.test.user | None | When wds.linkis.test.mode=true, the default login user for password-free login |
    | wds.linkis.home | /appcom/Install/LinkisInstall | Linkis installation directory; if it does not exist, the value of LINKIS_HOME is used automatically |
    | wds.linkis.httpclient.default.connect.timeOut | 50000 | Linkis HttpClient default connection timeout |
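    For instance, to enable debugging mode globally, you would set the two test-mode parameters above in linkis.properties (development environments only, since this disables password verification):

    # linkis.properties -- development only: enables password-free login for all microservices
    wds.linkis.test.mode=true
    wds.linkis.test.user=hadoop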

    1.2 LDAP configurations#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.ldap.proxy.url | None | LDAP URL address |
    | wds.linkis.ldap.proxy.baseDN | None | LDAP baseDN address |
    | wds.linkis.ldap.proxy.userNameFormat | None | |

    1.3 Hadoop configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.hadoop.root.user | hadoop | HDFS super user |
    | wds.linkis.filesystem.hdfs.root.path | None | User's HDFS default root path |
    | wds.linkis.keytab.enable | false | Whether to enable kerberos |
    | wds.linkis.keytab.file | /appcom/keytab | Kerberos keytab path, effective only when wds.linkis.keytab.enable=true |
    | wds.linkis.keytab.host.enabled | false | |
    | wds.linkis.keytab.host | 127.0.0.1 | |
    | hadoop.config.dir | None | If not configured, it will be read from the environment variable HADOOP_CONF_DIR |
    | wds.linkis.hadoop.external.conf.dir.prefix | /appcom/config/external-conf/hadoop | Additional hadoop configuration |

    1.4 Linkis RPC configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.rpc.broadcast.thread.num | 10 | Linkis RPC broadcast thread number (recommended default value) |
    | wds.linkis.ms.rpc.sync.timeout | 60000 | Default processing timeout of the Linkis RPC Receiver |
    | wds.linkis.rpc.eureka.client.refresh.interval | 1s | Refresh interval of the Eureka client's microservice list (recommended default value) |
    | wds.linkis.rpc.eureka.client.refresh.wait.time.max | 1m | Maximum waiting time for refresh (recommended default value) |
    | wds.linkis.rpc.receiver.asyn.consumer.thread.max | 10 | Maximum number of Receiver Consumer threads (if there are many online users, it is recommended to increase this parameter appropriately) |
    | wds.linkis.rpc.receiver.asyn.consumer.freeTime.max | 2m | Receiver Consumer maximum idle time |
    | wds.linkis.rpc.receiver.asyn.queue.size.max | 1000 | Maximum number of buffers in the receiver consumption queue (if there are many online users, it is recommended to increase this parameter appropriately) |
    | wds.linkis.rpc.sender.asyn.consumer.thread.max | 5 | Sender Consumer maximum number of threads |
    | wds.linkis.rpc.sender.asyn.consumer.freeTime.max | 2m | Sender Consumer maximum idle time |
    | wds.linkis.rpc.sender.asyn.queue.size.max | 300 | Maximum number of buffers in the sender consumption queue |

    2. Computation governance configuration parameters#

    2.1 Entrance configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.spark.engine.version | 2.4.3 | The default Spark version used when the user submits a script without specifying a version |
    | wds.linkis.hive.engine.version | 1.2.1 | The default Hive version used when the user submits a script without specifying a version |
    | wds.linkis.python.engine.version | python2 | The default Python version used when the user submits a script without specifying a version |
    | wds.linkis.jdbc.engine.version | 4 | The default JDBC version used when the user submits a script without specifying a version |
    | wds.linkis.shell.engine.version | 1 | The default shell version used when the user submits a script without specifying a version |
    | wds.linkis.appconn.engine.version | v1 | The default AppConn version used when the user submits a script without specifying a version |
    | wds.linkis.entrance.scheduler.maxParallelismUsers | 1000 | Maximum number of concurrent users supported by Entrance |
    | wds.linkis.entrance.job.persist.wait.max | 5m | Maximum time Entrance waits for JobHistory to persist a Job |
    | wds.linkis.entrance.config.log.path | None | If not configured, the value of wds.linkis.filesystem.hdfs.root.path is used by default |
    | wds.linkis.default.requestApplication.name | IDE | The default submission system when the submission system is not specified |
    | wds.linkis.default.runType | sql | The default script type when the script type is not specified |
    | wds.linkis.warn.log.exclude | org.apache,hive.ql,hive.metastore,com.netflix,com.webank.wedatasphere | Real-time WARN-level logs that are not output to the client by default |
    | wds.linkis.log.exclude | org.apache, hive.ql, hive.metastore, com.netflix, com.webank.wedatasphere, com.webank | Real-time INFO-level logs that are not output to the client by default |
    | wds.linkis.instance | 3 | User's default number of concurrent jobs per engine |
    | wds.linkis.max.ask.executor.time | 5m | Maximum time to apply to LinkisManager for an available EngineConn |
    | wds.linkis.hive.special.log.include | org.apache.hadoop.hive.ql.exec.Task | Logs not filtered by default when pushing Hive logs to the client |
    | wds.linkis.spark.special.log.include | org.apache.linkis.engine.spark.utils.JobProgressUtil | Logs not filtered by default when pushing Spark logs to the client |
    | wds.linkis.entrance.shell.danger.check.enabled | false | Whether to check and block dangerous shell syntax |
    | wds.linkis.shell.danger.usage | rm,sh,find,kill,python,for,source,hdfs,hadoop,spark-sql,spark-submit,pyspark,spark-shell,hive,yarn | Default dangerous shell syntax |
    | wds.linkis.shell.white.usage | cd,ls | Shell whitelist syntax |
    | wds.linkis.sql.default.limit | 5000 | Default maximum number of rows returned in an SQL result set |

    2.2 EngineConn configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.engineconn.resultSet.default.store.path | hdfs:///tmp | Default storage path for job result sets |
    | wds.linkis.engine.resultSet.cache.max | 0k | Threshold below which EngineConn returns the result set to Entrance without writing it to disk |
    | wds.linkis.engine.default.limit | 5000 | |
    | wds.linkis.engine.lock.expire.time | 120000 | Maximum idle time of the engine lock, i.e. how long after Entrance applies for the lock without submitting code to EngineConn the lock is released |
    | wds.linkis.engineconn.ignore.words | org.apache.spark.deploy.yarn.Client | Logs ignored by default when the Engine pushes logs to the Entrance side |
    | wds.linkis.engineconn.pass.words | org.apache.hadoop.hive.ql.exec.Task | Logs that must be pushed by default when the Engine pushes logs to the Entrance side |
    | wds.linkis.engineconn.heartbeat.time | 3m | Default heartbeat interval from EngineConn to LinkisManager |
    | wds.linkis.engineconn.max.free.time | 1h | EngineConn's maximum idle time |

    2.3 EngineConnManager configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.ecm.memory.max | 80g | Maximum memory ECM can use to start EngineConns |
    | wds.linkis.ecm.cores.max | 50 | Maximum number of CPUs ECM can use to start EngineConns |
    | wds.linkis.ecm.engineconn.instances.max | 50 | Maximum number of EngineConns that can be started; generally recommended to be the same as wds.linkis.ecm.cores.max |
    | wds.linkis.ecm.protected.memory | 4g | ECM protected memory; the memory used by ECM to start EngineConns cannot exceed wds.linkis.ecm.memory.max - wds.linkis.ecm.protected.memory |
    | wds.linkis.ecm.protected.cores.max | 2 | Number of protected ECM CPUs; same meaning as wds.linkis.ecm.protected.memory |
    | wds.linkis.ecm.protected.engine.instances | 2 | Number of protected ECM instances |
    | wds.linkis.engineconn.wait.callback.pid | 3s | Waiting time for EngineConn to return the pid |

    2.4 LinkisManager configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.manager.am.engine.start.max.time | 10m | Maximum time for LinkisManager to start a new EngineConn |
    | wds.linkis.manager.am.engine.reuse.max.time | 5m | Maximum selection time for LinkisManager to reuse an existing EngineConn |
    | wds.linkis.manager.am.engine.reuse.count.limit | 10 | Maximum polling count for LinkisManager to reuse an existing EngineConn |
    | wds.linkis.multi.user.engine.types | jdbc,es,presto | Engine types whose users are not used as reuse rules when LinkisManager reuses an existing EngineConn |
    | wds.linkis.rm.instance | 10 | Default maximum number of instances per user per engine |
    | wds.linkis.rm.yarnqueue.cores.max | 150 | Maximum number of cores per user in each engine's usage queue |
    | wds.linkis.rm.yarnqueue.memory.max | 450g | Maximum amount of memory per user in each engine's usage queue |
    | wds.linkis.rm.yarnqueue.instance.max | 30 | Maximum number of applications launched by each user in each engine's queue |

    3. Engine configuration parameters#

    3.1 JDBC engine configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.jdbc.default.limit | 5000 | Default maximum number of rows returned in a result set |
    | wds.linkis.jdbc.support.dbs | mysql=>com.mysql.jdbc.Driver,postgresql=>org.postgresql.Driver,oracle=>oracle.jdbc.driver.OracleDriver,hive2=>org.apache.hive.jdbc.HiveDriver,presto=>com.facebook.presto.jdbc.PrestoDriver | Drivers supported by the JDBC engine |
    | wds.linkis.engineconn.jdbc.concurrent.limit | 100 | Maximum number of concurrent SQL executions |

    3.2 Python engine configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | pythonVersion | /appcom/Install/anaconda3/bin/python | Python command path |
    | python.path | None | Specify an additional path for Python, which only accepts shared storage paths |

    3.3 Spark engine configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.engine.spark.language-repl.init.time | 30s | Maximum initialization time for Scala and Python command interpreters |
    | PYSPARK_DRIVER_PYTHON | python | Python command path |
    | wds.linkis.server.spark-submit | spark-submit | spark-submit command path |

    4. PublicEnhancements configuration parameters#

    4.1 BML configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.bml.dws.version | v1 | Version number for Linkis Restful requests |
    | wds.linkis.bml.auth.token.key | Validation-Code | Password-free token-key for BML requests |
    | wds.linkis.bml.auth.token.value | BML-AUTH | Password-free token-value for BML requests |
    | wds.linkis.bml.hdfs.prefix | /tmp/linkis | Prefix file path of BML files stored on hdfs |

    4.2 Metadata configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | hadoop.config.dir | /appcom/config/hadoop-config | If it does not exist, the value of the environment variable HADOOP_CONF_DIR is used by default |
    | hive.config.dir | /appcom/config/hive-config | If it does not exist, the value of the environment variable HIVE_CONF_DIR is used by default |
    | hive.meta.url | None | URL of the HiveMetaStore database; if hive.config.dir is not configured, this value must be configured |
    | hive.meta.user | None | User of the HiveMetaStore database |
    | hive.meta.password | None | Password of the HiveMetaStore database |

    4.3 JobHistory configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.jobhistory.admin | None | Default Admin account, used to specify which users can view everyone's execution history |

    4.4 FileSystem configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.filesystem.root.path | file:///tmp/linkis/ | User's Linux local root directory |
    | wds.linkis.filesystem.hdfs.root.path | hdfs:///tmp/ | User's HDFS root directory |
    | wds.linkis.workspace.filesystem.hdfsuserrootpath.suffix | /linkis/ | First-level suffix after the user's HDFS root directory; the user's actual root directory is ${hdfs.root.path}\${user}\${hdfsuserrootpath.suffix} |
    | wds.linkis.workspace.resultset.download.is.limit | true | Whether to limit the number of rows when the client downloads a result set |
    | wds.linkis.workspace.resultset.download.maxsize.csv | 5000 | Row limit when the result set is downloaded as a CSV file |
    | wds.linkis.workspace.resultset.download.maxsize.excel | 5000 | Row limit when the result set is downloaded as an Excel file |
    | wds.linkis.workspace.filesystem.get.timeout | 2000L | Maximum timeout for requests to the underlying filesystem (if the performance of your HDFS or Linux machine is low, it is recommended to increase this appropriately) |

    4.5 UDF configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.udf.share.path | /mnt/bdap/udf | Storage path of shared UDFs; it is recommended to set it to an HDFS path

    5. MicroService configuration parameters#

    5.1 Gateway configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.gateway.conf.enable.proxy.user | false | Whether to enable proxy-user mode; if enabled, the login user's requests are proxied to the proxy user for execution
    wds.linkis.gateway.conf.proxy.user.config | proxy.properties | File storing the proxy rules
    wds.linkis.gateway.conf.proxy.user.scan.interval | 600000 | Refresh interval of the proxy file
    wds.linkis.gateway.conf.enable.token.auth | false | Whether to enable token login mode; if enabled, access to Linkis via tokens is allowed
    wds.linkis.gateway.conf.token.auth.config | token.properties | File storing the token rules
    wds.linkis.gateway.conf.token.auth.scan.interval | 600000 | Refresh interval of the token file
    wds.linkis.gateway.conf.url.pass.auth | /dws/ | Requests released by default without login verification
    wds.linkis.gateway.conf.enable.sso | false | Whether to enable SSO user login mode
    wds.linkis.gateway.conf.sso.interceptor | None | If SSO login mode is enabled, the user needs to implement SSOInterceptor to jump to the SSO login page
    wds.linkis.admin.user | hadoop | Administrator user list
    wds.linkis.login_encrypt.enable | false | Whether to enable RSA-encrypted transmission of the password when a user logs in
    wds.linkis.enable.gateway.auth | false | Whether to enable the Gateway IP whitelist mechanism
    wds.linkis.gateway.auth.file | auth.txt | File storing the IP whitelist

    6. DataSource and Metadata Service configuration parameters#

    6.1 MetaData Service configuration parameters#

    From Version | Parameter name | Default value | Description
    v1.1.0 | wds.linkis.server.mdm.service.lib.dir | /lib/linkis-pulicxxxx-/linkis-metdata-manager/service | Specifies the relative path of the service to be loaded
    v1.1.0 | wds.linkis.server.mdm.service.instance.expire-in-seconds | 60 | Sets the service loading timeout; if it exceeds the specified time, the service will not be loaded
    v1.1.0 | wds.linkis.server.dsm.app.name | linkis-ps-data-source-manager | Sets the service from which data sources are obtained
    v1.1.0 | wds.linkis.server.mdm.service.kerberos.principle | hadoop/HOST@EXAMPLE.COM | Sets the Kerberos principal for the linkis-metadata hive service
    v1.1.0 | wds.linkis.server.mdm.service.user | hadoop | Sets the user for the linkis-metadata hive service
    v1.1.0 | wds.linkis.server.mdm.service.kerberos.krb5.path | "" | Sets the Kerberos krb5 path for the linkis-metadata hive service
    v1.1.0 | wds.linkis.server.mdm.service.temp.location | classpath:/tmp | Sets the temporary location for the linkis-metadata hive and kafka services
    v1.1.0 | wds.linkis.server.mdm.service.sql.driver | com.mysql.jdbc.Driver | Sets the driver for the hive-metadata mysql service
    v1.1.0 | wds.linkis.server.mdm.service.sql.url | jdbc:mysql://%s:%s/%s | Sets the URL format for the hive-metadata mysql service
    v1.1.0 | wds.linkis.server.mdm.service.sql.connect.timeout | 3000 | Sets the MySQL connect timeout for the hive-metadata mysql service
    v1.1.0 | wds.linkis.server.mdm.service.sql.socket.timeout | 6000 | Sets the socket open timeout for the hive-metadata mysql service
    - + \ No newline at end of file diff --git a/docs/1.1.0/tuning_and_troubleshooting/overview/index.html b/docs/1.1.0/tuning_and_troubleshooting/overview/index.html index 6b5b52a1601..c544fa603a2 100644 --- a/docs/1.1.0/tuning_and_troubleshooting/overview/index.html +++ b/docs/1.1.0/tuning_and_troubleshooting/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -17,7 +17,7 @@ The compatibility of the OS version matters: some system versions may have command incompatibilities. For example, the poor compatibility of yum on Ubuntu may cause yum-related errors during installation and deployment. In addition, it is recommended not to use Windows for deploying Linkis as much as possible, since currently no script is fully compatible with .bat commands.

  • Missing configuration items: There are two configuration files that need to be modified in the Linkis 1.0 version: linkis-env.sh and db.sh

    The former contains the environment parameters that Linkis needs to load during execution, and the latter contains the database information for the tables Linkis itself needs to store. Under normal circumstances, if a corresponding configuration item is missing, the error message will show an exception related to the key value. For example, when db.sh is not filled in with the relevant database configuration, an "unknown mysql server host '-P'" exception will appear, which is caused by the missing host.

  • Report error when starting microservice

    Linkis puts the log files of all microservices into the logs directory. The log directory levels are as follows:

    ├── linkis-computation-governance
    │   ├── linkis-cg-engineconnmanager
    │   ├── linkis-cg-engineplugin
    │   ├── linkis-cg-entrance
    │   └── linkis-cg-linkismanager
    ├── linkis-public-enhancements
    │   ├── linkis-ps-bml
    │   ├── linkis-ps-cs
    │   ├── linkis-ps-datasource
    │   └── linkis-ps-publicservice
    └── linkis-spring-cloud-services
        ├── linkis-mg-eureka
        └── linkis-mg-gateway

    It includes three microservice modules: computing governance, public enhancement, and microservice management. Each microservice contains three logs, linkis-gc.log, linkis.log, and linkis.out, corresponding to the service's GC log, service log, and service System.out log.

    Under normal circumstances, when an error occurs when starting a microservice, you can cd to the corresponding service in the log directory to view the related log to troubleshoot the problem. Generally, the most frequently occurring problems can also be divided into three categories:

    1. Port occupation: Since the default ports of Linkis microservices are mostly concentrated around 9000, you need to check whether the port of each microservice is occupied by another process before starting. If a port is occupied, change the corresponding microservice port in the conf/linkis-env.sh file (see the port-check sketch after this list).

    2. Necessary configuration parameters are missing: Some microservices must load certain user-defined parameters before they can start normally. For example, the linkis-cg-engineplugin microservice loads the wds.linkis.engineconn.*-related configuration from conf/linkis.properties at startup; if the user changed the Linkis path after installation without updating this configuration accordingly, an error will be reported when the linkis-cg-engineplugin microservice starts.

    3. System environment is not compatible: It is recommended that users follow the recommended system and application versions in the official documents as much as possible when deploying, and install the necessary system plug-ins, such as expect, yum, etc. If an application version is not compatible, it may cause application-related errors. For example, SQL statement incompatibilities in MySQL 5.7 may cause errors in the linkis.ddl and linkis.dml files when initializing the database during installation; you need to refer to the "Q&A Problem Summary" or the deployment documentation and make the corresponding settings.
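
    A small shell sketch for the port check mentioned in item 1 (the port list is illustrative; use the ports actually configured in conf/linkis-env.sh):

        # report candidate ports that are already in use
        for port in 9001 9101 9102 9103 9104; do
            netstat -tln 2>/dev/null | grep -q ":${port} " && echo "port ${port} is occupied"
        done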

  • Report error during microservice execution period

    Errors reported while microservices are running are more complicated, and the situations encountered vary with the environment, but the troubleshooting approach is basically the same. Starting from the error catalog of the corresponding microservice, the errors can be roughly divided into three situations:

    1. Manually installed and deployed microservices report errors: The logs of this type of microservice are unified under the log/ directory. After locating the microservice, enter the corresponding directory to view it.

    2. Engine start failure ("insufficient resources, request engine failure"): When this type of error occurs, it is not necessarily caused by insufficient resources, because the front end can only fetch logs after the Spring project has started, so errors occurring before the engine starts cannot be fetched well. Three kinds of high-frequency problems have been found during actual use by internal test users:

      a. The engine cannot be created because there is no permission on the engine directory: The log will be printed to the linkis.out file under the cg-engineconnmanager microservice, and you need to open that file to find the specific reason.

      b. There is a dependency conflict in the engine lib package, or the server cannot start normally because of insufficient memory resources: Since the engine directory has already been created, the log will be printed to the stdout file under the engine; the engine path can be found as described in item c.

      c. Errors reported during engine execution: Each started engine is a microservice that is dynamically loaded and started at runtime. If an error occurs when the engine starts, you need to find the engine's log in the corresponding startup user's directory. The root path is the ENGINECONN_ROOT_PATH filled in linkis-env.sh before installation. If you need to modify the path after installation, modify wds.linkis.engineconn.root.dir in linkis.properties (a log-locating sketch follows).
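
    A hedged shell sketch for locating the most recently written engine stdout logs (replace <ENGINECONN_ROOT_PATH> with the value configured for your deployment; GNU find is assumed):

        # list the five most recently modified engine stdout files
        find <ENGINECONN_ROOT_PATH> -type f -name "stdout" -printf "%T@ %p\n" 2>/dev/null \
            | sort -rn | head -5 | awk '{print $2}'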

    Ⅴ. Community user group consultation and communication#

    For problems that cannot be located and resolved by following the process above during installation and deployment, you can send the error message to our community group. To make it easier for community partners and developers to help and to improve efficiency, it is recommended that when asking a question you describe the problem phenomenon, the related log information, and what has already been checked; if you suspect an environment problem, please also list the corresponding application versions. We provide two online groups: a WeChat group and a QQ group. The communication channels and specific contact information can be found at the bottom of the Linkis GitHub homepage.

    Ⅵ. Locate the source code by remote debugging#

    Under normal circumstances, remote debugging of the source code is the most effective way to locate problems, but compared with document review it requires users to have a certain understanding of the source code structure. It is recommended to check the detailed source-code-level structure of Linkis in the Linkis WIKI before remote debugging. After gaining a certain familiarity with the project's source code structure, you can refer to How to Debug Linkis.

    - + \ No newline at end of file diff --git a/docs/1.1.0/tuning_and_troubleshooting/tuning/index.html b/docs/1.1.0/tuning_and_troubleshooting/tuning/index.html index 0bc4334823c..8896fd4cac7 100644 --- a/docs/1.1.0/tuning_and_troubleshooting/tuning/index.html +++ b/docs/1.1.0/tuning_and_troubleshooting/tuning/index.html @@ -7,7 +7,7 @@ Tuning | Apache Linkis - + @@ -16,7 +16,7 @@ override def getOrCreateGroup(groupName: String): Group = { if (!groupNameToGroups.containsKey(groupName)) synchronized { val initCapacity = 100 val maxCapacity = 100 // other codes... } }

    4. Resource settings related to task runtime#

    When a task is submitted to run on Yarn, Yarn provides a configurable interface, and Linkis, as a highly extensible framework, can likewise set these resource configurations through its own configuration.

    The related configuration of Spark and Hive are as follows:

    Part of the Spark configuration lives in linkis-engineconn-plugins/engineconn-plugins; you can adjust this configuration to change the runtime environment of tasks submitted to Yarn. Due to limited space, for more details on Hive and Yarn configuration please refer to the source code and the parameters documentation.

        "spark.driver.memory" = 2 //Unit is G    "wds.linkis.driver.cores" = 1    "spark.executor.memory" = 4 //Unit is G    "spark.executor.cores" = 2    "spark.executor.instances" = 3    "wds.linkis.rm.yarnqueue" = "default"
    - + \ No newline at end of file diff --git a/docs/1.1.0/upgrade/upgrade_from_0.X_to_1.0_guide/index.html b/docs/1.1.0/upgrade/upgrade_from_0.X_to_1.0_guide/index.html index 828fc1125b1..9d0755b8b39 100644 --- a/docs/1.1.0/upgrade/upgrade_from_0.X_to_1.0_guide/index.html +++ b/docs/1.1.0/upgrade/upgrade_from_0.X_to_1.0_guide/index.html @@ -7,7 +7,7 @@ Upgrade From 0.X To 1.0 Guide | Apache Linkis - + @@ -16,7 +16,7 @@ Please input the choice: ## choice 1

    3. Database upgrade#

         After the service is installed, the database structure needs to be modified, including table structure changes and new tables and data:

    3.1 Table structure modification part:#

         linkis_task: The submit_user and label_json fields are added to the table. The update statement is:

        ALTER TABLE linkis_task ADD submit_user varchar(50) DEFAULT NULL COMMENT 'submitUser name';
        ALTER TABLE linkis_task ADD `label_json` varchar(200) DEFAULT NULL COMMENT 'label json';

    3.2 New SQL that needs to be executed:#

        cd db/module
        ## Add the tables that the enginePlugin service depends on:
        source linkis_ecp.sql
        ## Add the table that the publicservice-instanceLabel service depends on:
        source linkis_instance_label.sql
        ## Add the tables that the linkis-manager service depends on:
        source linkis_manager.sql

    3.3 Publicservice-Configuration table modification#

         In order to support the full labeling capability of Linkis 1.X, all the data tables related to the configuration module have been upgraded to labeling, which is completely different from the 0.X Configuration table. It is necessary to re-execute the table creation statement and the initialization statement.

         This means that Linkis0.X users' existing engine configuration parameters can no longer be migrated to Linkis1.0 (it is recommended that users reconfigure the engine parameters once).

         The execution of the table building statement is as follows:

    source linkis_configuration.sql

         Because Linkis 1.0 supports multiple versions of the engine, it is necessary to modify the version of the engine when executing the initialization statement, as shown below:

        vim linkis_configuration_dml.sql
        ## Modify the default version of the corresponding engine
        SET @SPARK_LABEL="spark-2.4.3";
        SET @HIVE_LABEL="hive-1.2.1";
        ## Execute the initialization statement
        source linkis_configuration_dml.sql

    4. Installation and startup Linkis1.0#

         Start Linkis 1.0 to verify whether the service has been started normally and provide external services. For details, please refer to: Quick Deployment Linkis1.0

    - + \ No newline at end of file diff --git a/docs/1.1.0/upgrade/upgrade_guide/index.html b/docs/1.1.0/upgrade/upgrade_guide/index.html index 5b257252e27..fad240c5841 100644 --- a/docs/1.1.0/upgrade/upgrade_guide/index.html +++ b/docs/1.1.0/upgrade/upgrade_guide/index.html @@ -7,7 +7,7 @@ Version upgrades above 1.0.3 | Apache Linkis - + @@ -34,7 +34,7 @@ Linkis' nginx configuration file is by default in /etc/nginx/conf.d/dss.conf

        #Example
        server {
            ......
            location dss/linkis {
                alias /appcom/Install/linkis-web-newversion/dist; # static file directory
                index index.html;
            }
            ......
        }

    Reload nginx configuration

    sudo nginx -s reload

    5.3 Notes#

    • After the management console is upgraded, the browser may still serve cached files; to verify the effect, it is best to clear the browser cache first
    - + \ No newline at end of file diff --git a/docs/1.1.0/user_guide/console_manual/index.html b/docs/1.1.0/user_guide/console_manual/index.html index 8bfd7ceca81..486ea95706d 100644 --- a/docs/1.1.0/user_guide/console_manual/index.html +++ b/docs/1.1.0/user_guide/console_manual/index.html @@ -7,7 +7,7 @@ Console User Manual | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    Console User Manual

    Linkis 1.0 has added a new Computation Governance Console page, which provides users with an interactive UI for viewing the execution of Linkis tasks, custom parameter configuration, engine health status, resource surplus, etc., thereby simplifying development and management efforts for users.

    1. Structure of Computation Governance Console#

    The Computation Governance Console is mainly composed of the following functional pages:

    • Global History
    • Resource Management
    • Parameter Configuration
    • Global Variables
    • ECM Management (only visible to Linkis Computation Governance Console administrators)
    • Microservice Management (only visible to Linkis Computation Governance Console administrators)

    Global history, resource management, parameter configuration, and global variables are visible to all users, while ECM management and microservice management are only visible to Linkis Computation Governance Console administrators.

    Administrators of the Computation Governance Console can be configured through the following parameter in linkis.properties:

    wds.linkis.governance.station.admin=hadoop (multiple administrator usernames are separated by ',')

    2. Global history#

    The global history interface provides the user's own Linkis task submission records. The execution status of each task is displayed here, and the reason for a task's failure can also be queried by clicking the view button on the left side of the task.


    For Linkis Computation Governance Console administrators, the historical tasks of all users can be viewed by clicking "switch administrator view" on the page.


    3. Resource management#

    In the resource management interface, users can see the status and resource occupation of the currently started engines, and can also stop an engine through this page.


    4. Parameter configuration#

    The parameter configuration interface provides the function of user-defined parameter management. The user can manage the related configuration of the engine in this interface, and the administrator can add application types and engines here.


    Users can expand all the configuration information in the directory by clicking the application type at the top and then selecting the engine type under that application; after modifying the configuration information, click "Save" for it to take effect.

    Editing the catalog and adding new application types are only visible to administrators. Click the edit button to delete an existing application or engine configuration (note: deleting an application directly deletes all engine configurations under that application and cannot be restored), to add an engine, or click "New Application" to add a new application type.


    5. Global variable#

    In the global variable interface, users can customize variables for code writing, just click the edit button to add parameters.


    6. ECM management#

    The ECM management interface is used by administrators to manage the ECMs and all engines. On this interface you can view ECM status information, modify ECM labels, modify ECM status, and query all engine information under each ECM. It is visible only to administrators; how to configure administrators is described in the second chapter of this article.


    Click the edit button to edit the label information of the ECM (only part of the labels are allowed to be edited) and modify the status of the ECM.


    Click the instance name of the ECM to view all engine information under the ECM.

    Similarly, you can stop the engine on this interface, and edit the label information of the engine.

    7. Microservice management#

    The microservice management interface shows all microservice information under Linkis, and it is only visible to administrators. Linkis's own microservices can be viewed by clicking the Eureka registration center; the microservices associated with Linkis are listed directly on this interface.

    - + \ No newline at end of file diff --git a/docs/1.1.0/user_guide/how_to_use/index.html b/docs/1.1.0/user_guide/how_to_use/index.html index 4a6e97555b8..c2f8ef705b4 100644 --- a/docs/1.1.0/user_guide/how_to_use/index.html +++ b/docs/1.1.0/user_guide/how_to_use/index.html @@ -7,7 +7,7 @@ How to Use | Apache Linkis - + @@ -18,7 +18,7 @@ DSS Run Workflow

    - + \ No newline at end of file diff --git a/docs/1.1.0/user_guide/linkis-datasource-client/index.html b/docs/1.1.0/user_guide/linkis-datasource-client/index.html index dd57db21c60..89f70908ba4 100644 --- a/docs/1.1.0/user_guide/linkis-datasource-client/index.html +++ b/docs/1.1.0/user_guide/linkis-datasource-client/index.html @@ -7,7 +7,7 @@ DataSource Client SDK | Apache Linkis - + @@ -31,7 +31,7 @@ def testMetadataGetDatabases(client:LinkisMetaDataRemoteClient): Unit ={ client.getDatabases(MetadataGetDatabasesAction.builder().setUser("hadoop").setDataSourceId(9l).setUser("hadoop").setSystem("client").build()).getDbs }}
    - + \ No newline at end of file diff --git a/docs/1.1.0/user_guide/linkiscli_manual/index.html b/docs/1.1.0/user_guide/linkiscli_manual/index.html index 0cb92d55dd1..3c4ca645823 100644 --- a/docs/1.1.0/user_guide/linkiscli_manual/index.html +++ b/docs/1.1.0/user_guide/linkiscli_manual/index.html @@ -7,7 +7,7 @@ Linkis-Cli Manual | Apache Linkis - + @@ -16,7 +16,7 @@

    Note:

    1. variableMap does not support configuration

    2. When there is a conflict between the configured key and the key entered in the command parameter, the priority is as follows:

      Instruction Parameters> Key in Instruction Map Type Parameters> User Configuration> Default Configuration

    Example:

    Configure engine startup parameters:

        wds.linkis.client.param.conf.spark.executor.instances=3
        wds.linkis.client.param.conf.wds.linkis.yarnqueue=q02

    Configure labelMap parameters:

       wds.linkis.client.label.myLabel=label123

    6. Output result set to file#

    Use the -outPath parameter to specify an output directory; linkis-cli will then write the result sets to files, creating one file per result set. The output file names have the following format:

        task-[taskId]-result-[idx].txt    

    E.g.:

        task-906-result-1.txt
        task-906-result-2.txt
        task-906-result-3.txt
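
    For example, a hedged invocation (the flags other than -outPath are standard Linkis-Cli options; the engine type and code are placeholders to adjust for your deployment):

        sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql \
            -code "select * from testdb.test" -outPath ./results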
    - + \ No newline at end of file diff --git a/docs/1.1.0/user_guide/overview/index.html b/docs/1.1.0/user_guide/overview/index.html index 183b3c3d643..858b71548ac 100644 --- a/docs/1.1.0/user_guide/overview/index.html +++ b/docs/1.1.0/user_guide/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.1.0/user_guide/sdk_manual/index.html b/docs/1.1.0/user_guide/sdk_manual/index.html index bb49341025a..2aebd5f5745 100644 --- a/docs/1.1.0/user_guide/sdk_manual/index.html +++ b/docs/1.1.0/user_guide/sdk_manual/index.html @@ -7,7 +7,7 @@ JAVA SDK Manual | Apache Linkis - + @@ -42,7 +42,7 @@ }
    - + \ No newline at end of file diff --git a/docs/1.1.1/api/http/data-source-manager-api/index.html b/docs/1.1.1/api/http/data-source-manager-api/index.html index 0bbe96dab52..c013531d307 100644 --- a/docs/1.1.1/api/http/data-source-manager-api/index.html +++ b/docs/1.1.1/api/http/data-source-manager-api/index.html @@ -7,7 +7,7 @@ DataSourceAdminRestfulApi | Apache Linkis - + @@ -20,7 +20,7 @@ Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    dataSourceId | dataSourceId | path | true | integer(int64) |

    Response parameters:

    parameter name | parameter description | type | schema
    data | | object |
    message | | string |
    method | | string |
    status | | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/data-source-manager/3/connect-params",    "status": 0,    "message": "OK",    "data": {        "connectParams": {            "host": "127.0.0.1",            "password": "xxxxx",            "port": "9600",            "username": "linkis"        }    }}

    getVersionList#

    Interface address: /api/rest_j/v1/data-source-manager/{dataSourceId}/versions

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    dataSourceId | dataSourceId | path | true | integer(int64) |

    Response parameters:

    parameter name | parameter description | type | schema
    data | | object |
    message | | string |
    method | | string |
    status | | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/data-source-manager/1/versions",    "status": 0,    "message": "OK",    "data": {        "versions": [            {                "versionId": 1,                "datasourceId": 1,                "connectParams": {                    "host": "127.0.0.1",                    "password": "xxxxx",                    "port": "9600",                    "username": "linkis"                },                "parameter": "{\"host\":\"127.0.0.1\",\"port\":\"9600\",\"username\":\"linkis\",\"password\": \"rO0ABXQACUFiY2RAMjAyMg==\"}",                "comment": "Initialization Version",                "createUser": "hadoop"            }        ]    }}

    connectDataSource#

    Interface address: /api/rest_j/v1/data-source-manager/{dataSourceId}/{version}/op/connect

    Request method: PUT

    Request data type: application/json

    Response data type: application/json

    Interface description:

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    dataSourceId | dataSourceId | path | true | integer(int64) |
    version | version | path | true | integer(int64) |

    Response parameters:

    parameter name | parameter description | type | schema
    data | | object |
    message | | string |
    method | | string |
    status | | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/data-source-manager/1/1/op/connect",    "status": 0,    "message": "OK",    "data": {        "ok": true    }}

    data-source-operate-restful-api

    connect#

    Interface address:/api/rest_j/v1/data-source-manager/op/connect/json

    Request method: POST

    Request data type: application/json

    Response data type: application/json

    Interface description:

    Request example:

    {  "connectParams": {},  "createIdentify": "",  "createSystem": "",  "createTime": "",  "createUser": "",  "dataSourceDesc": "",  "dataSourceEnv": {    "connectParams": {},    "createTime": "",    "createUser": "",    "dataSourceType": {      "classifier": "",      "description": "",      "icon": "",      "id": "",      "layers": 0,      "name": "",      "option": ""    },    "dataSourceTypeId": 0,    "envDesc": "",    "envName": "",    "id": 0,    "modifyTime": "",    "modifyUser": ""  },  "dataSourceEnvId": 0,  "dataSourceName": "",  "dataSourceType": {    "classifier": "",    "description": "",    "icon": "",    "id": "",    "layers": 0,    "name": "",    "option": ""  },  "dataSourceTypeId": 0,  "expire": true,  "id": 0,  "labels": "",  "modifyTime": "",  "modifyUser": "",  "publishedVersionId": 0,  "versionId": 0,  "versions": [    {      "comment": "",      "connectParams": {},      "createTime": "",      "createUser": "",      "datasourceId": 0,      "parameter": "",      "versionId": 0    }  ]}

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    dataSource | dataSource | body | true | DataSource | DataSource
    connectParams | | | false | object |
    createIdentify | | | false | string |
    createSystem | | | false | string |
    createTime | | | false | string(date-time) |
    createUser | | | false | string |
    dataSourceDesc | | | false | string |
    dataSourceEnv | | | false | DataSourceEnv | DataSourceEnv
    connectParams | | | false | object |
    createTime | | | false | string |
    createUser | | | false | string |
    dataSourceType | | | false | DataSourceType | DataSourceType
    classifier | | | false | string |
    description | | | false | string |
    icon | | | false | string |
    id | | | false | string |
    layers | | | false | integer |
    name | | | false | string |
    option | | | false | string |
    dataSourceTypeId | | | false | integer |
    envDesc | | | false | string |
    envName | | | false | string |
    id | | | false | integer |
    modifyTime | | | false | string |
    modifyUser | | | false | string |
    dataSourceEnvId | | | false | integer(int64) |
    dataSourceName | | | false | string |
    dataSourceType | | | false | DataSourceType | DataSourceType
    classifier | | | false | string |
    description | | | false | string |
    icon | | | false | string |
    id | | | false | string |
    layers | | | false | integer |
    name | | | false | string |
    option | | | false | string |
    dataSourceTypeId | | | false | integer(int64) |
    expire | | | false | boolean |
    id | | | false | integer(int64) |
    labels | | | false | string |
    modifyTime | | | false | string(date-time) |
    modifyUser | | | false | string |
    publishedVersionId | | | false | integer(int64) |
    versionId | | | false | integer(int64) |
    versions | | | false | array | DatasourceVersion
    comment | | | false | string |
    connectParams | | | false | object |
    createTime | | | false | string |
    createUser | | | false | string |
    datasourceId | | | false | integer |
    parameter | | | false | string |
    versionId | | | false | integer |

    Response parameters:

    parameter name | parameter description | type | schema
    data | | object |
    message | | string |
    method | | string |
    status | | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}
    - + \ No newline at end of file diff --git a/docs/1.1.1/api/http/engineconn-plugin-refesh/index.html b/docs/1.1.1/api/http/engineconn-plugin-refesh/index.html index 14706ab5b1e..4d1c8a5584a 100644 --- a/docs/1.1.1/api/http/engineconn-plugin-refesh/index.html +++ b/docs/1.1.1/api/http/engineconn-plugin-refesh/index.html @@ -7,7 +7,7 @@ Engine Material Refresh Interface | Apache Linkis - + @@ -16,7 +16,7 @@ none

    Response parameters:

    parameter name | parameter description | type | schema
    data | | object |
    message | | string |
    method | | string |
    status | | integer(int32) | integer(int32)

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "msg": "Refresh successfully"    }}
    - + \ No newline at end of file diff --git a/docs/1.1.1/api/http/metadatamanager-api/index.html b/docs/1.1.1/api/http/metadatamanager-api/index.html index dd221bc782b..b72308f5a31 100644 --- a/docs/1.1.1/api/http/metadatamanager-api/index.html +++ b/docs/1.1.1/api/http/metadatamanager-api/index.html @@ -7,7 +7,7 @@ MetadataCoreRestful | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    MetadataCoreRestful

    getColumns#

    Interface address: /api/rest_j/v1/metadatamanager/columns/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description: Get the column information of the data table

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    dataSourceId | dataSourceId | path | true | string |
    database | database | path | true | string |
    system | system | query | true | string |
    table | table | path | true | string |

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "columns": [            {                "index": 1,                "primaryKey": true,                "name": "id",                "type": "INT"            },            {                "index": 2,                "primaryKey": false,                "name": "datasource_name",                "type": "VARCHAR"            },            {                "index": 3,                "primaryKey": false,                "name": "datasource_desc",                "type": "VARCHAR"            },            {                "index": 4,                "primaryKey": false,                "name": "datasource_type_id",                "type": "INT"            },            {                "index": 5,                "primaryKey": false,                "name": "create_identify",                "type": "VARCHAR"            },            {                "index": 6,                "primaryKey": false,                "name": "create_system",                "type": "VARCHAR"            },            {                "index": 7,                "primaryKey": false,                "name": "parameter",                "type": "VARCHAR"            },            {                "index": 8,                "primaryKey": false,                "name": "create_time",                "type": "DATETIME"            },            {                "index": 9,                "primaryKey": false,                "name": "modify_time",                "type": "DATETIME"            },            {                "index": 10,                "primaryKey": false,                "name": "create_user",                "type": "VARCHAR"            },            {                "index": 11,                "primaryKey": false,                "name": "modify_user",                "type": "VARCHAR"            },            {                "index": 12,                "primaryKey": false,                "name": "labels",                "type": "VARCHAR"            },            {                "index": 13,                "primaryKey": false,                "name": "version_id",                "type": "INT"            },            {                "index": 14,                "primaryKey": false,                "name": "expire",                "type": "TINYINT"            },            {                "index": 15,                "primaryKey": false,                "name": "published_version_id",                "type": "INT"            }        ]    }}

    getDatabases#

    Interface address:/api/rest_j/v1/metadatamanager/dbs/{dataSourceId}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description: Get the list of database names of the data source

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    dataSourceId | dataSourceId | path | true | string |
    system | system | query | true | string |

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "dbs": [            "information_schema",            "linkis",            "linkis_sit"        ]    }}

    getPartitions#

    Interface address:/api/rest_j/v1/metadatamanager/partitions/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    dataSourceId | dataSourceId | path | true | string |
    database | database | path | true | string |
    system | system | query | true | string |
    table | table | path | true | string |
    traverse | traverse | query | false | boolean |

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "props": {            "partKeys": [                "ds"            ],            "root": {}        }    }}

    getTableProps#

    Interface address:/api/rest_j/v1/metadatamanager/props/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    dataSourceId | dataSourceId | path | true | string |
    database | database | path | true | string |
    system | system | query | true | string |
    table | table | path | true | string |

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "props": {            "skip.header.line.count": "1",            "columns.types": "int:int:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string",            "columns": "id,age,job,marital,education,default,balance,housing,loan,contact,day,month,duration,campaign,pdays,previous,poutcome,y",            "field.delim": ",",            "transient_lastDdlTime": "1646732554",            "partition_columns.types": "string",            "columns.comments": "\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000",            "bucket_count": "-1",            "serialization.ddl": "struct demo_data { i32 id, i32 age, string job, string marital, string education, string default, string balance, string housing, string loan, string contact, string day, string month, string duration, string campaign, string pdays, string previous, string poutcome, string y}",            "file.outputformat": "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat",            "partition_columns": "ds",            "colelction.delim": "-",            "serialization.lib": "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",            "name": "dss_autotest.demo_data",            "location": "hdfs://bdpdev01/user/hive/warehouse/hadoop/dss_autotest.db/demo_data",            "mapkey.delim": ":",            "file.inputformat": "org.apache.hadoop.mapred.TextInputFormat",            "serialization.format": ",",            "column.name.delimiter": ","        }    }}
    - + \ No newline at end of file diff --git a/docs/1.1.1/api/http/udf-api/index.html b/docs/1.1.1/api/http/udf-api/index.html index 5c873462ed7..826deca26b8 100644 --- a/docs/1.1.1/api/http/udf-api/index.html +++ b/docs/1.1.1/api/http/udf-api/index.html @@ -7,7 +7,7 @@ UDF接口 | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    UDF Interface

    UDF handover#

    Basic information#

    Path: /api/rest_j/v1/udf/handover

    Method: POST

    Interface description:

    Request parameters#

    Headers

    Parameter name | Parameter value | Required | Example | Remarks
    Content-Type | application/json | | |

    Body

    Name | Type | Required | Default value | Remarks | Other information
    udfId | number | required | | |
    handoverUser | string | required | | user to whom the UDF is handed over |
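
    A hypothetical request body for this interface (field values are placeholders):

        {
            "udfId": 100,
            "handoverUser": "alice"
        }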

    UDF modification#

    Basic information#

    Path: /api/rest_j/v1/udf/update

    Method: POST

    Interface description:

    Request parameters#

    Headers

    Parameter name | Parameter value | Required | Example | Remarks
    Content-Type | application/json | | |

    Body

    Name | Type | Required | Default value | Remarks | Other information
    udfUpdateVo | object | required | | |
    ├─ id | number | required | | |
    ├─ udfName | string | required | | cannot be modified |
    ├─ udfType | number | required | | cannot be modified |
    ├─ description | string | required | | |
    ├─ path | string | required | | jar-type UDFs are uploaded by file path |
    ├─ useFormat | string | required | | |
    ├─ registerFormat | string | required | | |

    Returned data#

    Name | Type | Required | Default value | Remarks | Other information

    UDF shared-user list#

    Basic information#

    Path: /api/rest_j/v1/udf/getSharedUsers

    Method: POST

    Interface description:

    Request parameters#

    Headers

    Parameter name | Parameter value | Required | Example | Remarks
    Content-Type | application/json | | |

    Body

    Name | Type | Required | Default value | Remarks | Other information
    udfId | number | required | | |

    Returned data#

    Name | Type | Required | Default value | Remarks | Other information
    sharedUsers | string [] | required | | |

    item type: string

    ├─ optional

    UDF deletion#

    Basic information#

    Path: /api/rest_j/v1/udf/delete/{id}

    Method: POST

    Interface description:

    Request parameters#

    Headers

    Parameter name | Parameter value | Required | Example | Remarks
    Content-Type | application/json | | |

    Path parameters

    Parameter name | Example | Remarks
    id | 100 | udf id

    Body

    Name | Type | Required | Default value | Remarks | Other information

    UDF creation#

    Basic information#

    Path: /api/rest_j/v1/udf/add

    Method: POST

    Interface description:

    Request parameters#

    Headers

    Parameter name | Parameter value | Required | Example | Remarks
    Content-Type | application/json | | |

    Body

    Name | Type | Required | Default value | Remarks | Other information
    udfAddVo | object | required | | |
    ├─ udfName | string | required | | |
    ├─ udfType | number | required | | |
    ├─ description | string | required | | |
    ├─ path | string | required | | jar-type UDFs are uploaded by file path |
    ├─ shared | boolean | optional | | no need to pass |
    ├─ useFormat | string | required | | |
    ├─ expire | boolean | optional | | no need to pass |
    ├─ load | boolean | required | | |
    ├─ registerFormat | string | required | | |
    ├─ treeId | number | optional | | no need to pass |
    ├─ sys | string | required | | system; for now always "IDE" |
    ├─ clusterName | string | required | | cluster; for now always "all" |
    ├─ directory | string | required | | first-level category directory of the personal function |

    Returned data#

    Name | Type | Required | Default value | Remarks | Other information

    UDF view source code#

    Basic information#

    Path: /api/rest_j/v1/udf/downloadUdf

    Method: POST

    Interface description:

    Request parameters#

    Headers

    Parameter name | Parameter value | Required | Example | Remarks
    Content-Type | application/json | | |

    Body

    Name | Type | Required | Default value | Remarks | Other information
    udfId | number | required | | |
    version | string | required | | |

    Returned data#

    Name | Type | Required | Default value | Remarks | Other information
    method | string | required | | |
    status | number | required | | |
    message | string | required | | |
    data | object | required | | |
    ├─ content | string | required | | udf content |

    UDF version publishing#

    Basic information#

    Path: /api/rest_j/v1/udf/publish

    Method: POST

    Interface description:

    Request parameters#

    Headers

    Parameter name | Parameter value | Required | Example | Remarks
    Content-Type | application/json | | |

    Body

    Name | Type | Required | Default value | Remarks | Other information
    udfId | number | required | | |
    version | string | required | | version to publish, e.g. v000005 |

    UDF sharing#

    Basic information#

    Path: /api/rest_j/v1/udf/shareUDF

    Method: POST

    Interface description:

    Request parameters#

    Headers

    Parameter name | Parameter value | Required | Example | Remarks
    Content-Type | application/json | | |

    Body

    Name | Type | Required | Default value | Remarks | Other information
    udfInfo | object | required | | |
    ├─ id | number | required | | |
    ├─ udfName | string | required | | |
    ├─ udfType | number | required | | |
    sharedUsers | string [] | required | | list of users the UDF is shared with |

    item type: string

    ├─ optional

    UDF management page#

    Note: only UDFs created by the user themselves are visible here

    Basic information#

    Path: /api/rest_j/v1/udf/managerPages

    Method: POST

    Interface description:

    Request parameters#

    Headers

    Parameter name | Parameter value | Required | Example | Remarks
    Content-Type | application/json | | |

    Body

    Name | Type | Required | Default value | Remarks | Other information
    udfName | string | optional | | |
    udfType | string | required | | comma-separated string, e.g.: 0,1,2 |
    createUser | string | optional | | |
    curPage | number | required | | page number |
    pageSize | number | required | | number of records per page |

    Returned data#

    Name | Type | Required | Default value | Remarks | Other information
    infoList | object [] | required | | |

    item type: object

    ├─ id | number | required | | |
    ├─ createUser | string | required | | udf creator |
    ├─ udfName | string | required | | |
    ├─ udfType | string | required | | |
    ├─ expire | boolean | required | | whether the udf has expired |
    ├─ shared | boolean | required | | whether it is a shared udf |
    ├─ treeId | number | required | | |
    ├─ sys | string | required | | system, e.g.: dss |
    ├─ clusterName | string | required | | cluster, for now all |
    ├─ createTime | number | required | | |
    ├─ updateTime | number | required | | |
    ├─ path | string | required | | path the user last uploaded to, for display only |
    ├─ registerFormat | string | required | | |
    ├─ useFormat | string | required | | |
    ├─ description | string | required | | |
    ├─ operationStatus | object | required | | category |
    ├─ canUpdate | boolean | required | | whether it can be edited |
    ├─ canShare | boolean | required | | whether it can be shared |
    ├─ canPublish | boolean | required | | whether it can be published |
    ├─ canDelete | boolean | required | | whether it can be deleted |
    ├─ canExpire | boolean | required | | whether it can be expired |
    ├─ canHandover | boolean | required | | whether it can be handed over |
    totalPage | number | required | | total number of pages |
    field_1 | string | required | | |
    total | number | required | | total number of records |

    UDF expiration#

    Basic information#

    Path: /api/rest_j/v1/udf/setExpire

    Method: POST

    Interface description:

    Request parameters#

    Headers

    Parameter name | Parameter value | Required | Example | Remarks
    Content-Type | application/json | | |

    Body

    Name | Type | Required | Default value | Remarks | Other information
    udfId | number | required | | |

    Returned data#

    Name | Type | Required | Default value | Remarks | Other information

    Download udf file to local#

    Basic information#

    Path: /api/rest_j/v1/udf/downloadToLocal

    Method: POST

    Interface description:

    Request parameters#

    Headers

    Parameter name | Parameter value | Required | Example | Remarks
    Content-Type | application/json | | |

    Body

    Name | Type | Required | Default value | Remarks | Other information
    udfId | number | required | | |
    version | string | required | | |

    Returned data#

    Name | Type | Required | Default value | Remarks | Other information

    View version list#

    Basic information#

    Path: /api/rest_j/v1/udf/versionList

    Method: GET

    Interface description:

    Request parameters#

    Query

    Parameter name | Required | Example | Remarks
    udfId | | 100 |

    Returned data#

    Name | Type | Required | Default value | Remarks | Other information
    versionList | object [] | optional | | |

    item type: object

    ├─ id | number | optional | | |
    ├─ udfId | number | optional | | |
    ├─ path | string | optional | | |
    ├─ bmlResourceId | string | optional | | |
    ├─ bmlResourceVersion | string | optional | | |
    ├─ isPublished | boolean | optional | | |
    ├─ registerFormat | string | optional | | |
    ├─ useFormat | string | optional | | |
    ├─ description | string | optional | | |
    ├─ createTime | number | optional | | |
    ├─ expire | boolean | optional | | |
    ├─ createUser | string | optional | | |

    Version rollback#

    Basic information#

    Path: /api/rest_j/v1/udf/rollback

    Method: POST

    Interface description:

    Request parameters#

    Headers

    Parameter name | Parameter value | Required | Example | Remarks
    Content-Type | application/json | | |

    Body

    Name | Type | Required | Default value | Remarks | Other information
    udfId | number | required | | |
    version | string | required | | version to roll back to |

    Get udf user list#

    Basic information#

    Path: /api/rest_j/v1/udf/allUdfUsers

    Method: GET

    Interface description:

    Request parameters#

    Returned data#

    Name | Type | Required | Default value | Remarks | Other information
    udfUsers | string [] | required | | |

    item type: string

    ├─ optional

    Get the first-level categories of the user's personal functions#

    Basic information#

    Path: /api/rest_j/v1/udf/userDirectory

    Method: GET

    Interface description:

    Request parameters#

    Query

    Parameter name | Required | Example | Remarks
    category | | udf | must be "udf" or "function", to get the first-level categories of UDF functions or of method functions respectively

    Returned data#

    Name | Type | Required | Default value | Remarks | Other information
    userDirectory | string [] | required | | list of category names |

    item type: string

    ├─ optional
    - + \ No newline at end of file diff --git a/docs/1.1.1/api/jdbc_api/index.html b/docs/1.1.1/api/jdbc_api/index.html index a78487f7c2b..e495aeab871 100644 --- a/docs/1.1.1/api/jdbc_api/index.html +++ b/docs/1.1.1/api/jdbc_api/index.html @@ -7,7 +7,7 @@ Task Submission And Execution Of JDBC API | Apache Linkis - + @@ -19,7 +19,7 @@ //3. Create statement and execute query Statement st= connection.createStatement(); ResultSet rs=st.executeQuery("show tables"); //4. Processing the returned results of the database (using the ResultSet class) while (rs.next()) { ResultSetMetaData metaData = rs.getMetaData(); for (int i = 1; i <= metaData.getColumnCount(); i++) { System.out.print(metaData.getColumnName(i) + ":" +metaData.getColumnTypeName(i)+": "+ rs.getObject(i) + " "); } System.out.println(); } // close resourse rs.close(); st.close(); connection.close(); }
    - + \ No newline at end of file diff --git a/docs/1.1.1/api/linkis_task_operator/index.html b/docs/1.1.1/api/linkis_task_operator/index.html index cbdcc324cf0..add7fa4e941 100644 --- a/docs/1.1.1/api/linkis_task_operator/index.html +++ b/docs/1.1.1/api/linkis_task_operator/index.html @@ -7,7 +7,7 @@ Task Submission and Execution Rest Api | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Linkis Task submission and execution Rest API document

    • The return of the Linkis Restful interface follows the following standard return format:
    {  "method": "",  "status": 0,  "message": "",  "data": {}}

    Convention:

    • method: Returns the requested Restful API URI, which is mainly used in WebSocket mode.
    • status: return status information, where: -1 means no login, 0 means success, 1 means error, 2 means verification failed, 3 means no access to the interface.
    • data: return specific data.
    • message: return the requested prompt message. If the status is not 0, the message returned is an error message, and the data may have a stack field, which returns specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification

    1. Submit task#

    • Interface /api/rest_j/v1/entrance/submit

    • Submission method POST

    • Request Parameters

    {  "executionContent": {    "code": "show tables",    "runType": "sql"  },  "params": {    "variable": {// task variable       "testvar": "hello"     },    "configuration": {      "runtime": {// task runtime params         "jdbc.url": "XX"      },      "startup": { // ec start up params         "spark.executor.cores": "4"      }    }  },  "source": { //task source information    "scriptPath": "file:///tmp/hadoop/test.sql"  },  "labels": {    "engineType": "spark-2.4.3",    "userCreator": "hadoop-IDE"  }}

    • Sample Response

    { "method": "/api/rest_j/v1/entrance/submit", "status": 0, "message": "Request executed successfully", "data": {   "execID": "030418IDEhivebdpdwc010004:10087IDE_hadoop_21",   "taskID": "123" }}
    • execID is the unique identification execution ID generated for the task after the user task is submitted to Linkis. It is of type String. This ID is only useful when the task is running, similar to the concept of PID. The design of ExecID is (requestApplicationName length)(executeAppName length)(Instance length)${requestApplicationName}${executeApplicationName}${entranceInstance information ip+port}${requestApplicationName}_${umUser}_${index}

    • taskID is the unique ID that represents the task submitted by the user. This ID is generated by the database self-increment and is of Long type
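
    As an illustration, the submit request above can be issued with curl (the gateway address is a placeholder, and cookies.txt is assumed to hold the session cookie obtained from the login interface):

        curl -X POST -H "Content-Type: application/json" -b cookies.txt \
            -d '{"executionContent":{"code":"show tables","runType":"sql"},"labels":{"engineType":"spark-2.4.3","userCreator":"hadoop-IDE"}}' \
            "http://127.0.0.1:9001/api/rest_j/v1/entrance/submit"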

    2. Get Status#

    • Interface /api/rest_j/v1/entrance/${execID}/status

    • Submission method GET

    • Sample Response

    { "method": "/api/rest_j/v1/entrance/{execID}/status", "status": 0, "message": "Get status successful", "data": {   "execID": "${execID}",   "status": "Running" }}

    3. Get Logs#

    • Interface /api/rest_j/v1/entrance/${execID}/log?fromLine=${fromLine}&size=${size}

    • Submission method GET

    • The request parameter fromLine refers to the number of lines from which to get, and size refers to the number of lines of logs that this request gets

    • Sample Response, where the returned fromLine needs to be used as a parameter for the next request of this interface

    {  "method": "/api/rest_j/v1/entrance/${execID}/log",  "status": 0,  "message": "Return log information",  "data": {    "execID": "${execID}",  "log": ["error log","warn log","info log", "all log"],  "fromLine": 56  }}

    4. Get Progress and resource#

    • Interface /api/rest_j/v1/entrance/${execID}/progressWithResource

    • Submission method GET

    • Sample Response

    {  "method": "/api/entrance/exec_id018017linkis-cg-entrance127.0.0.1:9205IDE_hadoop_spark_2/progressWithResource",  "status": 0,  "message": "OK",  "data": {    "yarnMetrics": {      "yarnResource": [        {          "queueMemory": 9663676416,          "queueCores": 6,          "queueInstances": 0,          "jobStatus": "COMPLETED",          "applicationId": "application_1655364300926_69504",          "queue": "default"        }      ],      "memoryPercent": 0.009,      "memoryRGB": "green",      "coreRGB": "green",      "corePercent": 0.02    },    "progress": 0.5,    "progressInfo": [      {        "succeedTasks": 4,        "failedTasks": 0,        "id": "jobId-1(linkis-spark-mix-code-1946915)",        "totalTasks": 6,        "runningTasks": 0      }    ],    "execID": "exec_id018017linkis-cg-entrance127.0.0.1:9205IDE_hadoop_spark_2"  }}

    5. Kill Task#

    • Interface /api/rest_j/v1/entrance/${execID}/kill

    • Submission method POST

    • Sample Response

    { "method": "/api/rest_j/v1/entrance/{execID}/kill", "status": 0, "message": "OK", "data": {   "execID":"${execID}"  }}

    6. Get task info#

    • Interface /api/rest_j/v1/jobhistory/{id}/get

    • Submission method GET

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    id | task id | path | true | string |
    • Sample Response
    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "task": {                "taskID": 1,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "FAILED",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"        }    }}

    7. Get result set info#

    Support for multiple result sets

    • Interface /api/rest_j/v1/filesystem/getDirFileTrees

    • Submission method GET

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    path | result directory | query | true | string |
    • Sample Response
    {  "method": "/api/filesystem/getDirFileTrees",  "status": 0,  "message": "OK",  "data": {    "dirFileTrees": {      "name": "1946923",      "path": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923",      "properties": null,      "children": [        {          "name": "_0.dolphin",          "path": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923/_0.dolphin",//result set 1          "properties": {            "size": "7900",            "modifytime": "1657113288360"          },          "children": null,          "isLeaf": true,          "parentPath": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923"        },        {          "name": "_1.dolphin",          "path": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923/_1.dolphin",//result set 2          "properties": {            "size": "7900",            "modifytime": "1657113288614"          },          "children": null,          "isLeaf": true,          "parentPath": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923"        }      ],      "isLeaf": false,      "parentPath": null    }  }}

    8. Get result content#

    • Interface /api/rest_j/v1/filesystem/openFile

    • Submission method GET

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    path | result path | query | true | string |
    charset | Charset | query | false | string |
    page | page number | query | false | ref |
    pageSize | page size | query | false | ref |
    • Sample Response
    {  "method": "/api/filesystem/openFile",  "status": 0,  "message": "OK",  "data": {    "metadata": [      {        "columnName": "count(1)",        "comment": "NULL",        "dataType": "long"      }    ],    "totalPage": 0,    "totalLine": 1,    "page": 1,    "type": "2",    "fileContent": [      [        "28"      ]    ]  }}

    9. Get Result by stream#

    Get the result as a CSV or Excel file

    • Interface /api/rest_j/v1/filesystem/resultsetToExcel

    • Submission method GET

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    autoFormat | Auto | query | false | boolean |
    charset | charset | query | false | string |
    csvSeerator | csv Separator | query | false | string |
    limit | row limit | query | false | ref |
    nullValue | null value | query | false | string |
    outputFileName | Output file name | query | false | string |
    outputFileType | Output file type csv or excel | query | false | string |
    path | result path | query | false | string |
    quoteRetouchEnable | Whether to quote modification | query | false | boolean |
    sheetName | sheet name | query | false | string |
    • Response
    binary stream

    10. Compatible with 0.x task submission interface#

    • Interface /api/rest_j/v1/entrance/execute

    • Submission method POST

    • Request Parameters
    {    "executeApplicationName": "hive", //Engine type    "requestApplicationName": "dss", //Client service type    "executionCode": "show tables",    "params": {      "variable": {// task variable         "testvar": "hello"      },      "configuration": {        "runtime": {// task runtime params           "jdbc.url": "XX"        },        "startup": { // ec start up params           "spark.executor.cores": "4"        }      }    },    "source": { //task source information      "scriptPath": "file:///tmp/hadoop/test.sql"    },    "labels": {      "engineType": "spark-2.4.3",      "userCreator": "hadoop-IDE"    },    "runType": "hql", //The type of script to run    "source": {"scriptPath":"file:///tmp/hadoop/1.hql"}}
    • Sample Response
    {  "method": "/api/rest_j/v1/entrance/execute",  "status": 0,  "message": "Request executed successfully",  "data": {    "execID": "030418IDEhivebdpdwc010004:10087IDE_hadoop_21",    "taskID": "123"  }}
    - + \ No newline at end of file diff --git a/docs/1.1.1/api/login_api/index.html b/docs/1.1.1/api/login_api/index.html index f0da86d5fce..6dcacecd905 100644 --- a/docs/1.1.1/api/login_api/index.html +++ b/docs/1.1.1/api/login_api/index.html @@ -7,7 +7,7 @@ Login Api | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Login Document

    1. Docking With LDAP Service#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Add LDAP related configuration:

        wds.linkis.ldap.proxy.url=ldap://127.0.0.1:389/ #LDAP service URL
        wds.linkis.ldap.proxy.baseDN=dc=webank,dc=com #Configuration of LDAP service

    2. How To Open The Test Mode To Achieve Login-Free#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Turn on the test mode and the parameters are as follows:

        wds.linkis.test.mode=true   # Open test mode
        wds.linkis.test.user=hadoop # Specify which user to delegate all requests to in test mode

    3. Login Interface Summary#

    We provide the following login-related interfaces:

    • Login In

    • Login Out

    • Heart Beat

    4. Interface details#

    • The return of the Linkis Restful interface follows the following standard return format:
    { "method": "", "status": 0, "message": "", "data": {}}

    Protocol

    • method: the requested Restful API URI; mainly used in WebSocket mode.
    • status: the status code, where -1 means not logged in, 0 means success, 1 means error, 2 means validation failed, and 3 means no access to the interface.
    • data: the specific data returned.
    • message: the prompt message for the request. If status is not 0, message contains an error description, and data may contain a stack field with the specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification
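
    For illustration, a failed request might return a payload shaped like the following (hypothetical values; the stack field appears only when the server attaches stack information):

        {
            "method": "/api/rest_j/v1/entrance/execute",
            "status": 1,
            "message": "error description",
            "data": {
                "stack": "<stack trace>"
            }
        }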

    1). Login In#

    • Interface /api/rest_j/v1/user/login

    • Submission method POST

          {        "userName": "",        "password": ""      }
    • Sample Response

        {
            "method": null,
            "status": 0,
            "message": "login successful(登录成功)!",
            "data": {
                "isAdmin": false,
                "userName": ""
            }
        }

    Among them:

    • isAdmin: Linkis has only admin users and non-admin users. The only privilege of an admin user is to view the historical tasks of all users in the Linkis management console.
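
    A minimal login sketch with curl (the gateway address and credentials are placeholders; the session cookie returned by the gateway, typically bdp-user-ticket-id, is saved for subsequent requests):

        curl -X POST -H "Content-Type: application/json" \
          -d '{"userName": "hadoop", "password": "<password>"}' \
          -c cookies.txt \
          "http://127.0.0.1:9001/api/rest_j/v1/user/login"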

    2). Login Out#

    • Interface /api/rest_j/v1/user/logout

    • Submission method POST

      No parameters

    • Sample Response

        {
            "method": "/api/rest_j/v1/user/logout",
            "status": 0,
            "message": "Logout successful(退出登录成功)!"
        }

    3). Heart Beat#

    • Interface /api/rest_j/v1/user/heartbeat

    • Submission method POST

      No parameters

    • Sample Response

        {
            "method": "/api/rest_j/v1/user/heartbeat",
            "status": 0,
            "message": "Maintain heartbeat success(维系心跳成功)!"
        }
    - + \ No newline at end of file diff --git a/docs/1.1.1/api/overview/index.html b/docs/1.1.1/api/overview/index.html index 551849fbe32..4097a9a85d5 100644 --- a/docs/1.1.1/api/overview/index.html +++ b/docs/1.1.1/api/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Overview

    1. Document description#

    Linkis 1.0 has been refactored and optimized on the basis of Linkis 0.x, and it remains compatible with the 0.x interfaces. However, to prevent compatibility problems when using version 1.0, please read the following documents carefully:

    1. When using Linkis 1.0 for customized development, you need to use Linkis's authorization and authentication interface. Please read the Login API Document carefully.

    2. Linkis 1.0 provides a JDBC interface. If you need to access Linkis via JDBC, please read Task Submit and Execute JDBC API Document.

    3. Linkis 1.0 provides a Rest interface. If you need to develop upper-level applications on top of Linkis, please read Task Submit and Execute Rest API Document.

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/add_an_engine_conn/index.html b/docs/1.1.1/architecture/add_an_engine_conn/index.html index 52ee6128dca..c48351ebabd 100644 --- a/docs/1.1.1/architecture/add_an_engine_conn/index.html +++ b/docs/1.1.1/architecture/add_an_engine_conn/index.html @@ -7,7 +7,7 @@ Add an EngineConn | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    How to add an EngineConn

    Adding an EngineConn is one of the core processes of the computing task preparation phase of Linkis computing governance. It mainly includes the following steps: first, the client side (Entrance or a user client) initiates a request for a new EngineConn to LinkisManager. Then LinkisManager initiates a request to EngineConnManager to start the EngineConn based on demands and label rules. Finally, LinkisManager returns the usable EngineConn to the client side.

    Based on the figure below, let's explain the whole process in detail:

    Process of adding an EngineConn

    1. LinkisManager receives the request from the client side#

    Glossary:

    • LinkisManager: The management center of Linkis computing governance capabilities. Its main responsibilities are:

      1. Based on multi-level combined tags, provide users with available EngineConn after complex routing, resource management and load balancing.

      2. Provide EC and ECM full life cycle management capabilities.

      3. Provide users with multi-Yarn cluster resource management functions based on multi-level combined tags.

    LinkisManager is mainly divided into three modules: AppManager, ResourceManager and LabelManager, which support multi-active deployment and have the characteristics of high availability and easy expansion.

    After the AM module receives the Client's new EngineConn request, it first checks the request parameters to determine their validity. Second, it selects the most suitable EngineConnManager (ECM) through complex rules for the subsequent EngineConn startup. Next, it applies to RM for the resources needed to start the EngineConn. Finally, it requests the ECM to create the EngineConn.

    The four steps will be described in detail below.

    1. Request parameter verification#

    After the AM module receives the engine creation request, it checks the parameters. First, it checks the permissions of the requesting user and the creating user, and then checks the Labels attached to the request. Since the Labels are used later in AM's creation process to find the ECM and record resource information, you must ensure that the necessary Labels are present. At this stage, the request must carry the UserCreatorLabel (for example: hadoop-IDE) and the EngineTypeLabel (for example: spark-2.4.3).
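
    In a task request, these two labels appear in the labels field of the request body, for example (a fragment in the same form as the submit request shown later in this documentation):

        "labels": {
            "engineType": "spark-2.4.3",
            "userCreator": "hadoop-IDE"
        }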

    2. Select an EngineConnManager (ECM)#

    ECM selection mainly uses the Labels passed by the client to choose a suitable ECM service to start the EngineConn. In this step, the LabelManager first searches the registered ECMs with the Labels passed by the client and returns them ordered by label matching degree. After the registered ECM list is obtained, selection rules are applied to these ECMs. Rules such as availability check, resource surplus, and machine load are currently implemented. After the rules are applied, the ECM with the best label match, the most idle resources, and the lowest load is returned.

    3. Apply resources required for EngineConn#

    1. After obtaining the assigned ECM, AM asks the EngineConnPluginServer service how many resources the client's engine creation request will use. Here the resource request is encapsulated, mainly including the Labels, the EngineConn startup parameters passed by the Client, and the user configuration parameters obtained from the Configuration module. The resource information is obtained by calling the ECP service through RPC.

    2. After the EngineConnPluginServer service receives the resource request, it first finds the corresponding engine tag through the passed tags and selects the EngineConnPlugin of the corresponding engine. It then uses the EngineConnPlugin's resource generator to calculate, from the engine startup parameters passed in by the client, the resources required to apply for this new EngineConn, and returns the result to LinkisManager.

      Glossary:

    • EngineConnPlugin: It is the interface that Linkis must implement when connecting a new computing storage engine. This interface mainly includes several capabilities that the EngineConn must provide during the startup process, including the EngineConn resource generator, the EngineConn startup command generator, and the EngineConn engine connector. Please refer to the Spark engine implementation class for a concrete implementation: SparkEngineConnPlugin.
    • EngineConnPluginServer: It is a microservice that loads all the EngineConnPlugins and provides externally the required resource generation capabilities of EngineConn and EngineConn's startup command generation capabilities.
    • EngineConnResourceFactory: Calculate the total resources needed when EngineConn starts this time through the parameters passed in.
    • EngineConnLaunchBuilder: Through the incoming parameters, a startup command of the EngineConn is generated to provide the ECM to start the engine.
    3. After AM obtains the engine resources, it calls the RM service to apply for resources. The RM service uses the incoming Labels, the ECM, and the resources applied for this time to make a resource judgment. First, it judges whether the resources of the client corresponding to the Labels are sufficient, and then whether the resources of the ECM service are sufficient. If the resources are sufficient, the resource application is approved, and the resources of the corresponding Labels are updated accordingly.

    4. Request ECM for engine creation#

    1. After completing the resource application for the engine, AM will encapsulate the engine startup request, send it to the corresponding ECM via RPC for service startup, and obtain the instance object of EngineConn.
    2. AM will then determine whether EngineConn is successfully started and become available through the reported information of EngineConn. If it is, the result will be returned, and the process of adding an engine this time will end.

    2. ECM initiates EngineConn#

    Glossary:

    • EngineConnManager: EngineConn's manager. Provides engine life-cycle management, and at the same time reports load information and its own health status to RM.
    • EngineConnBuildRequest: The start engine command passed by LinkisManager to ECM, which encapsulates all tag information, required resources and some parameter configuration information of the engine.
    • EngineConnLaunchRequest: Contains the BML materials, environment variables, ECM required local environment variables, startup commands and other information required to start an EngineConn, so that ECM can build a complete EngineConn startup script based on this.

    After ECM receives the EngineConnBuildRequest command passed by LinkisManager, it is mainly divided into three steps to start EngineConn:

    1. Request EngineConnPluginServer to obtain the encapsulated EngineConnLaunchRequest.
    2. Parse EngineConnLaunchRequest and encapsulate it into EngineConn startup script.
    3. Execute startup script to start EngineConn.

    2.1 EngineConnPluginServer encapsulates EngineConnLaunchRequest#

    Get the EngineConn type and corresponding version that actually need to be started from the label information of the EngineConnBuildRequest, get the EngineConnPlugin of that EngineConn type from the memory of EngineConnPluginServer, and convert the EngineConnBuildRequest into an EngineConnLaunchRequest through the EngineConnLaunchBuilder of that EngineConnPlugin.

    2.2 Encapsulate EngineConn startup script#

    After the ECM obtains the EngineConnLaunchRequest, it downloads the BML materials in the EngineConnLaunchRequest to the local, and checks whether the local necessary environment variables required by the EngineConnLaunchRequest exist. After the verification is passed, the EngineConnLaunchRequest is encapsulated into an EngineConn startup script.

    2.3 Execute startup script#

    Currently, ECM only supports Bash commands on Unix-like systems; that is, only Linux systems can execute the startup script.

    Before startup, the sudo command is used to switch to the corresponding requesting user to execute the script, ensuring that the startup user (i.e., the JVM user) is the requesting user on the Client side.
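
    Conceptually, the invocation looks like the following illustrative sketch; the actual script name and path are generated by ECM and will differ per deployment:

        # ECM switches to the requesting user before running the generated startup script
        sudo su ${REQUEST_USER} -c "sh ${ENGINECONN_ROOT}/engineConnExec.sh"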

    After the startup script is executed, ECM monitors its execution status and execution log in real time. Once the execution status returns non-zero, it immediately reports EngineConn startup failure to LinkisManager and the entire process ends; otherwise, it keeps monitoring the log and status of the startup script until the script execution completes.

    3. EngineConn initialization#

    After ECM executes the EngineConn startup script, the EngineConn microservice is officially launched.

    Glossary:

    • EngineConn microservice: Refers to the actual microservices that include an EngineConn and one or more Executors to provide computing power for computing tasks. When we talk about adding an EngineConn, we actually mean adding an EngineConn microservice.
    • EngineConn: The engine connector is the actual connection unit with the underlying computing storage engine, and contains the session information with the actual engine. The difference between it and Executor is that EngineConn only acts as a connection and a client, and does not actually perform calculations. For example, SparkEngineConn, its session information is SparkSession.
    • Executor: As a real computing storage scenario executor, it is the actual computing storage logic execution unit. It abstracts the various capabilities of EngineConn and provides multiple different architectural capabilities such as interactive execution, subscription execution, and responsive execution.

    The initialization of EngineConn microservices is generally divided into three stages:

    1. Initialize the EngineConn of the specific engine. First, the command-line parameters of the Java main method are encapsulated into an EngineCreationContext that contains the relevant label information, startup information, and parameter information. EngineConn is then initialized through the EngineCreationContext to establish the connection between EngineConn and the underlying engine; for example, SparkEngineConn initializes a SparkSession at this stage to establish a connection with a Spark application.
    2. Initialize the Executor. After the EngineConn is initialized, the corresponding Executor is initialized according to the actual usage scenario to provide service capabilities for subsequent users. For example, the SparkEngineConn in the interactive computing scenario initializes a series of Executors that can be used to submit and execute SQL, PySpark, and Scala code, so that the Client can submit and execute such code against the SparkEngineConn.
    3. Report the heartbeat to LinkisManager regularly and wait for EngineConn to exit. When the underlying engine corresponding to EngineConn becomes abnormal, the maximum idle time is exceeded, the Executor finishes executing, or the user manually kills it, the EngineConn automatically ends and exits.

    At this point, the process of adding a new EngineConn is basically complete. Finally, let's summarize:

    • The client initiates a request for adding EngineConn to LinkisManager.
    • LinkisManager checks the legitimacy of the parameters, first selects the appropriate ECM according to the label, then confirms the resources required for this new EngineConn according to the user's request, applies for resources from the RM module of LinkisManager, and requires ECM to start a new EngineConn as required after the application is passed.
    • ECM first requests EngineConnPluginServer to obtain an EngineConnLaunchRequest containing BML materials, environment variables, ECM required local environment variables, startup commands and other information needed to start an EngineConn, and then encapsulates the startup script of EngineConn, and finally executes the startup script to start the EngineConn.
    • EngineConn initializes the EngineConn of a specific engine, and then initializes the corresponding Executor according to the actual usage scenario, and provides service capabilities for subsequent users. Finally, report the heartbeat to LinkisManager regularly, and wait for the normal end or termination by the user.
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/commons/message_scheduler/index.html b/docs/1.1.1/architecture/commons/message_scheduler/index.html index a0e5e018077..2d1dc33dd93 100644 --- a/docs/1.1.1/architecture/commons/message_scheduler/index.html +++ b/docs/1.1.1/architecture/commons/message_scheduler/index.html @@ -7,7 +7,7 @@ Message Scheduler Module | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Message Scheduler Module

    1 Overview#

            Linkis-RPC enables communication between microservices. To simplify the use of RPC, Linkis provides the Message-Scheduler module, which analyzes, identifies, and invokes methods annotated with @Receiver. It also unifies the use of RPC and Restful interfaces, which gives better scalability.

    2. Architecture description#

    2.1. Architecture design diagram#

    Module Design Drawing

    2.2. Module description#

    • ServiceParser: Parses the objects of the Service module and encapsulates the methods annotated with @Receiver into ServiceMethod objects.
    • ServiceRegistry: Registers the corresponding Service module and stores the ServiceMethods parsed from the Service in a Map container.
    • ImplicitParser: Parses the objects of the Implicit module; methods annotated with @Implicit are encapsulated into ImplicitMethod objects.
    • ImplicitRegistry: Registers the corresponding Implicit module and stores the resolved ImplicitMethods in a Map container.
    • Converter: Scans the non-interface, non-abstract subclasses of RequestMethod and stores them in a Map; parses Restful requests and matches the related RequestProtocol.
    • Publisher: Implements the publishing and scheduling function: finds the ServiceMethod matching the RequestProtocol in the Registry and encapsulates it as a Job for submission and scheduling.
    • Scheduler: The scheduling implementation, which uses Linkis-Scheduler to execute the Job and returns a MessageJob object.
    • TxManager: Completes transaction management: performs transaction management on Job execution and judges whether to commit or roll back after the Job execution ends.
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/commons/rpc/index.html b/docs/1.1.1/architecture/commons/rpc/index.html index 08356b59aa7..f146248c9b8 100644 --- a/docs/1.1.1/architecture/commons/rpc/index.html +++ b/docs/1.1.1/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC Module | Apache Linkis - + @@ -16,7 +16,7 @@ At the same time, because Feign only supports simple service selection rules, it cannot forward the request to the specified microservice instance, and cannot broadcast a request to all instances of the recipient microservice.

    2. Architecture description#

    2.1. Architecture design diagram#

    Linkis RPC architecture diagram

    2.2. Module description#

    The functions of the main modules are introduced as follows:

    • Eureka: the service registry, providing service registration and discovery.
    • Sender: the service request interface; the sender uses a Sender to request services from the receiver.
    • Receiver: the interface for receiving service requests; the receiver responds to services through this interface.
    • Interceptor: the Sender passes the user's request to the interceptor, which intercepts the request and performs additional processing on it: the broadcast interceptor broadcasts operations on the request, the retry interceptor retries failed requests, the cache interceptor reads and caches simple, unchanging requests, and the default interceptor provides the default implementation.
    • Decoder, Encoder: used for request encoding and decoding.
    • Feign: a lightweight framework for HTTP request calls; a declarative WebService client used for Linkis-RPC bottom-layer communication.
    • Listener: the monitoring module, mainly used to monitor broadcast requests.
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn/index.html b/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn/index.html index 904e811239d..889c91bc734 100644 --- a/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn/index.html +++ b/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    EngineConn architecture design

    EngineConn: the engine connector, which provides the connection session between Linkis and the underlying computing storage engines and is the basis on which Linkis submits and executes computing tasks.

    EngineConn architecture diagram

    EngineConn

    Introduction to the second-level module:

    linkis-computation-engineconn interactive engine connector#

    Provides the ability to execute interactive computing tasks.

    Core Class | Core Function
    EngineConnTask | Defines the interactive computing tasks submitted to EngineConn
    ComputationExecutor | Defines the interactive Executor, with interactive capabilities such as status query and task kill
    TaskExecutionService | Provides management functions for interactive computing tasks

    linkis-engineconn-common engine connector common module#

    Defines the most basic entity classes and interfaces of the engine connector. EngineConn is used to create a connection session for the underlying computing storage engine; it contains the session information between the engine and the specific cluster, and is the client that communicates with the specific engine.

    Core Service | Core Function
    EngineCreationContext | Contains the context information of EngineConn during startup
    EngineConn | Contains the specific information of EngineConn, such as type and the connection information with the underlying computing storage engine
    EngineExecution | Provides the Executor creation logic
    EngineConnHook | Defines the operations before and after each phase of engine startup

    The core logic of linkis-engineconn-core engine connector#

    Defines the interfaces involved in the core logic of EngineConn.

    Core Class | Core Function
    EngineConnManager | Provides interfaces for creating and obtaining EngineConn
    ExecutorManager | Provides interfaces for creating and obtaining Executor
    ShutdownHook | Defines the operations of the engine shutdown phase

    linkis-engineconn-launch engine connector startup module#

    Defines the logic of how to start EngineConn.

    Core Class | Core Function
    EngineConnServer | The startup class of the EngineConn microservice

    The core logic of the linkis-executor-core executor#

    Defines the core classes related to the Executor. The Executor is the executor for real computing scenarios, responsible for submitting user code to EngineConn.

    Core Class | Core Function
    Executor | The actual computational logic execution unit; provides a top-level abstraction of the engine's capabilities
    EngineConnAsyncEvent | Defines EngineConn-related asynchronous events
    EngineConnSyncEvent | Defines EngineConn-related synchronous events
    EngineConnAsyncListener | Defines the EngineConn-related asynchronous event listener
    EngineConnSyncListener | Defines the EngineConn-related synchronous event listener
    EngineConnAsyncListenerBus | Defines the listener bus for EngineConn asynchronous events
    EngineConnSyncListenerBus | Defines the listener bus for EngineConn synchronous events
    ExecutorListenerBusContext | Defines the context of the EngineConn event listeners
    LabelService | Provides the label reporting function
    ManagerService | Provides the function of information transfer with LinkisManager

    linkis-callback-service callback logic#

    Core Class | Core Function
    EngineConnCallback | Defines EngineConn's callback logic

    linkis-accessible-executor accessible executor#

    An Executor that can be accessed. You can interact with it through RPC requests to obtain its status, load, concurrency, and other basic metrics.

    Core Class | Core Function
    LogCache | Provides the log caching function
    AccessibleExecutor | An Executor that can be interacted with through RPC requests
    NodeHealthyInfoManager | Manages the Executor's health information
    NodeHeartbeatMsgManager | Manages the Executor's heartbeat information
    NodeOverLoadInfoManager | Manages the Executor's load information
    Listener | Provides Executor-related events and the corresponding listener definitions
    EngineConnTimedLock | Defines the Executor-level lock
    AccessibleService | Provides start/stop and status acquisition functions for the Executor
    ExecutorHeartbeatService | Provides heartbeat-related functions for the Executor
    LockService | Provides the lock management function
    LogService | Provides log management functions
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_manager/index.html b/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_manager/index.html index c405ce7e171..b6fe1518c5e 100644 --- a/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_manager/index.html +++ b/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_manager/index.html @@ -7,7 +7,7 @@ EngineConnManager Design | Apache Linkis - + @@ -16,7 +16,7 @@ Core Service and Features module are as follows:

    Core Service | Core Function
    EngineConnLaunchService | Contains the core methods for generating an EngineConn and starting its process
    BmlResourceLocallizationService | Used to download BML engine-related resources and generate the localized file directory
    ECMHealthService | Reports its own health heartbeat to AM regularly
    ECMMetricsService | Reports its own metrics status to AM regularly
    EngineConnKillSerivce | Provides functions to stop an engine
    EngineConnListService | Provides functions to cache and manage engines
    EngineConnCallBackService | Provides the engine callback function
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_plugin/index.html b/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_plugin/index.html index e9b218a911b..b9ff696b970 100644 --- a/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_plugin/index.html +++ b/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_plugin/index.html @@ -7,7 +7,7 @@ EngineConnPlugin (ECP) Design | Apache Linkis - + @@ -17,7 +17,7 @@ Other services such as Manager call the logic of the corresponding plug-in in Plugin Server through RPC requests.

    Core Class | Core Function
    EngineConnLaunchService | Responsible for building the engine connector launch request
    EngineConnResourceFactoryService | Responsible for generating engine resources
    EngineConnResourceService | Responsible for downloading the resource files used by the engine connector from BML

    EngineConn-Plugin-Loader Engine Connector Plugin Loader#

    The engine connector plug-in loader is used to dynamically load engine connector plug-ins according to request parameters, with caching. The loading process consists of two main parts: 1) plug-in resources, such as the main program package and its dependency packages, are loaded locally (not open); 2) plug-in resources are dynamically loaded from the local environment into the service process, for example, loaded into the JVM through a class loader.

    Core ClassCore Function
    EngineConnPluginsResourceLoaderLoad engine connector plug-in resources
    EngineConnPluginsLoaderLoad the engine connector plug-in instance, or load an existing one from the cache
    EngineConnPluginClassLoaderDynamically instantiate engine connector instance from jar

    EngineConn-Plugin-Cache engine plug-in cache module#

    The engine connector plug-in cache is a cache service specially used to cache loaded engine connectors, supporting read, update, and remove operations. A plug-in that has been loaded into the service process is cached together with its class loader to prevent repeated loading from hurting efficiency; at the same time, the cache module periodically notifies the loader to update the plug-in resources, and if changes are found, the plug-in is reloaded and the cache is refreshed automatically.

    Core Class | Core Function
    EngineConnPluginCache | Caches loaded engine connector instances
    RefreshPluginCacheContainer | Refreshes the cached engine connectors regularly

    EngineConn-Plugin-Core: Engine connector plug-in core module#

    The engine connector plug-in core module is the core module of the engine connector plug-in. It contains implementations of the basic functions of an engine plug-in, such as the construction of the engine connector startup command, the construction of the engine resource factory, and the implementation of the core interfaces of the engine connector plug-in.

    Core Class | Core Function
    EngineConnLaunchBuilder | Builds the engine connector launch request
    EngineConnFactory | Creates the engine connector
    EngineConnPlugin | The engine connector plug-in interface, covering resource, command, and instance construction methods
    EngineResourceFactory | The engine resource creation factory

    EngineConn-Plugins: Engine connection plugin collection#

    The engine connection plug-in collection holds the default engine connector plug-in library implemented against the plug-in interface defined by Linkis. It provides default engine connector implementations such as jdbc, spark, python, and shell. Users can refer to these implemented cases to implement more engine connectors based on their own needs.

    Core Class | Core Function
    engineplugin-jdbc | jdbc engine connector
    engineplugin-shell | shell engine connector
    engineplugin-spark | spark engine connector
    engineplugin-python | python engine connector
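
    On a deployed Linkis, these plug-ins typically sit under the engine plug-in directory of the installation. The following layout is an illustrative assumption, not an exact listing:

        lib/linkis-engineconn-plugins/
        ├── jdbc/
        ├── python/
        ├── shell/
        └── spark/
            ├── dist/    # engine material packages to be uploaded to BML
            └── plugin/  # plug-in jars loaded by EngineConnPluginServer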
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/computation_governance_services/entrance/index.html b/docs/1.1.1/architecture/computation_governance_services/entrance/index.html index 080126863c8..7bba18371a7 100644 --- a/docs/1.1.1/architecture/computation_governance_services/entrance/index.html +++ b/docs/1.1.1/architecture/computation_governance_services/entrance/index.html @@ -7,7 +7,7 @@ Entrance Architecture Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Entrance Architecture Design

    The Linkis task submission portal is used to receive, schedule, and forward execution requests and to manage the life cycle of computing tasks, and it can return calculation results, logs, and progress to the caller. It is split out from the native capabilities of the Linkis 0.X Entrance.

    1. Entrance architecture diagram

    Introduction to the second-level module:

    EntranceServer#

    The EntranceServer computing task submission portal is the core service of Entrance, responsible for receiving, scheduling, tracking the execution status of, and managing the life cycle of Linkis execution tasks. It mainly converts task execution requests into schedulable Jobs, schedules them, applies for Executors to execute them, and manages job status, result sets, and logs.

    Core Class | Core Function
    EntranceInterceptor | The Entrance interceptor supplements the information of the incoming task, making its content more complete; the supplementary information includes database information, custom variable replacement, code checks, limit restrictions, etc.
    EntranceParser | The Entrance parser parses the request parameter Map into a Task; it can also convert a Task into a schedulable Job, or a Job into a storable Task
    EntranceExecutorManager | Creates an Executor for the execution of an EntranceJob, maintains the relationship between Job and Executor, and supports the labeling capabilities requested by the Job
    PersistenceManager | Responsible for job-related persistence operations, such as storing the result set path, job status changes, and progress in the database
    ResultSetEngine | Responsible for storing the result set after the job runs; the result set is saved as a file to HDFS or a local storage directory
    LogManager | Responsible for storing job logs and managing log error codes
    Scheduler | The job scheduler, responsible for scheduling and executing all jobs, mainly through scheduling job queues
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/computation_governance_services/linkis-cli/index.html b/docs/1.1.1/architecture/computation_governance_services/linkis-cli/index.html index 9ec87555e80..c248e4fc857 100644 --- a/docs/1.1.1/architecture/computation_governance_services/linkis-cli/index.html +++ b/docs/1.1.1/architecture/computation_governance_services/linkis-cli/index.html @@ -7,7 +7,7 @@ Linkis-Client Architecture Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Linkis-Client Architecture Design

    Provide users with a lightweight client that submits tasks to Linkis for execution.

    Linkis-Client architecture diagram#

    img

    Second-level module introduction#

    Linkis-Computation-Client#

    Provides an interface for users to submit execution tasks to Linkis in the form of SDK.

    Core Class | Core Function
    Action | Defines the attributes, methods, and parameters of a request
    Result | Defines the properties, methods, and parameters of a returned result
    UJESClient | Responsible for request submission and execution, and for obtaining status, results, and related parameters
    Linkis-Cli#

    Provides a way for users to submit tasks to Linkis in the form of a shell command terminal.

    Core Class | Core Function
    Common | Defines the instruction template parent class, the instruction analysis entity class, and the parent classes and interfaces of the task submission and execution links
    Core | Responsible for parsing input, executing tasks, and defining output methods
    Application | Calls linkis-computation-client to execute tasks, and pulls logs and the final result in real time
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/computation_governance_services/linkis_manager/app_manager/index.html b/docs/1.1.1/architecture/computation_governance_services/linkis_manager/app_manager/index.html index 6b99f4694e5..f383bc81c6d 100644 --- a/docs/1.1.1/architecture/computation_governance_services/linkis_manager/app_manager/index.html +++ b/docs/1.1.1/architecture/computation_governance_services/linkis_manager/app_manager/index.html @@ -7,7 +7,7 @@ App Manager | Apache Linkis - + @@ -29,7 +29,7 @@ Engine manager: Engine manager is responsible for managing the basic information and metadata information of all engines.

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/computation_governance_services/linkis_manager/label_manager/index.html b/docs/1.1.1/architecture/computation_governance_services/linkis_manager/label_manager/index.html index d8aa15ae6e1..698d6c2a0a4 100644 --- a/docs/1.1.1/architecture/computation_governance_services/linkis_manager/label_manager/index.html +++ b/docs/1.1.1/architecture/computation_governance_services/linkis_manager/label_manager/index.html @@ -7,7 +7,7 @@ Label Manager | Apache Linkis - + @@ -22,7 +22,7 @@ We set that the higher the proportion of candidate nodes associated with irrelevant labels in the total associated nodes, the more significant the impact on the score, which can further accumulate the initial score of the node obtained in the first step.
  • Normalize the standard deviation of the scores of the candidate nodes and sort them.
  • - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/computation_governance_services/linkis_manager/overview/index.html b/docs/1.1.1/architecture/computation_governance_services/linkis_manager/overview/index.html index 68d7ff13ea7..9c7ca95c183 100644 --- a/docs/1.1.1/architecture/computation_governance_services/linkis_manager/overview/index.html +++ b/docs/1.1.1/architecture/computation_governance_services/linkis_manager/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -17,7 +17,7 @@ ResourceManager

    4. Monitoring module linkis-manager-monitor#

            Monitor provides the function of node status monitoring.

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/computation_governance_services/linkis_manager/resource_manager/index.html b/docs/1.1.1/architecture/computation_governance_services/linkis_manager/resource_manager/index.html index fcf8c46d047..9427aa3cc6e 100644 --- a/docs/1.1.1/architecture/computation_governance_services/linkis_manager/resource_manager/index.html +++ b/docs/1.1.1/architecture/computation_governance_services/linkis_manager/resource_manager/index.html @@ -7,7 +7,7 @@ Resource Manager | Apache Linkis - + @@ -25,7 +25,7 @@ url, Hadoop version and other information) are maintained in the linkis_external_resource_provider table.

  • For each resource type, there is an implementation of the ExternalResourceProviderParser interface, which parses the attributes of external resources, converts the information that can be matched to the Label into the corresponding Label, and converts the information that can be used as a parameter to request the resource interface into params . Finally, an ExternalResourceProvider instance that can be used as a basis for querying external resource information is constructed.

  • According to the resource type and label information in the parameters of the ExternalResourceService method, find the matching ExternalResourceProvider, generate an ExternalResourceRequest based on the information in it, and formally call the API provided by the external resource to initiate a resource information request.

  • - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/computation_governance_services/overview/index.html b/docs/1.1.1/architecture/computation_governance_services/overview/index.html index c729e82f860..f3ae674abad 100644 --- a/docs/1.1.1/architecture/computation_governance_services/overview/index.html +++ b/docs/1.1.1/architecture/computation_governance_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -21,7 +21,7 @@ Enter EngineConn Architecture Design

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/difference_between_1.0_and_0.x/index.html b/docs/1.1.1/architecture/difference_between_1.0_and_0.x/index.html index 109ff2e98cf..3993220571b 100644 --- a/docs/1.1.1/architecture/difference_between_1.0_and_0.x/index.html +++ b/docs/1.1.1/architecture/difference_between_1.0_and_0.x/index.html @@ -7,7 +7,7 @@ Difference Between 1.0 And 0.x | Apache Linkis - + @@ -34,7 +34,7 @@ Linkis EngineConn Architecture diagram

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/job_submission_preparation_and_execution_process/index.html b/docs/1.1.1/architecture/job_submission_preparation_and_execution_process/index.html index 1cbfb60a495..d7e6b3793b0 100644 --- a/docs/1.1.1/architecture/job_submission_preparation_and_execution_process/index.html +++ b/docs/1.1.1/architecture/job_submission_preparation_and_execution_process/index.html @@ -7,7 +7,7 @@ Job Submission | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Job submission, preparation and execution process

    The submission and execution of computing tasks (Jobs) is the core capability provided by Linkis. It interacts with almost all modules in the Linkis computing governance architecture and occupies a core position in Linkis.

    The whole process, starting when the user's computing task is submitted from the client and ending when the final result is returned, is divided into three stages: submission -> preparation -> execution. The details are shown in the following figure.

    The overall flow chart of computing tasks

    Among them:

    • Entrance, as the entrance to the submission stage, provides task reception, scheduling and job information forwarding capabilities. It is the unified entrance for all computing tasks. It will forward computing tasks to Orchestrator for scheduling and execution.

    • Orchestrator, as the entrance to the preparation phase, mainly provides job analysis, orchestration and execution capabilities.

    • Linkis Manager: The management center of computing governance capabilities. Its main responsibilities are as follows:

      1. ResourceManager: not only has the resource management capabilities of Yarn and Linkis EngineConnManager, but also provides tag-based multi-level resource allocation and recycling, giving ResourceManager full resource management capabilities across clusters and across computing resource types;
      2. AppManager: coordinates and manages all EngineConnManagers and EngineConns; the whole life cycle of an EngineConn (application, reuse, creation, switching, destruction) is handed to AppManager for management;
      3. LabelManager: based on multi-level combined labels, provides label support for the cross-IDC and cross-cluster routing and management of EngineConn and EngineConnManager;
      4. EngineConnPluginServer: externally provides the resource generation capabilities required to start an EngineConn and the EngineConn startup command generation capabilities.
    • EngineConnManager: It is the manager of EngineConn, which provides engine life-cycle management, and at the same time reports load information and its own health status to RM.

    • EngineConn: It is the actual connector between Linkis and the underlying computing storage engines. All user computing and storage tasks will eventually be submitted to the underlying computing storage engine by EngineConn. According to different user scenarios, EngineConn provides full-stack computing capability framework support for interactive computing, streaming computing, off-line computing, and data storage tasks.

    1. Submission Stage#

    The submission phase is mainly the interaction of Client -> Linkis Gateway -> Entrance, and the process is as follows:

    Flow chart of submission phase

    1. First, the Client (such as the front end or the client) initiates a Job request, and the job request information is simplified as follows (for the specific usage of Linkis, please refer to How to use Linkis):
    POST /api/rest_j/v1/entrance/submit
    {     "executionContent": {"code": "show tables", "runType": "sql"},     "params": {"variable": {}, "configuration": {}}, //not required     "source": {"scriptPath": "file:///1.hql"}, //not required, only used to record code source     "labels": {         "engineType": "spark-2.4.3", //Specify engine         "userCreator": "username-IDE" // Specify the submission user and submission system     }}
    2. After Linkis-Gateway receives the request, according to the serviceName in the URI /api/rest_j/v1/${serviceName}/.+, it confirms the microservice name for routing and forwarding. Here Linkis-Gateway parses out the name entrance, and the Job is forwarded to the Entrance microservice. Note that if the user specifies a routing label, the Entrance microservice instance with the corresponding label is selected for forwarding according to that label instead of randomly.
    3. After Entrance receives the Job request, it first verifies the legitimacy of the request, then uses RPC to call JobHistory to persist the job information, encapsulates the Job request as a computing task, puts it in the scheduling queue, and waits for it to be consumed by a consumer thread.
    4. The scheduling queue opens a consumption queue and a consumer thread for each group. The consumption queue stores the preliminarily encapsulated user computing tasks, and the consumer thread continuously takes computing tasks from the queue in a FIFO manner. The current default grouping method is Creator + User (that is, submission system + user). Therefore, even for the same user, computing tasks submitted by different systems use completely different consumption queues and consumer threads, fully isolated from each other. (Reminder: users can modify the grouping algorithm as needed.)
    5. After the consumer thread takes out a computing task, it submits the task to Orchestrator, which officially enters the preparation phase; a curl sketch of the submission request in step 1 appears below.
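
    Putting step 1 into practice, a submission through the gateway might look like this sketch (the gateway address and session cookie are placeholders):

        curl -X POST -H "Content-Type: application/json" \
          --cookie "bdp-user-ticket-id=<your-session-ticket>" \
          -d '{"executionContent": {"code": "show tables", "runType": "sql"}, "labels": {"engineType": "spark-2.4.3", "userCreator": "username-IDE"}}' \
          "http://127.0.0.1:9001/api/rest_j/v1/entrance/submit"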

    2. Preparation Stage#

    There are two main processes in the preparation phase. One is to apply to LinkisManager for an available EngineConn to which the computing task can be submitted and executed. The other is for Orchestrator to orchestrate the computing task submitted by Entrance, converting the user's computing request into a physical execution tree that is handed over to the execution phase, where the computing task is actually executed.

    2.1 Apply to LinkisManager for available EngineConn#

    If the user has a reusable EngineConn in LinkisManager, the EngineConn is directly locked and returned to Orchestrator, and the entire application process ends.

    How to define a reusable EngineConn? It is one that can match all the label requirements of the computing task and whose health status is Healthy (the load is low and the actual status is Idle). All EngineConns that meet the conditions are then sorted and selected according to the rules, and finally the best one is locked.

    If the user does not have a reusable EngineConn, a process to request a new EngineConn will be triggered at this time. Regarding the process, please refer to: How to add an EngineConn.

    2.2 Orchestrate a computing task#

    Orchestrator is mainly responsible for arranging a computing task (JobReq) into a physical execution tree (PhysicalTree) that can be actually executed, and providing the execution capabilities of the Physical tree.

    Here we first focus on Orchestrator's computing task scheduling capabilities. A flow chart is shown below:

    Orchestration flow chart

    The main process is as follows:

    • Converter: Complete the conversion of the JobReq (task request) submitted by the user to Orchestrator's ASTJob. This step will perform parameter check and information supplementation on the calculation task submitted by the user, such as variable replacement, etc.
    • Parser: Complete the analysis of ASTJob. Split ASTJob into an AST tree composed of ASTJob and ASTStage.
    • Validator: Complete the inspection and information supplement of ASTJob and ASTStage, such as code inspection, necessary Label information supplement, etc.
    • Planner: Convert an AST tree into a Logical tree. The Logical tree at this time has been composed of LogicalTask, which contains all the execution logic of the entire computing task.
    • Optimizer: Convert a Logical tree to a Physical tree and optimize the Physical tree.

    In a physical tree, the majority of nodes are computing strategy logic. Only the middle ExecTask truly encapsulates the execution logic which will be further submitted to and executed at EngineConn. As shown below:

    Physical Tree

    The execution logic encapsulated by JobExecTask and StageExecTask in the Physical tree depends on the specific computing strategy.

    For example, under the multi-active computing strategy, for a computing task submitted by a user, the execution logic submitted to EngineConn of different clusters for execution is encapsulated in two ExecTasks, and the related strategy logic is reflected in the parent node (StageExecTask(End)) of the two ExecTasks.

    Here, we take the multi-reading scenario under the multi-active computing strategy as an example.

    In the multi-reading scenario, only one ExecTask needs to return a result; once a result is returned, the Physical tree can be marked as successful. However, the Physical tree can only execute sequentially according to dependencies and cannot terminate the execution of individual nodes: once a node is canceled or fails, the entire Physical tree would be marked as failed. StageExecTask(End) is therefore needed to ensure that the Physical tree can cancel the ExecTask that failed, continue to upload the result set generated by the successful ExecTask, and let the Physical tree continue to execute. This is the computing strategy execution logic represented by StageExecTask.

    The orchestration process of Linkis Orchestrator is similar to many SQL parsing engines (such as Spark, Hive's SQL parser). But in fact, the orchestration capability of Linkis Orchestrator is realized based on the computing governance field for the different computing governance needs of users. The SQL parsing engine is a parsing orchestration oriented to the SQL language. Here is a simple distinction:

    1. What Linkis Orchestrator mainly solves are the orchestration requirements that different computing tasks impose on computing strategies. For example, to be multi-active, Orchestrator will, for a computing task submitted by a user, compile a Physical tree based on the "multi-active" computing strategy requirements, so as to submit this computing task to multiple clusters. In constructing the entire Physical tree, various possible abnormal scenarios have been fully considered and are all reflected in the Physical tree.
    2. The orchestration ability of Linkis Orchestrator has nothing to do with the programming language. In theory, as long as an engine has been adapted to Linkis, all the programming languages it supports can be orchestrated, while a SQL parsing engine only cares about the analysis and execution of SQL, and is only responsible for parsing a piece of SQL into an executable Physical tree and computing the result.
    3. Linkis Orchestrator also has the ability to parse SQL, but SQL parsing is just one of the Parser's implementations for the SQL programming language. The Parser of Linkis Orchestrator also considers introducing Apache Calcite to parse SQL, supporting the splitting of a user SQL that spans multiple computing engines (each must be a computing engine that Linkis has integrated) into multiple sub-SQLs, submitting them to the corresponding engines during the execution phase, and finally selecting a suitable computing engine for summary calculation.

    After the analysis and orchestration by Linkis Orchestrator, the computing task has been transformed into an executable Physical tree. Orchestrator submits the Physical tree to its Execution module, entering the final execution stage.

    3. Execution Stage#

    The execution stage is mainly divided into the following two steps, these two steps are the last two phases of capabilities provided by Linkis Orchestrator:

    Flow chart of the execution stage

    The main process is as follows:

    • Execution: Analyzes the dependencies of the Physical tree and executes the nodes sequentially from the leaves according to those dependencies.
    • Reheater: Once the execution of a node in the Physical tree is completed, a reheat is triggered. Reheating allows the Physical tree to be dynamically adjusted according to real-time execution. For example, if a leaf node is detected to have failed and it supports retry (the failure was caused by throwing a ReTryExecption), the Physical tree is automatically adjusted and a retry parent node with exactly the same content is added above the leaf node.

    Let us go back to the Execution stage, where we focus on the execution logic of the ExecTask node that encapsulates the user computing task submitted to EngineConn.

    1. As mentioned earlier, the first step in the preparation phase is to obtain a usable EngineConn from LinkisManager. After ExecTask gets this EngineConn, it will submit the user's computing task to EngineConn through an RPC request.
    2. After EngineConn receives the computing task, it will asynchronously submit it to the underlying computing storage engine through the thread pool, and then immediately return an execution ID.
    3. After ExecTask gets this execution ID, it can then use the ID to asynchronously pull the execution status of the computing task (such as: status, progress, log, result set, etc.).
    4. At the same time, EngineConn will monitor the execution of the underlying computing storage engine in real time through multiple registered Listeners. If the computing storage engine does not support registering Listeners, EngineConn will start a daemon thread for the computing task and periodically pull the execution status from the computing storage engine.
    5. EngineConn pulls the execution status back to the microservice where Orchestrator is located in real time through RPC requests.
    6. After the Receiver of that microservice receives the execution status, it broadcasts it through the ListenerBus, and Orchestrator's Execution consumes the event and dynamically updates the execution status of the Physical tree.
    7. The result set generated by the computing task is written to storage media such as HDFS on the EngineConn side. EngineConn returns only the result set path through RPC; Execution consumes the event and broadcasts the obtained result set path through the ListenerBus, so that the Listener registered by Entrance with Orchestrator can consume the path and persist it to JobHistory.
    8. After the computing task on the EngineConn side completes, the same logic triggers Execution to update the state of the corresponding ExecTask node in the Physical tree, so the Physical tree continues to execute until the entire tree finishes. At that point, Execution broadcasts the completion status of the computing task through the ListenerBus.
    9. After the Listener registered by Entrance with Orchestrator consumes the state event, it updates the job state in JobHistory, and the entire task execution is complete.

    Finally, let's take a look at how the client side knows the state of the calculation task and obtains the calculation result in time, as shown in the following figure:

    Results acquisition process

    The specific process is as follows:

    1. The client periodically polls to request Entrance to obtain the status of the computing task.
    2. Once the status flips to success, it sends a request for job information to JobHistory and gets all the result set paths.
    3. Initiate a query file content request to PublicService through the result set path, and obtain the content of the result set.

    At this point, the entire process of job submission -> preparation -> execution has been completed.

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/microservice_governance_services/gateway/index.html b/docs/1.1.1/architecture/microservice_governance_services/gateway/index.html index de6b32c6108..3d1380c8c80 100644 --- a/docs/1.1.1/architecture/microservice_governance_services/gateway/index.html +++ b/docs/1.1.1/architecture/microservice_governance_services/gateway/index.html @@ -7,7 +7,7 @@ Gateway Design | Apache Linkis - + @@ -26,7 +26,7 @@ Gateway WebSocket Forwarding

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/microservice_governance_services/overview/index.html b/docs/1.1.1/architecture/microservice_governance_services/overview/index.html index 75da6a22675..0f15f3de4e5 100644 --- a/docs/1.1.1/architecture/microservice_governance_services/overview/index.html +++ b/docs/1.1.1/architecture/microservice_governance_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -31,7 +31,7 @@

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/overview/index.html b/docs/1.1.1/architecture/overview/index.html index 5725cbd9a94..78963e052b2 100644 --- a/docs/1.1.1/architecture/overview/index.html +++ b/docs/1.1.1/architecture/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Overview

    Linkis 1.0 divides all microservices into three categories: public enhancement services, computing governance services, and microservice governance services. The following figure shows the architecture of Linkis 1.0.

    Linkis1.0 Architecture Figure

    The specific responsibilities of each category are as follows:

    1. Public enhancement services are the material library service, context service, data source service, and public services that Linkis 0.X already provides.
    2. The microservice governance services are Spring Cloud Gateway, Eureka, and OpenFeign, already provided by Linkis 0.X; Linkis 1.0 will also provide support for Nacos.
    3. Computing governance services are the core focus of Linkis 1.0: the three stages of submission, preparation, and execution comprehensively upgrade Linkis's ability to control user tasks.

    The following is a directory listing of Linkis1.0 architecture documents:

    1. For the characteristics of the Linkis 1.0 architecture, please read The difference between Linkis1.0 and Linkis0.x.
    2. For Linkis 1.0 public enhancement service documents, please read Public Enhancement Service.
    3. For Linkis 1.0 microservice governance documents, please read Microservice Governance.
    4. For Linkis 1.0 computing governance service documents, please read Computation Governance Service.
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/proxy_user/index.html b/docs/1.1.1/architecture/proxy_user/index.html index 6d53c11326c..042845417ac 100644 --- a/docs/1.1.1/architecture/proxy_user/index.html +++ b/docs/1.1.1/architecture/proxy_user/index.html @@ -7,7 +7,7 @@ Proxy User Mode | Apache Linkis - + @@ -18,7 +18,7 @@
    • The relevant Linkis interfaces need to be able to identify the proxy user information based on the original UserName obtained, perform the operations as the proxy user, and record audit logs, including the user's task execution and download operations
    • When a task is submitted for execution, the Entrance service needs to change the executing user to the proxy user

    5 Things to Consider & Note#

    • Users are divided into proxy users and non-proxy users; a proxy-type user cannot proxy to another user again
    • The list of login users and system users that can be proxied must be controlled, to prohibit arbitrary proxying and avoid uncontrollable permissions. It is best to support configuration via database tables, so that changes take effect directly without restarting the service
    • Log files containing proxy user operations, such as proxy execution and function updates, are recorded separately; all proxy user operations on PublicService are recorded in the log, which is convenient for auditing
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html b/docs/1.1.1/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html index 0c8cef505ef..8318ec188ad 100644 --- a/docs/1.1.1/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html +++ b/docs/1.1.1/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html @@ -7,7 +7,7 @@ Analysis of engin BML | Apache Linkis - + @@ -17,7 +17,7 @@ taskDao.updateState(resourceTask.getId(), TaskState.RUNNING.getValue(), new Date());

    3) The actual writing of material files into the material library is completed by the upload method of the ResourceServiceImpl class. Inside the upload method, the byte streams corresponding to List<MultipartFile> files are persisted to the file storage system of the material library, and the properties of the material file are stored in the resource record table (linkis_ps_bml_resources) and the resource version record table (linkis_ps_bml_resources_version).

    MultipartFile p = files[0];
    String resourceId = (String) properties.get("resourceId");
    String fileName = new String(p.getOriginalFilename().getBytes(Constant.ISO_ENCODE), Constant.UTF8_ENCODE);
    fileName = resourceId;
    String path = resourceHelper.generatePath(user, fileName, properties);
    // generatePath currently supports Local and HDFS paths; the path composition rules
    // are determined by LocalResourceHelper or HdfsResourceHelper
    StringBuilder sb = new StringBuilder();
    long size = resourceHelper.upload(path, user, inputStream, sb, true);
    // The file size calculation and the writing of the file byte stream are implemented
    // by the upload method in LocalResourceHelper or HdfsResourceHelper
    Resource resource = Resource.createNewResource(resourceId, user, fileName, properties);
    // Insert a record into the resource table linkis_ps_bml_resources
    long id = resourceDao.uploadResource(resource);
    // Add a new record to the resource version table linkis_ps_bml_resources_version;
    // the version number at this point is Constant.FIRST_VERSION.
    // Besides the metadata of this version, the most important thing recorded is the storage
    // location of this version's file: the file path, start position, and end position.
    String clientIp = (String) properties.get("clientIp");
    ResourceVersion resourceVersion = ResourceVersion.createNewResourceVersion(
            resourceId, path, md5String, clientIp, size, Constant.FIRST_VERSION, 1);
    versionDao.insertNewVersion(resourceVersion);

    After the above process executes successfully, the material upload is truly complete. The UploadResult is then returned to the client, and the status of this ResourceTask is marked as completed; if the upload fails, the status is marked as failed and the exception information is recorded.

    resource-task

    4.2.2 Engine material update process#

    Engine material update process sequence diagram

    Engine material update process sequence diagram

    If the table linkis_cg_engine_conn_plugin_bml_resources matches the local material data, you need to construct an EngineConnBmlResource object from the data in EngineConnLocalizeResource and update the metadata, such as the version number, file size, and modification time, of the original material file in the linkis_cg_engine_conn_plugin_bml_resources table. Before updating the record, you need to complete the update and upload of the material file itself, that is, execute the uploadToBml(localizeResource, engineConnBmlResource.getBmlResourceId) method.

    Inside the uploadToBml(localizeResource, resourceId) method, a bmlClient is constructed to request the material resource update interface, which is:

    private val bmlClient = BmlClientFactory.createBmlClient()
    bmlClient.updateResource(Utils.getJvmUser, resourceId, localizeResource.fileName, localizeResource.getFileInputStream)

    In BML Server, the interface for material update is located in the updateVersion interface method in the BmlRestfulApi class. The main process is as follows:

    Complete the validity check of the resourceId, that is, check whether the incoming resourceId exists in the linkis_ps_bml_resources table. If the resourceId does not exist, an exception is thrown to the client and the material update operation fails at the interface level.

    Therefore, the correspondence between the resource data in the tables linkis_cg_engine_conn_plugin_bml_resources and linkis_ps_bml_resources must be complete; otherwise, the material file cannot be updated.

    resourceService.checkResourceId(resourceId)

    If resourceId exists in the linkis_ps_bml_resources table, it will continue to execute:

    StringUtils.isEmpty(versionService.getNewestVersion(resourceId))

    The getNewestVersion method obtains the maximum version number of the resourceId from the table linkis_ps_bml_resources_version. If the maximum version corresponding to the resourceId is empty, the material update will also fail, so the integrity of this data correspondence also needs to be strictly guaranteed.

    After the above two checks are passed, a ResourceUpdateTask will be created to complete the final file writing and record update saving.

    ResourceTask resourceTask = null;
    synchronized (resourceId.intern()) {
        resourceTask = taskService.createUpdateTask(resourceId, user, file, properties);
    }

    Inside the createUpdateTask method, the main functions implemented are:

    // Generate a new version for the material resource
    String lastVersion = getResourceLastVersion(resourceId);
    String newVersion = generateNewVersion(lastVersion);
    // Then construct the ResourceTask and maintain its state
    ResourceTask resourceTask = ResourceTask.createUpdateTask(resourceId, newVersion, user, system, properties);
    // The upload logic of the material update is completed by the versionService.updateVersion method
    versionService.updateVersion(resourceTask.getResourceId(), user, file, properties);

    Inside the versionService.updateVersion method, the main functions implemented are:

    ResourceHelper resourceHelper = ResourceHelperFactory.getResourceHelper();
    InputStream inputStream = file.getInputStream();
    // Get the path of the resource
    String newVersion = params.get("newVersion").toString();
    String path = versionDao.getResourcePath(resourceId) + "_" + newVersion;
    // getResourcePath fetches one record of the original path (limit 1), and newVersion
    // is then appended with an underscore:
    // select resource from linkis_ps_bml_resources_version WHERE resource_id = #{resourceId} limit 1
    // Upload the resource to HDFS or the local file system
    StringBuilder stringBuilder = new StringBuilder();
    long size = resourceHelper.upload(path, user, inputStream, stringBuilder, OVER_WRITE);
    // Finally, insert a new resource version record into the linkis_ps_bml_resources_version table
    ResourceVersion resourceVersion = ResourceVersion.createNewResourceVersion(resourceId, path, md5String, clientIp, size, newVersion, 1);
    versionDao.insertNewVersion(resourceVersion);
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/public_enhancement_services/bml/overview/index.html b/docs/1.1.1/architecture/public_enhancement_services/bml/overview/index.html index 86b95a1292d..caf37e7a083 100644 --- a/docs/1.1.1/architecture/public_enhancement_services/bml/overview/index.html +++ b/docs/1.1.1/architecture/public_enhancement_services/bml/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -18,7 +18,7 @@ The number of bytes. After the reading is successful, the stream information is returned to the user.

  • Insert a successful download record in resource_download_history

  Database Design#

    1. Resource information table (resource)

    Field name        | Function                                              | Remarks
    resource_id       | A string that uniquely identifies a resource globally | A UUID can be used as the identifier
    resource_location | The location where the resource is stored             | For example, hdfs:///tmp/bdp/${USERNAME}/
    owner             | The owner of the resource                             | e.g. zhangsan
    create_time       | Record creation time                                  |
    is_share          | Whether the resource is shared                        | 0 means not shared, 1 means shared
    update_time       | Last update time of the resource                      |
    is_expire         | Whether the resource record has expired               |
    expire_time       | Resource expiration time                              |
    2. Resource version information table (resource_version)

    Field name        | Function                         | Remarks
    resource_id       | Uniquely identifies the resource | Joint primary key
    version           | The version of the resource file |
    start_byte        | Start byte of the resource file  |
    end_byte          | End byte of the resource file    |
    size              | Resource file size               |
    resource_location | Resource file storage location   |
    start_time        | Upload start time                |
    end_time          | Upload end time                  |
    updater           | The user who updated the record  |
    3. Resource download history table (resource_download_history)

    Field       | Function                                   | Remarks
    resource_id | The resource_id of the downloaded resource |
    version     | The version of the downloaded resource     |
    downloader  | The user who downloaded the resource       |
    start_time  | Download start time                        |
    end_time    | Download end time                          |
    status      | Whether the download succeeded             | 0 means success, 1 means failure
    err_msg     | Failure reason                             | null means success, otherwise the failure reason is logged
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service/index.html b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service/index.html index 4a394f7104d..e75903fe508 100644 --- a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service/index.html +++ b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service/index.html @@ -7,7 +7,7 @@ CS Architecture | Apache Linkis - + @@ -17,7 +17,7 @@

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_cache/index.html b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_cache/index.html index 4de8e4ed3a3..7e8e25a4228 100644 --- a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_cache/index.html +++ b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_cache/index.html @@ -7,7 +7,7 @@ CS Cache Architecture | Apache Linkis - + @@ -16,7 +16,7 @@

    Note: The ContextIDValueGenerator will go to the persistence layer to pull the Array[ContextKeyValue] of the ContextID, and parse the ContextKeyValue key storage index and content through ContextKeyValueParser.

    The other interface processes provided by ContextCacheService are similar, so I won't repeat them here.

    KeyWord parsing logic#

    The specific entity bean of ContextValue needs the annotation @KeywordMethod on each get method that can serve as a keyword source. For example, the getTableName method of Table must be annotated with @KeywordMethod.

    When ContextKeyValueParser parses a ContextKeyValue, it scans all methods of the passed-in object that are annotated with @KeywordMethod, calls each such get method, and takes the toString of the returned object; the result is parsed through user-selectable rules (separator-based and regular-expression-based) and stored in the keyword collection.

    Precautions:

    1. The annotation is defined in the cs core module

    2. The annotated get method cannot take parameters

    3. The toString method of the object returned by the get method must return the keyword
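
    The snippet below sketches this annotation contract. The annotation and class names follow the text above, but the declarations themselves are illustrative, not the actual cs-core definitions.

    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;

    @Retention(RetentionPolicy.RUNTIME)
    @Target(ElementType.METHOD)
    @interface KeywordMethod {}

    class Table {
        private final String tableName;

        Table(String tableName) { this.tableName = tableName; }

        @KeywordMethod                 // marks this getter as a keyword source
        public String getTableName() { // must take no parameters (precaution 2)
            return tableName;          // its toString becomes the keyword (precaution 3)
        }
    }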

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_client/index.html b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_client/index.html index ee81f287308..ce34a707c3e 100644 --- a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_client/index.html +++ b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_client/index.html @@ -7,7 +7,7 @@ CS Client Design | Apache Linkis - + @@ -17,7 +17,7 @@ The second case is that the content of the ContextID is carried. We need to parse the csid. The way of parsing is to obtain the information of each instance through the method of string cutting, and then use eureka to determine whether this micro-channel still exists through the instance information. Service, if it exists, send it to this microservice instance

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html index 0cec9cbddfc..2117c62c35d 100644 --- a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html +++ b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html @@ -7,7 +7,7 @@ CS HA Design | Apache Linkis - + @@ -18,7 +18,7 @@ The client sends a request, and the Gateway forwards it to any server. The HA module generates the HAID, including the main instance, the backup instance and the CSID, and completes the binding of the workflow and the HAID.

    When the client sends a change request, the Gateway determines that the main Instance has failed and forwards the request to the standby Instance for processing. After the HA module on the standby Instance verifies that the HAID is valid, it loads the ContextID instance and processes the request.
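
    As an illustration of what such an HAID could carry, the sketch below packs the main instance, the backup instance, and the CSID into one opaque string. This is only a plausible encoding inferred from the description above; the actual Linkis encoding may differ.

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    final class HaidCodec {
        // pack main instance, backup instance, and CSID into one opaque HAID string
        static String encode(String mainInstance, String backupInstance, String csid) {
            String raw = mainInstance + ";" + backupInstance + ";" + csid;
            return Base64.getEncoder().encodeToString(raw.getBytes(StandardCharsets.UTF_8));
        }

        // recover [mainInstance, backupInstance, csid] so a server can validate the HAID
        static String[] decode(String haid) {
            byte[] raw = Base64.getDecoder().decode(haid);
            return new String(raw, StandardCharsets.UTF_8).split(";", 3);
        }
    }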

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_listener/index.html b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_listener/index.html index 510d7eed411..3e2cdf91fa4 100644 --- a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_listener/index.html +++ b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_listener/index.html @@ -7,7 +7,7 @@ CS Listener Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    CS Listener Architecture

    Listener Architecture#

    In DSS, when a node changes its metadata information, the context information of the entire workflow changes. We expect all nodes to perceive the change and update their metadata automatically. This is implemented with a listener pattern, using a heartbeat mechanism to poll and thereby maintain the metadata consistency of the context information.

    Client self-registration, CSKey registration, and CSKey update process#

    The main process is as follows:

    1. Registration: the clients client1, client2, client3, and client4 register themselves and the CSKeys they want to monitor with the CSServer through HTTP requests. The Service layer obtains the callback engine instance through the external interface and registers the clients and their corresponding CSKeys.

    2. Update operation: If the ClientX node updates the CSKey content, the Service service updates the CSKey cached by the ContextCache, and the ContextCache delivers the update operation to the ListenerBus. The ListenerBus notifies the specific listener to consume (that is, the ContextKeyCallbackEngine updates the CSKeys corresponding to the Client). The consumed event will be automatically removed.

    3. Heartbeat mechanism:

    All clients use heartbeat messages to detect whether the values of the CSKeys in ContextKeyCallbackEngine have changed.

    ContextKeyCallbackEngine returns the updated CSKey values to all registered clients through the heartbeat mechanism. If a client's heartbeat times out, that client is removed.
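
    The loop below is a minimal sketch of this register-then-heartbeat flow. The CallbackClient interface and its method names are illustrative, not the actual Linkis ContextClient API.

    import java.util.List;
    import java.util.Map;

    interface CallbackClient {
        void register(String clientId, List<String> csKeys); // register self and the CSKeys to monitor
        Map<String, String> heartbeat(String clientId);      // returns CSKeys whose values changed
    }

    final class CsKeyWatcher {
        static void watch(CallbackClient client, String clientId, List<String> csKeys)
                throws InterruptedException {
            client.register(clientId, csKeys);
            while (true) {
                Map<String, String> changed = client.heartbeat(clientId);
                changed.forEach((key, value) ->
                        System.out.println("metadata updated: " + key + " -> " + value));
                Thread.sleep(3000L); // if heartbeats stop, the server removes this client
            }
        }
    }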

    Listener UML class diagram#

    Interface: ListenerManager

    Externally: provides a ListenerBus for event delivery.

    Internally: provides a callback engine for specific event registration, access, update, and heartbeat processing logic.

    Listener callback engine timing diagram#

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_persistence/index.html b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_persistence/index.html index e6e4b8c1c8c..4657f29a3a3 100644 --- a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_persistence/index.html +++ b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_persistence/index.html @@ -7,7 +7,7 @@ CS Persistence Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_search/index.html b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_search/index.html index 350b5487ad6..34c741f2a46 100644 --- a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_search/index.html +++ b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_search/index.html @@ -7,7 +7,7 @@ CS Search Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    CS Search Architecture

    CSSearch Architecture#

    Overall architecture#

    As shown below:

    1. ContextSearch: the query entry; it accepts query conditions defined in Map form and returns the corresponding results according to the conditions.

    2. Building module: each condition type corresponds to a Parser, which is responsible for converting a condition in Map form into a Condition object; this is implemented by calling the logic of ConditionBuilder. Conditions with complex logical relationships use ConditionOptimizer to optimize the query plan based on a cost-based algorithm.

    3. Execution module: filters the results that match the conditions out of the Cache. According to the query target, there are three execution modes: Ruler, Fetcher, and Matcher. The specific logic is described later.

    4. Evaluation module: Responsible for calculation of conditional execution cost and statistics of historical execution status.

    Query Condition Definition (ContextSearchCondition)#

    A query condition specifies how to filter out the part that meets the condition from a ContextKeyValue collection. Query conditions can be combined through logical operations to form more complex query conditions.

    1. Support ContextType, ContextScope, KeyWord matching

      1. Corresponding to a Condition type

      2. In Cache, these should have corresponding indexes

    2. Support contains/regex matching mode for key

      1. ContainsContextSearchCondition: contains a string

      2. RegexContextSearchCondition: match a regular expression

    3. Support logical operations of or, and, and not

      1. Unary operation UnaryContextSearchCondition: supports logical operations on a single parameter, such as NotContextSearchCondition

      2. Binary operation BinaryContextSearchCondition: supports logical operations on two parameters, defined as LeftCondition and RightCondition, such as OrContextSearchCondition and AndContextSearchCondition

      3. Each logical operation corresponds to an implementation class of the above subclasses

      4. The UML class diagram of this part is as follows:

    Construction of query conditions#

    1. Supports construction through ContextSearchConditionBuilder: if multiple ContextType, ContextScope, KeyWord, or contains/regex matches are declared at the same time during construction, they are automatically connected with the And logical operation

    2. Supports logical operations between Conditions that return a new Condition: And, Or, and Not (given the condition1.or(condition2) form, the top-level Condition interface must define the logical operation methods; see the sketch after this list)

    3. Supports building from a Map through the ContextSearchParser corresponding to each underlying implementation class
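
    A minimal sketch of this composition follows. The concrete class names are taken from the text above; the default-method shape of the top-level interface and all bodies are illustrative, not the actual Linkis implementation.

    // Top-level interface: defines the logical operations so condition1.or(condition2) works
    interface Condition {
        default Condition and(Condition other) { return new AndContextSearchCondition(this, other); }
        default Condition or(Condition other)  { return new OrContextSearchCondition(this, other); }
        default Condition not()                { return new NotContextSearchCondition(this); }
    }

    // A simple leaf condition: matches keys containing a string
    final class ContainsContextSearchCondition implements Condition {
        final String text;
        ContainsContextSearchCondition(String text) { this.text = text; }
    }

    // Binary operations hold a LeftCondition and a RightCondition
    final class AndContextSearchCondition implements Condition {
        final Condition left, right;
        AndContextSearchCondition(Condition left, Condition right) { this.left = left; this.right = right; }
    }

    final class OrContextSearchCondition implements Condition {
        final Condition left, right;
        OrContextSearchCondition(Condition left, Condition right) { this.left = left; this.right = right; }
    }

    // The unary operation wraps a single condition
    final class NotContextSearchCondition implements Condition {
        final Condition origin;
        NotContextSearchCondition(Condition origin) { this.origin = origin; }
    }

    // Usage: new ContainsContextSearchCondition("tmp").or(new ContainsContextSearchCondition("result"))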

    Execution of query conditions#

    1. Three function modes of query conditions:

      1. Ruler: Filter out eligible ContextKeyValue sub-Arrays from an Array

      2. Matcher: Determine whether a single ContextKeyValue meets the conditions

      3. Fetcher: Filter out an Array of eligible ContextKeyValue from ContextCache

    2. Each bottom-level Condition has a corresponding Execution, responsible for maintaining the corresponding Ruler, Matcher, and Fetcher.

    Query entry ContextSearch#

    Provide a search interface, receive Map as a parameter, and filter out the corresponding data from the Cache.

    1. Use Parser to convert the condition in the form of Map into a Condition object

    2. Obtain cost information through Optimizer, and determine the order of query according to the cost information

    3. After executing the corresponding Ruler/Fetcher/Matcher logic through the corresponding Execution, the search result is obtained

    Query Optimization#

    1. OptimizedContextSearchCondition maintains the Cost and Statistics information of the condition:

      1. Cost information: CostCalculator is responsible for judging whether a certain Condition can calculate Cost, and if it can be calculated, it returns the corresponding Cost object

      2. Statistics information: start/end/execution time, number of input lines, number of output lines

    2. Implement a CostContextSearchOptimizer, whose optimize method optimizes a Condition based on its cost and converts it into an OptimizedContextSearchCondition object. The specific logic is as follows:

      1. Disassemble a complex Condition into a tree structure based on the combination of logical operations. Each leaf node is a basic simple Condition; each non-leaf node is a logical operation.

    Tree A, shown in the figure below, is a complex condition composed of the five simple conditions A, B, C, D, and E through various logical operations.

    (Tree A)

      2. The execution of these Conditions is actually depth-first, traversing the tree from left to right. Moreover, according to the commutative rules of logical operations, the left-right order of a node's children in the Condition tree can be swapped, so all possible trees for all possible execution orders can be enumerated.

    Tree B, shown in the figure below, is another possible order of tree A above; its execution result is exactly the same as that of tree A, only the execution order of its parts is adjusted.

    (Tree B)

      3. For each tree, the cost is calculated from the leaf nodes and aggregated up to the root node, which gives the final cost of the tree; finally, the tree with the smallest cost is chosen as the optimal execution order.

    The rules for calculating node cost are as follows:

    1. For leaf nodes, each node has two attributes: Cost and Weight. Cost is the cost calculated by the CostCalculator. Weight is assigned according to the execution order of the nodes; the current default is 1 for the left node and 0.5 for the right node, to be tuned later. (The reason for the weight is that in some cases the left condition alone already determines whether the whole combination matches, so the right condition does not have to be executed in all cases, and its actual cost should be discounted by a certain percentage.)

    2. For non-leaf nodes, Cost = the sum of Cost × Weight over all child nodes; the weight assignment logic is the same as for leaf nodes.

    Taking tree A and tree B as examples, calculate the costs of the two trees, as shown in the figure below; the numbers in the nodes are Cost|Weight, assuming the costs of the five simple conditions A, B, C, D, and E are 10, 100, 50, 10, and 100 respectively. It follows that the cost of tree B is less than that of tree A, so tree B is the better plan.
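
    The recursion below is a minimal sketch of exactly this rule: a leaf contributes its own Cost, and a non-leaf contributes the sum of child Cost × Weight, with weight 1 on the left and 0.5 on the right. The class is illustrative, not the actual optimizer code.

    final class CostNode {
        final double leafCost;      // cost from the CostCalculator; used only by leaves
        final CostNode left, right; // null for leaf nodes

        CostNode(double leafCost) {               // leaf: a simple condition
            this.leafCost = leafCost;
            this.left = null;
            this.right = null;
        }

        CostNode(CostNode left, CostNode right) { // non-leaf: a logical operation
            this.leafCost = 0;
            this.left = left;
            this.right = right;
        }

        double cost() {
            if (left == null) return leafCost;
            return 1.0 * left.cost() + 0.5 * right.cost(); // left weight 1, right weight 0.5
        }
    }

    // e.g. combining A (cost 10) on the left with B (cost 100) on the right:
    // new CostNode(new CostNode(10), new CostNode(100)).cost() == 10 * 1 + 100 * 0.5 == 60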

    1. Use CostCalculator to measure the cost of simple conditions:

      1. Conditions acting on an index: the cost is determined by the distribution of the index values. For example, if the Array obtained from the Cache by condition A has length 100 while condition B's has length 200, then the cost of condition A is less than that of B.

      2. Conditions that need to be traversed:

        1. An initial Cost is assigned according to the condition's own matching mode: for example, 100 for Regex, 10 for Contains, etc. (the specific values will be adjusted as the implementation proceeds)

        2. Based on the efficiency of historical queries, for example throughput per unit time, the real-time Cost is obtained by continuous adjustment on top of the initial Cost.

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/public_enhancement_services/context_service/overview/index.html b/docs/1.1.1/architecture/public_enhancement_services/context_service/overview/index.html index cefd11b1598..934a63a2e44 100644 --- a/docs/1.1.1/architecture/public_enhancement_services/context_service/overview/index.html +++ b/docs/1.1.1/architecture/public_enhancement_services/context_service/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -22,7 +22,7 @@ Enter Persistence architecture design

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/public_enhancement_services/datasource_manager/index.html b/docs/1.1.1/architecture/public_enhancement_services/datasource_manager/index.html index 2029a3e85fd..0dc3b708282 100644 --- a/docs/1.1.1/architecture/public_enhancement_services/datasource_manager/index.html +++ b/docs/1.1.1/architecture/public_enhancement_services/datasource_manager/index.html @@ -7,7 +7,7 @@ Data Source Management Service Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Data Source Management Service Architecture

    Background#

    Earlier versions of Exchangis 0.X and Linkis 0.X both had integrated data source modules. In order to manage and reuse the data source capability, Linkis reconstructed the data source module based on linkis-datasource (refer to the related documents) and split data source management into a data source management service and a metadata management service.

    This article mainly involves the DataSource Manager Server data source management service, which provides the following functions:

    1) Linkis unifies service startup and deployment management without increasing operation and maintenance costs, reusing Linkis service capabilities;

    2) Management services with a graphical interface are provided through Linkis Web. The interface offers management functions such as creating a data source, querying data sources, updating a data source, connectivity tests, and so on;

    3) The service is stateless and deployed in multiple instances to achieve high availability. When the system is deployed, multiple instances can serve externally and independently without interfering with each other, and all information is stored in the database for sharing.

    4) Full life cycle management of data sources is provided, including creation, query, update, test, and expiration management.

    5) Multi-version data source management: historical data sources are saved in the database, and data source expiration management is provided.

    6) The Restful interface provides the following functions: data source type query, data source detail query, data source information query by version, data source version query, data source parameter list, multi-dimensional data source search, data source environment query and update, adding a data source, data source parameter configuration, data source expiration setting, and data source connectivity test.

    Architecture Diagram#

    datasource Architecture diagram

    Architecture Description#

    1. The service is registered with the Linkis-Eureka-Service service and managed in a unified manner together with the other Linkis microservices. Clients can access the data source management service through the Linkis-GateWay-Service service with the service name data-source-manager.

    2. The interface layer serves other applications through the Restful interface, providing addition, deletion, query, and modification of data sources and data source environments, data source link and dual-link tests, data source version management, and expiration operations;

    3. The Service layer mainly manages the services of the database and the material library and permanently retains the information related to data sources;

    4. The link test of a data source is done through the linkis metastore server service, which currently provides the mysql, es, kafka, and hive services.

    Core Process#

    1. To create a new data source, the requesting user is first obtained from the request to determine whether the user is valid. Next, the relevant field information of the data source is verified: the data source name and data source type cannot be empty. The data source name is used to confirm whether the data source already exists; if it does not exist, it is inserted into the database and the data source ID is returned.

    2. To update a data source, the requesting user is first obtained from the request to determine whether the user is valid. Next, the relevant field information of the new data source is verified: the data source name and data source type cannot be empty. Whether the data source exists is confirmed by the data source ID; if it does not exist, an exception is returned. If it exists, it is further judged whether the user has update permission on the data source: only the administrator or the owner of the data source may update it. With permission, the data source is updated and the data source ID is returned.

    3. To update data source parameters, the requesting user is first obtained from the request to determine whether the user is valid, and the detailed data source information is obtained from the passed data source ID. It is then determined whether the user is the owner of the data source being changed or the administrator; if so, the modified parameters are further verified, the parameters are updated after the verification passes, and the versionId is returned.
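
    The existence and permission checks shared by steps 2 and 3 can be sketched as below; the names are illustrative, not the actual data-source-manager implementation.

    final class DataSourceUpdateGuard {
        static long checkBeforeUpdate(Long dataSourceId, String owner, String user, boolean isAdmin) {
            if (dataSourceId == null) {
                // step 2: a non-existent data source ID yields an exception
                throw new IllegalArgumentException("data source does not exist");
            }
            if (!isAdmin && !user.equals(owner)) {
                // only the administrator or the owner of the data source may update it
                throw new IllegalStateException("no permission to update this data source");
            }
            return dataSourceId; // the ID returned after a successful update
        }
    }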

    Entity Object#

    Class Name                   | Description
    DataSourceType               | Indicates the type of a data source
    DataSourceParamKeyDefinition | Declares data source property configuration definitions
    DataSource                   | Data source object entity class, including permission tags and attribute configuration definitions
    DataSourceEnv                | Data source environment object entity class, which also contains attribute configuration definitions
    DataSourceParameter          | Data source specific parameter configuration
    DatasourceVersion            | Data source version details

    Database Design#

    Database Diagram:#

    Data Table Definition:#

    Table: linkis_ps_dm_datasource <--> Object: DataSource

    Serial Number | Column               | Description
    1             | id                   | Data source ID
    2             | datasource_name      | Data source name
    3             | datasource_desc      | Data source detailed description
    4             | datasource_type_id   | Data source type ID
    5             | create_identify      | Create identify
    6             | create_system        | System that created the data source
    7             | parameter            | Data source parameters
    8             | create_time          | Data source creation time
    9             | modify_time          | Data source modification time
    10            | create_user          | Data source create user
    11            | modify_user          | Data source modify user
    12            | labels               | Data source labels
    13            | version_id           | Data source version ID
    14            | expire               | Whether the data source has expired
    15            | published_version_id | Data source published version number

    Table: linkis_ps_dm_datasource_type <--> Object: DataSourceType

    Serial Number | Column      | Description
    1             | id          | Data source type ID
    2             | name        | Data source type name
    3             | description | Data source type description
    4             | option      | Type of data source
    5             | classifier  | Data source type classifier
    6             | icon        | Data source image display path
    7             | layers      | Data source type hierarchy

    Table: linkis_ps_dm_datasource_env <--> Object: DataSourceEnv

    Serial Number | Column             | Description
    1             | id                 | Data source environment ID
    2             | env_name           | Data source environment name
    3             | env_desc           | Data source environment description
    4             | datasource_type_id | Data source type ID
    5             | parameter          | Data source environment parameters
    6             | create_time        | Data source environment creation time
    7             | create_user        | Data source environment create user
    8             | modify_time        | Data source modification time
    9             | modify_user        | Data source modify user

    Table: linkis_ps_dm_datasource_type_key <--> Object: DataSourceParamKeyDefinition

    Serial Number | Column              | Description
    1             | id                  | Key-value type ID
    2             | data_source_type_id | Data source type ID
    3             | key                 | Data source parameter key
    4             | name                | Data source parameter name
    5             | default_value       | Data source parameter default value
    6             | value_type          | Data source parameter type
    7             | scope               | Data source parameter scope
    8             | require             | Whether the data source parameter is required
    9             | description         | Data source parameter description
    10            | value_regex         | Regex of the data source parameter
    11            | ref_id              | Data source parameter association ID
    12            | ref_value           | Data source parameter associated value
    13            | data_source         | Data source
    14            | update_time         | Update time
    15            | create_time         | Create time

    Table: linkis_ps_dm_datasource_version <--> Object: DatasourceVersion

    Serial Number | Column        | Description
    1             | version_id    | Data source version ID
    2             | datasource_id | Data source ID
    3             | parameter     | The version parameters of the data source
    4             | comment       | Comment
    5             | create_time   | Create time
    6             | create_user   | Create user
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/public_enhancement_services/metadata_manager/index.html b/docs/1.1.1/architecture/public_enhancement_services/metadata_manager/index.html index bb80ca9e035..4a2121e64bb 100644 --- a/docs/1.1.1/architecture/public_enhancement_services/metadata_manager/index.html +++ b/docs/1.1.1/architecture/public_enhancement_services/metadata_manager/index.html @@ -7,7 +7,7 @@ Data Source Management Service Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Data Source Management Service Architecture

    Background#

    Earlier versions of Exchangis 0.X and Linkis 0.X both had integrated data source modules. In order to manage and reuse the data source capability, Linkis reconstructed the data source module based on linkis-datasource (refer to the related documents) and split data source management into a data source management service and a metadata management service.

    This article mainly covers the MetaData Manager Server metadata management service, which provides the following functions:

    1) Linkis unifies service startup and deployment management without increasing operation and maintenance costs, reusing Linkis service capabilities;

    2) The service is stateless and deployed in multiple instances to achieve high availability. When the system is deployed, multiple instances serve externally and independently without interfering with each other, and all information is stored in the database for sharing.

    3) Full life cycle management of data sources is provided, including creation, query, update, test, and expiration management.

    4) Multi-version data source management: historical data sources are saved in the database, and data source expiration management is provided.

    5) The Restful interface provides the following functions: database information query, database table information query, database table parameter information query, and data partition information query.

    Architecture Diagram#

    Data Source Architecture Diagram

    Architecture Description#

    1. The service is registered with the Linkis-Eureka-Service service and managed in a unified manner together with the other Linkis microservices. Clients can access the metadata management service through the Linkis-GateWay-Service service with the service name metamanager.

    2. The interface layer provides database/table/partition information query to other applications through the Restful interface;

    3. In the Service layer, the data source type is obtained from the data source management service by the data source ID, and the concrete supported service is obtained by type. The services supported initially are mysql, es, kafka, and hive;

    Core Process#

    1. The client passes in a specified data source ID and obtains information through the restful interface. For example, to query the database list for the data source with ID 1, the URL is http://<meta-server-url>/metadatamanager/dbs/1 (see the curl sketch after this list);

    2. According to the data source ID, the data source type is obtained from the data source service <data-source-manager> through RPC;

    3. According to the data source type, the corresponding Service [hive/es/kafka/mysql] is loaded, the corresponding operation is performed, and the result is returned;
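
    For instance, the query in step 1 can be issued directly with curl, as sketched below; authentication through the Linkis gateway login is assumed and omitted here.

    # Query the database list of the data source with ID 1, using the URL documented in step 1
    curl "http://<meta-server-url>/metadatamanager/dbs/1"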

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/public_enhancement_services/overview/index.html b/docs/1.1.1/architecture/public_enhancement_services/overview/index.html index 862d1d4e6d0..1e83e60fa20 100644 --- a/docs/1.1.1/architecture/public_enhancement_services/overview/index.html +++ b/docs/1.1.1/architecture/public_enhancement_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    PublicEnhancementService (PS) architecture design

    PublicEnhancementService (PS): public enhancement service, a module that provides functions such as unified configuration management, context service, material library, data source management, microservice management, and historical task query for the other microservice modules.

    Introduction to the second-level modules:

    BML material library#

    It is the Linkis material management system, mainly used to store various file data of users, including user scripts, resource files, third-party JAR packages, etc.; it can also store the class libraries that an engine needs at runtime.

    Core Class      | Core Function
    UploadService   | Provides the resource upload service
    DownloadService | Provides the resource download service
    ResourceManager | Provides a unified management entry for uploading and downloading resources
    VersionManager  | Provides resource version marking and version management functions
    ProjectManager  | Provides project-level resource management and control capabilities

    Unified configuration management#

    Configuration provides a "user-engine-application" three-level configuration management solution, which provides users with the function of configuring custom engine parameters under various access applications.

    Core Class           | Core Function
    CategoryService      | Provides management services for application and engine catalogs
    ConfigurationService | Provides a unified management service for user configuration

    ContextService context service#

    ContextService is used to solve the problem of data and information sharing across multiple systems in a data application development process.

    Core Class          | Core Function
    ContextCacheService | Provides a cache service for context information
    ContextClient       | Provides the ability for other microservices to interact with the CSServer group
    ContextHAManager    | Provides high-availability capabilities for ContextService
    ListenerManager     | Provides the message bus capability
    ContextSearch       | Provides the query entry
    ContextService      | Implements the overall execution logic of the context service

    Datasource data source management#

    Datasource provides the ability to connect to different data sources for other microservices.

    Core Class        | Core Function
    datasource-server | Provides the ability to connect to different data sources

    InstanceLabel microservice management#

    InstanceLabel provides registration and labeling functions for other microservices connected to linkis.

    Core Class      | Core Function
    InsLabelService | Provides microservice registration and label management functions

    Jobhistory historical task management#

    Jobhistory provides users with linkis historical task query, progress, log display related functions, and provides a unified historical task view for administrators.

    Core Class             | Core Function
    JobHistoryQueryService | Provides the historical task query service

    Variable user-defined variable management#

    Variable provides users with functions related to the storage and use of custom variables.

    Core Class      | Core Function
    VariableService | Provides functions related to the storage and use of custom variables

    UDF user-defined function management#

    UDF provides users with the function of custom functions, which can be introduced by users when writing code.

    Core Class | Core Function
    UDFService | Provides the user-defined function service
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/public_enhancement_services/public_service/index.html b/docs/1.1.1/architecture/public_enhancement_services/public_service/index.html index 4df87295b86..e0c14e86f80 100644 --- a/docs/1.1.1/architecture/public_enhancement_services/public_service/index.html +++ b/docs/1.1.1/architecture/public_enhancement_services/public_service/index.html @@ -7,7 +7,7 @@ Public Service | Apache Linkis - + @@ -20,7 +20,7 @@ The main functions are as follows:

    • Provides resource management capabilities for some specific labels to assist RM in more refined resource management.

    • Provides labeling capabilities for users. The user label will be automatically added for judgment when applying for the engine.

    • Provides the label analysis module, which can parse a user's request into a set of labels.

    • Provides node label management, mainly the CRUD capability for node labels and label resource management, which manages the resources of certain labels by marking the maximum resource, minimum resource, and used resource of a label.

    - + \ No newline at end of file diff --git a/docs/1.1.1/deployment/cluster_deployment/index.html b/docs/1.1.1/deployment/cluster_deployment/index.html index 6a3a9dca703..a1c9fb3500b 100644 --- a/docs/1.1.1/deployment/cluster_deployment/index.html +++ b/docs/1.1.1/deployment/cluster_deployment/index.html @@ -7,7 +7,7 @@ Cluster Deployment | Apache Linkis - + @@ -26,7 +26,7 @@ Linux clear process sudo kill - 9 process number

    4. Matters needing attention#

    4.1 It is best to start all services at the beginning, because services depend on each other. If some services are missing and no fallback can be found through Eureka, the affected services will fail to start; a service that fails to start will not restart automatically. In that case, wait until the missing service is available, and then restart the related services#

    - + \ No newline at end of file diff --git a/docs/1.1.1/deployment/engine_conn_plugin_installation/index.html b/docs/1.1.1/deployment/engine_conn_plugin_installation/index.html index 2b0f40d282e..eec7fbdad12 100644 --- a/docs/1.1.1/deployment/engine_conn_plugin_installation/index.html +++ b/docs/1.1.1/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ EngineConnPlugin Installation | Apache Linkis - + @@ -17,7 +17,7 @@ wds.linkis.engineconn.plugin.loader.store.path, which is used by EngineConnPluginServer to read the actual implementation Jar of the engine.

    It is highly recommended to specify wds.linkis.engineconn.home and wds.linkis.engineconn.plugin.loader.store.path as the same directory, so that you can directly unzip the engine ZIP package exported by maven into this directory, for example, place it in the ${LINKIS_HOME}/lib/linkis-engineconn-plugins directory.

    ${LINKIS_HOME}/lib/linkis-engineconn-plugins:
    └── hive
        └── dist
        └── plugin
    └── spark
        └── dist
        └── plugin

    If the two parameters do not point to the same directory, you need to place the dist and plugin directories separately, as shown in the following example:

    ## dist directory
    ${LINKIS_HOME}/lib/linkis-engineconn-plugins/dist:
    └── hive
        └── dist
    └── spark
        └── dist

    ## plugin directory
    ${LINKIS_HOME}/lib/linkis-engineconn-plugins/plugin:
    └── hive
        └── plugin
    └── spark
        └── plugin

    2.2 Configuration modification of management console (optional)#

    The configuration of the Linkis 1.0 management console is managed by engine label. If the new engine has configuration parameters, you need to insert the corresponding configuration parameters in the Configuration, which means inserting records into the following tables:

    linkis_configuration_config_key: insert the keys and default values of the engine's configuration parameters
    linkis_manager_label: insert the engine label, such as hive-1.2.1
    linkis_configuration_category: insert the catalog relationship of the engine
    linkis_configuration_config_value: insert the configuration that the engine needs to display

    If it is an existing engine with a new version added, you can modify the version of the corresponding engine in the linkis_configuration_dml.sql file and then execute it

    2.3 Engine refresh#

    1. The engine supports real-time refresh. After the engine is placed in the corresponding directory, Linkis 1.0 provides a way to load the engine without restarting the server: just send a request to the linkis-engineconn-plugin-server service through the restful interface, that is, to the actual deployment IP and port of the service. The request interface is http://ip:port/api/rest_j/v1/rpc/receiveAndReply, the request method is POST, and the request body is {"method":"/enginePlugin/engineConn/refreshAll"} (see the curl sketch after this list).

    2. Restart refresh: the engine directory can be forcibly refreshed by restarting the service

    ### cd to the sbin directory and restart linkis-engineconn-plugin-server
    cd /Linkis1.0.0/sbin
    ## Execute the linkis-daemon script
    sh linkis-daemon.sh restart linkis-engine-plugin-server

    3. Check whether the engine refresh succeeded: if you encounter problems during the refresh and need to confirm whether it succeeded, check whether the last_update_time of the linkis_engine_conn_plugin_bml_resources table in the database is the time at which the refresh was triggered.
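
    The real-time refresh request from step 1 above can be sent with curl, for example; ip:port is the actual deployment address of the linkis-engineconn-plugin-server service.

    curl -X POST -H "Content-Type: application/json" \
         -d '{"method":"/enginePlugin/engineConn/refreshAll"}' \
         "http://ip:port/api/rest_j/v1/rpc/receiveAndReply"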

    - + \ No newline at end of file diff --git a/docs/1.1.1/deployment/installation_hierarchical_structure/index.html b/docs/1.1.1/deployment/installation_hierarchical_structure/index.html index 7f22ebb1ee7..ff43d7a08c5 100644 --- a/docs/1.1.1/deployment/installation_hierarchical_structure/index.html +++ b/docs/1.1.1/deployment/installation_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Installation Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Installation directory structure

    The directory structure of Linkis 1.0 is very different from that of the 0.X version. In 0.X, each microservice has its own independent root directory. The main advantage of this structure is that it is easy to distinguish microservices and manage them individually, but it has some obvious problems:

    1. The microservice directories are numerous and complicated, and switching between them for management is inconvenient
    2. There is no unified startup script, which makes starting and stopping microservices troublesome
    3. There are a large number of duplicated service configurations, and the same configuration often needs to be modified in many places
    4. There are a large number of repeated Lib dependencies, which increases the size of the installation package and the risk of dependency conflicts

    Therefore, in Linkis 1.0, we have greatly optimized and adjusted the installation directory structure, reducing the number of microservice directories, reducing the jar packages that are repeatedly dependent, and reusing configuration files and microservice management scripts as much as possible. Mainly reflected in the following aspects:

    1. The bin folder is no longer provided per microservice; it is shared by all microservices.

    The bin folder becomes the installation directory, mainly used to install Linkis 1.0 and check the environment status. The new sbin directory provides one-click start and stop for Linkis as a whole, and independent start and stop of each microservice by changing the parameters.

    2. A separate conf directory is no longer provided per microservice; it is shared by all microservices.

    The conf folder contains two kinds of content: on the one hand, configuration shared by all microservices, which users can customize according to their own environment; on the other hand, configuration specific to each microservice, which users normally do not need to change.

    3. The lib folder is no longer provided per microservice; it is shared by all microservices.

    The lib folder also contains two kinds of content: on the one hand, the common dependencies required by all microservices; on the other hand, the special dependencies required by each microservice.

    4. The log directory is no longer provided per microservice; it is shared by all microservices.

    The log directory contains the log files of all microservices.

    The simplified directory structure of Linkis 1.0 is as follows.

    ├── bin ──installation directory
    │ ├── checkEnv.sh ── Environment variable detection
    │ ├── checkServices.sh ── Microservice status check
    │ ├── common.sh ── Some public shell functions
    │ ├── install-io.sh ── Used for dependency replacement during installation
    │ └── install.sh ── Main script of Linkis installation
    ├── conf ──configuration directory
    │ ├── application-eureka.yml
    │ ├── application-linkis.yml ──Microservice general yml
    │ ├── linkis-cg-engineconnmanager-io.properties
    │ ├── linkis-cg-engineconnmanager.properties
    │ ├── linkis-cg-engineplugin.properties
    │ ├── linkis-cg-entrance.properties
    │ ├── linkis-cg-linkismanager.properties
    │ ├── linkis-computation-governance
    │ │   └── linkis-client
    │ │       └── linkis-cli
    │ │           ├── linkis-cli.properties
    │ │           └── log4j2.xml
    │ ├── linkis-env.sh ──linkis environment properties
    │ ├── linkis-et-validator.properties
    │ ├── linkis-mg-gateway.properties
    │ ├── linkis.properties ──linkis global properties
    │ ├── linkis-ps-bml.properties
    │ ├── linkis-ps-cs.properties
    │ ├── linkis-ps-datasource.properties
    │ ├── linkis-ps-publicservice.properties
    │ ├── log4j2.xml
    │ ├── proxy.properties (Optional)
    │ └── token.properties (Optional)
    ├── db ──database DML and DDL file directory
    │ ├── linkis_ddl.sql ──Database table definition SQL
    │ ├── linkis_dml.sql ──Database table initialization SQL
    │ └── module ──Contains DML and DDL files of each microservice
    ├── lib ──lib directory
    │ ├── linkis-commons ──Common dependency packages
    │ ├── linkis-computation-governance ──lib directory of the computing governance module
    │ ├── linkis-engineconn-plugins ──lib directory of all EngineConnPlugins
    │ ├── linkis-public-enhancements ──lib directory of public enhancement services
    │ └── linkis-spring-cloud-services ──SpringCloud lib directory
    ├── logs ──log directory
    │ ├── linkis-cg-engineconnmanager-gc.log
    │ ├── linkis-cg-engineconnmanager.log
    │ ├── linkis-cg-engineconnmanager.out
    │ ├── linkis-cg-engineplugin-gc.log
    │ ├── linkis-cg-engineplugin.log
    │ ├── linkis-cg-engineplugin.out
    │ ├── linkis-cg-entrance-gc.log
    │ ├── linkis-cg-entrance.log
    │ ├── linkis-cg-entrance.out
    │ ├── linkis-cg-linkismanager-gc.log
    │ ├── linkis-cg-linkismanager.log
    │ ├── linkis-cg-linkismanager.out
    │ ├── linkis-et-validator-gc.log
    │ ├── linkis-et-validator.log
    │ ├── linkis-et-validator.out
    │ ├── linkis-mg-eureka-gc.log
    │ ├── linkis-mg-eureka.log
    │ ├── linkis-mg-eureka.out
    │ ├── linkis-mg-gateway-gc.log
    │ ├── linkis-mg-gateway.log
    │ ├── linkis-mg-gateway.out
    │ ├── linkis-ps-bml-gc.log
    │ ├── linkis-ps-bml.log
    │ ├── linkis-ps-bml.out
    │ ├── linkis-ps-cs-gc.log
    │ ├── linkis-ps-cs.log
    │ ├── linkis-ps-cs.out
    │ ├── linkis-ps-datasource-gc.log
    │ ├── linkis-ps-datasource.log
    │ ├── linkis-ps-datasource.out
    │ ├── linkis-ps-publicservice-gc.log
    │ ├── linkis-ps-publicservice.log
    │ └── linkis-ps-publicservice.out
    ├── pid ──Process IDs of all microservices
    │ ├── linkis_cg-engineconnmanager.pid ──EngineConnManager microservice
    │ ├── linkis_cg-engineconnplugin.pid ──EngineConnPlugin microservice
    │ ├── linkis_cg-entrance.pid ──Engine entrance microservice
    │ ├── linkis_cg-linkismanager.pid ──linkis manager microservice
    │ ├── linkis_mg-eureka.pid ──eureka microservice
    │ ├── linkis_mg-gateway.pid ──gateway microservice
    │ ├── linkis_ps-bml.pid ──material library microservice
    │ ├── linkis_ps-cs.pid ──Context microservice
    │ ├── linkis_ps-datasource.pid ──Data source microservice
    │ └── linkis_ps-publicservice.pid ──public microservice
    └── sbin ──microservice start and stop script directory
        ├── ext ──Start and stop script directory of each microservice
        ├── linkis-daemon.sh ── Quickly start, stop, or restart a single microservice
        ├── linkis-start-all.sh ── Start all microservices with one click
        └── linkis-stop-all.sh ── Stop all microservices with one click

    Configuration item modification

    After executing install.sh in the bin directory to complete the Linkis installation, you need to modify the configuration items. All configuration items are located in the conf directory. Normally you need to modify the three configuration files db.sh, linkis.properties, and linkis-env.sh. For details on project installation and configuration, please refer to the article "Linkis1.0 Installation".

    Microservice start and stop

    After modifying the configuration items, you can start the microservice in the sbin directory. The names of all microservices are as follows:

    ├── linkis-cg-engineconnmanager ──engine management service
    ├── linkis-cg-engineplugin ──EngineConnPlugin management service
    ├── linkis-cg-entrance ──computing governance entrance service
    ├── linkis-cg-linkismanager ──computing governance management service
    ├── linkis-mg-eureka ──microservice registry service
    ├── linkis-mg-gateway ──Linkis gateway service
    ├── linkis-ps-bml ──material library service
    ├── linkis-ps-cs ──context service
    ├── linkis-ps-datasource ──data source service
    └── linkis-ps-publicservice ──public service

    Microservice abbreviation:

    Abbreviation | Full English Name | Full Chinese Name
    cg | Computation Governance | Computing Governance
    mg | Microservice Governance | Microservice Governance
    ps | Public Enhancement Service | Public Enhancement Service

    In the past, to start or stop a single microservice you had to enter that microservice's bin directory and execute its start/stop script. With many microservices this is cumbersome and adds a lot of directory switching. Linkis1.0 places all microservice start/stop scripts in the sbin directory, so only a single entry script needs to be executed.

    Under the Linkis/sbin directory:

    1.Start all microservices at once:

    sh linkis-start-all.sh

    2.Shut down all microservices at once

    sh linkis-stop-all.sh

    3.Start a single microservice (use the service name without the linkis- prefix, such as mg-eureka)

    sh linkis-daemon.sh start service-name

    For example:

    sh linkis-daemon.sh start mg-eureka

    4.Shut down a single microservice

    sh linkis-daemon.sh stop service-name

    For example:

    sh linkis-daemon.sh stop mg-eureka

    5.Restart a single microservice

    sh linkis-daemon.sh restart service-name

    For example:

    sh linkis-daemon.sh restart mg-eureka

    6.View the status of a single microservice

    sh linkis-daemon.sh status service-name

    For example:

    sh linkis-daemon.sh status mg-eureka
    - + \ No newline at end of file diff --git a/docs/1.1.1/deployment/involve_skywalking_into_linkis/index.html b/docs/1.1.1/deployment/involve_skywalking_into_linkis/index.html index 7a2d2efda9e..e8100764846 100644 --- a/docs/1.1.1/deployment/involve_skywalking_into_linkis/index.html +++ b/docs/1.1.1/deployment/involve_skywalking_into_linkis/index.html @@ -7,7 +7,7 @@ Involve SkyWaling into Linkis | Apache Linkis - + @@ -20,7 +20,7 @@

    Modify the configuration item SKYWALKING_AGENT_PATH in linkis-env.sh of Linkis. Set it to the path to skywalking-agent.jar.

    SKYWALKING_AGENT_PATH=/path/to/skywalking-agent.jar

    Then start Linkis.

    $ bash linkis-start-all.sh

    4. Result display#

    The Linkis web UI listens on port 8080 by default. After enabling SkyWalking in Linkis and opening the SkyWalking UI, if you can see the picture below, the integration succeeded.

    - + \ No newline at end of file diff --git a/docs/1.1.1/deployment/linkis_scriptis_install/index.html b/docs/1.1.1/deployment/linkis_scriptis_install/index.html index 6cb25fe740e..3418356e23d 100644 --- a/docs/1.1.1/deployment/linkis_scriptis_install/index.html +++ b/docs/1.1.1/deployment/linkis_scriptis_install/index.html @@ -7,7 +7,7 @@ Installation and deployment of tool scriptis | Apache Linkis - + @@ -28,7 +28,7 @@

    After modifying the configuration, reload the nginx configuration

    sudo nginx -s reload

    Note the difference between root and alias in nginx (see the sketch below):

    • With root, the resolved path is: root path + location path
    • With alias, the location path is replaced by the alias path
    • alias defines an alias for a directory, while root defines the top-level directory
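    As an illustration, a minimal sketch of the two directives; the port, paths, and location name are placeholders for your own deployment, not values taken from this guide:

    server {
        listen 8080;

        # With root: a request for /scriptis/index.html resolves to
        # /appcom/Install/scriptis-web/scriptis/index.html (root path + location path)
        location /scriptis/ {
            root /appcom/Install/scriptis-web;
        }

        # With alias: the same request would instead resolve to
        # /appcom/Install/scriptis-web/dist/index.html (location path replaced by alias path)
        # location /scriptis/ {
        #     alias /appcom/Install/scriptis-web/dist/;
        # }
    }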

    4. Scriptis usage steps#

    4.1 Log in to the Linkis management console normally#

    # e.g. http://10.10.10.10:8080/#/
    http://nginxIp:port/#/

    Because scriptis requires login verification, you need to log in first to obtain the cookie.

    4.2 Visit the scriptis page after successful login#

    # e.g. http://10.10.10.10:8080/scriptis/
    http://nginxIp:port/scriptis/

    nginxIp is the nginx server IP; port is the port number configured for the Linkis management console in nginx; scriptis is the nginx location configured for the static files of the scriptis project (customizable).

    4.3 Use scriptis#

    Take creating an SQL query task as an example.

    Step 1: Create a new script

    (screenshot)

    Step 2: Enter the statement to query

    (screenshot)

    Step 3: Run the script

    (screenshot)

    Step 4: View the results

    (screenshot)

    - + \ No newline at end of file diff --git a/docs/1.1.1/deployment/quick_deploy/index.html b/docs/1.1.1/deployment/quick_deploy/index.html index c8415470c86..82a22d02fca 100644 --- a/docs/1.1.1/deployment/quick_deploy/index.html +++ b/docs/1.1.1/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ Quick Deployment | Apache Linkis - + @@ -21,7 +21,7 @@ ##:If your hive version is not 1.2.1, you need to modify the following parameter: #HIVE_VERSION=2.3.3

    f. Modify the database configuration#

    vi deploy-config/db.sh 
    # set the connection information of the database
    # including ip address, database's name, username and port
    # Mainly used to store user's customized variables, configuration parameters, UDFs,
    # and small functions, and to provide the underlying storage of the JobHistory.
    MYSQL_HOST=
    MYSQL_PORT=
    MYSQL_DB=
    MYSQL_USER=
    MYSQL_PASSWORD=
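    For illustration only, a hypothetically filled-in version (host, port, database name, and credentials below are placeholders, not real values):

    MYSQL_HOST=127.0.0.1        # hypothetical MySQL host
    MYSQL_PORT=3306             # standard MySQL port
    MYSQL_DB=linkis             # hypothetical database name
    MYSQL_USER=linkis_user      # hypothetical user
    MYSQL_PASSWORD=changeit     # hypothetical password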

    3. Installation and Startup#

    1. Execute the installation script:#

    sh bin/install.sh

    2. Installation steps#

    • The install.sh script will ask you whether to initialize the database and import the metadata.

    A user might run the install.sh script repeatedly, which would clear all data in the databases. Therefore, each time install.sh is executed, the user is asked whether the database needs to be initialized and the metadata imported.

    Please select yes on the first installation.

    Please note: If you are upgrading the existing environment of Linkis from 0.X to 1.0, please do not choose yes directly, refer to Linkis1.0 Upgrade Guide first.

    3. Check whether the installation succeeded#

    You can check whether the installation is successful or not by viewing the logs printed on the console.

    If there is an error message, check the specific reason for that error or refer to FAQ for help.

    4. Add mysql driver package#

    Note

    Because the mysql-connector-java driver is licensed under GPL2.0, which does not meet the license policy of the Apache open source agreement, the official Apache release package, starting from version 1.0.3, does not include the mysql-connector-java-x.x.x.jar dependency by default. You need to add it to the corresponding lib directories during installation and deployment.

    To download the mysql driver, take version 5.1.49 as an example: download link https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.49/mysql-connector-java-5.1.49.jar

    Copy the mysql driver package to the lib package path

    cp mysql-connector-java-5.1.49.jar {LINKIS_HOME}/lib/linkis-spring-cloud-services/linkis-mg-gateway/
    cp mysql-connector-java-5.1.49.jar {LINKIS_HOME}/lib/linkis-commons/public-module/
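    Putting the download and copy steps together, a sketch of the full sequence (assuming LINKIS_HOME is already exported in your shell):

    # download the driver (version 5.1.49 as an example)
    wget https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.49/mysql-connector-java-5.1.49.jar

    # copy it into the two lib directories that require it
    cp mysql-connector-java-5.1.49.jar ${LINKIS_HOME}/lib/linkis-spring-cloud-services/linkis-mg-gateway/
    cp mysql-connector-java-5.1.49.jar ${LINKIS_HOME}/lib/linkis-commons/public-module/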

    5. Linkis quick startup#

    Notice that if you use DSS or other projects that rely on linkis version < 1.1.1, you also need to modify the ${LINKIS_HOME}/conf/linkis.properties file:

    echo "wds.linkis.session.ticket.key=bdp-user-ticket-id" >> linkis.properties

    (1). Start services

    Run the following commands on the installation directory to start all services.

    sh sbin/linkis-start-all.sh

    (2). Check if start successfully

    You can check the startup status of the services on Eureka. Here is how to check:

    Open http://${EUREKA_INSTALL_IP}:${EUREKA_PORT} on the browser and check if services have registered successfully.

    If you have not specified EUREKA_INSTALL_IP and EUREKA_PORT in config.sh, then the HTTP address is http://127.0.0.1:20303

    As shown in the figure below, if all of the following microservices are registered in Eureka, it means that they have started successfully and are able to work.

    Linkis1.0_Eureka

    - + \ No newline at end of file diff --git a/docs/1.1.1/deployment/sourcecode_hierarchical_structure/index.html b/docs/1.1.1/deployment/sourcecode_hierarchical_structure/index.html index eb969a42ef4..35a13ffb09e 100644 --- a/docs/1.1.1/deployment/sourcecode_hierarchical_structure/index.html +++ b/docs/1.1.1/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Source Code Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Source Code Directory Structure

    A description of the Linkis source code directory structure. If you want to learn more about the Linkis modules, please check the related Linkis architecture design documents.

    |-- assembly-combined-package // module that compiles the entire project
    |        |-- assembly-combined
    |        |-- bin
    |        |-- deploy-config
    |        |-- src
    |-- linkis-commons // core abstractions, contains all common modules
    |        |-- linkis-common // common module, with many built-in utilities
    |        |-- linkis-hadoop-common
    |        |-- linkis-httpclient // Java SDK top-level interface
    |        |-- linkis-message-scheduler
    |        |-- linkis-module
    |        |-- linkis-mybatis // SpringCloud's Mybatis module
    |        |-- linkis-protocol
    |        |-- linkis-rpc // RPC module, complex two-way communication based on Feign
    |        |-- linkis-scheduler // general scheduling module
    |        |-- linkis-storage
    |
    |-- linkis-computation-governance // computation governance services
    |        |-- linkis-client // Java SDK, through which users can directly access Linkis
    |        |-- linkis-computation-governance-common
    |        |-- linkis-engineconn
    |        |-- linkis-engineconn-manager
    |        |-- linkis-entrance // general low-level entrance module
    |        |-- linkis-entrance-client
    |        |-- linkis-jdbc-driver
    |        |-- linkis-manager
    |
    |-- linkis-engineconn-plugins
    |        |-- engineconn-plugins
    |        |-- linkis-engineconn-plugin-framework
    |
    |-- linkis-extensions
    |        |-- linkis-io-file-client
    |-- linkis-orchestrator
    |        |-- linkis-code-orchestrator
    |        |-- linkis-computation-orchestrator
    |        |-- linkis-orchestrator-core
    |        |-- plugin
    |-- linkis-public-enhancements // public enhancement services
    |        |-- linkis-bml // material library
    |        |-- linkis-context-service // unified context
    |        |-- linkis-datasource // data source service
    |        |-- linkis-publicservice // public service
    |-- linkis-spring-cloud-services // microservice governance
    |        |-- linkis-service-discovery
    |        |-- linkis-service-gateway // gateway
    |-- db // database information
    |-- license-doc // license details
    |        |-- license // licenses of the backend project
    |        |-- ui-license // licenses of the linkis management console
    |-- tool // tool scripts
    |        |-- check.sh
    |        |-- dependencies
    |
    |-- web // management console code of linkis
    |
    |-- scalastyle-config.xml // Scala code style check configuration file
    |-- CONTRIBUTING.md
    |-- CONTRIBUTING_CN.md
    |-- DISCLAIMER-WIP
    |-- LICENSE // LICENSE of the project source code
    |-- LICENSE-binary // LICENSE of the binary package
    |-- LICENSE-binary-ui // LICENSE of the front-end compiled package
    |-- NOTICE // NOTICE of the project source code
    |-- NOTICE-binary // NOTICE of the binary package
    |-- NOTICE-binary-ui // NOTICE of the front-end binary package
    |-- licenses-binary // detailed licenses of binary package dependencies
    |-- licenses-binary-ui // detailed licenses of front-end compiled package dependencies
    |-- README.md
    |-- README_CN.md
    - + \ No newline at end of file diff --git a/docs/1.1.1/deployment/start_metadatasource/index.html b/docs/1.1.1/deployment/start_metadatasource/index.html index 3bb5a9754a6..7b3fe062084 100644 --- a/docs/1.1.1/deployment/start_metadatasource/index.html +++ b/docs/1.1.1/deployment/start_metadatasource/index.html @@ -7,7 +7,7 @@ DataSource | Apache Linkis - + @@ -71,7 +71,7 @@ }}
    - + \ No newline at end of file diff --git a/docs/1.1.1/deployment/unpack_hierarchical_structure/index.html b/docs/1.1.1/deployment/unpack_hierarchical_structure/index.html index 773958dc306..d4888c6742c 100644 --- a/docs/1.1.1/deployment/unpack_hierarchical_structure/index.html +++ b/docs/1.1.1/deployment/unpack_hierarchical_structure/index.html @@ -7,7 +7,7 @@ installation package directory structure | Apache Linkis - + @@ -17,7 +17,7 @@
    - + \ No newline at end of file diff --git a/docs/1.1.1/deployment/web_install/index.html b/docs/1.1.1/deployment/web_install/index.html index c228c15bdcd..dcc5c7f82bd 100644 --- a/docs/1.1.1/deployment/web_install/index.html +++ b/docs/1.1.1/deployment/web_install/index.html @@ -7,7 +7,7 @@ Linkis Console Deployment | Apache Linkis - + @@ -21,7 +21,7 @@
    1. Copy the front-end package to the corresponding directory: /appcom/Install/linkis/dist; # The directory where the front-end package is decompressed

    2. Start the service sudo systemctl restart nginx

    3. After execution, you can access it directly in the Chrome browser: http://nginx_ip:nginx_port

    3. Common problems#

    (1) Upload file size limit

    sudo vi /etc/nginx/nginx.conf

    Change upload size

    client_max_body_size 200m

    (2) Interface timeout

    sudo vi /etc/nginx/conf.d/linkis.conf

    Change interface timeout

    proxy_read_timeout 600s
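    For context, a minimal sketch showing where these two directives typically live; the server/location layout below is an assumption about your linkis.conf, and the gateway address is a placeholder:

    http {
        # (1) raise the upload file size limit
        client_max_body_size 200m;

        server {
            listen 8080;

            location /api {
                proxy_pass http://127.0.0.1:9001;   # placeholder Linkis gateway address
                # (2) raise the backend read timeout to avoid interface timeouts
                proxy_read_timeout 600s;
            }
        }
    }

    After editing, reload nginx with sudo nginx -s reload for the changes to take effect.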
    - + \ No newline at end of file diff --git a/docs/1.1.1/development/linkis_compile_and_package/index.html b/docs/1.1.1/development/linkis_compile_and_package/index.html index 21ffa37b623..9a8f575425a 100644 --- a/docs/1.1.1/development/linkis_compile_and_package/index.html +++ b/docs/1.1.1/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Compile And Package | Apache Linkis - + @@ -20,7 +20,7 @@ Modify the dependency hadoop-hdfs to hadoop-hdfs-client:

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId> <!-- Just replace this line with <artifactId>hadoop-hdfs-client</artifactId> -->
        <version>${hadoop.version}</version>
    </dependency>

    Modify hadoop-hdfs to:

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs-client</artifactId>
        <version>${hadoop.version}</version>
    </dependency>

    5.2 How to modify the Spark and Hive versions that Linkis depends on#

    Here's an example of changing the version of Spark. Go to the directory where the Spark engine is located and manually modify the Spark version information of the pom.xml file as follows:

    cd incubator-linkis-x.x.x/linkis-engineconn-plugins/engineconn-plugins/spark
    vim pom.xml

    <properties>
        <spark.version>2.4.3</spark.version> <!-- Modify the Spark version number here -->
    </properties>

    Modifying the version of other engines is similar to modifying the Spark version. First, enter the directory where the relevant engine is located, and manually modify the engine version information in the pom.xml file.

    Then please refer to 4. Compile an engine

    - + \ No newline at end of file diff --git a/docs/1.1.1/development/linkis_config/index.html b/docs/1.1.1/development/linkis_config/index.html index 0b77897ac02..e116d2f757f 100644 --- a/docs/1.1.1/development/linkis_config/index.html +++ b/docs/1.1.1/development/linkis_config/index.html @@ -7,7 +7,7 @@ Introduction to Linkis Configuration Parameters | Apache Linkis - + @@ -27,7 +27,7 @@ It mainly specifies the startup parameters and runtime parameters of the engine. These parameters can be set on the client side. It is recommended to use the client side for personalized submission settings. Only the default values ​​are set on the page.

    - + \ No newline at end of file diff --git a/docs/1.1.1/development/linkis_debug/index.html b/docs/1.1.1/development/linkis_debug/index.html index 5caeb957023..8f67d2b2743 100644 --- a/docs/1.1.1/development/linkis_debug/index.html +++ b/docs/1.1.1/development/linkis_debug/index.html @@ -7,7 +7,7 @@ Linkis Debug | Apache Linkis - + @@ -46,7 +46,7 @@ screenshot of enterprise wechat _16500167527083

    - + \ No newline at end of file diff --git a/docs/1.1.1/development/linkis_debug_in_mac/index.html b/docs/1.1.1/development/linkis_debug_in_mac/index.html index 4b69052224b..f43b6875f34 100644 --- a/docs/1.1.1/development/linkis_debug_in_mac/index.html +++ b/docs/1.1.1/development/linkis_debug_in_mac/index.html @@ -7,7 +7,7 @@ Linkis Debug In Mac | Apache Linkis - + @@ -51,7 +51,7 @@ wds.linkis.engineconn.plugin.loader.store.path=/Users/leojie/other_project/apache/linkis/incubator-linkis/linkis-engineconn-plugins/shell/target/out

    The two configurations here mainly specify the root directory for engine storage. Pointing it at target/out means that after engine-related code or configuration changes, you can simply restart the engineplugin service for the changes to take effect.

    3.12 Set sudo password-free for the current user#

    When the engine is started, sudo needs to be used to execute the shell command to start the engine process. The current user on the mac generally needs to enter a password when using sudo. Therefore, it is necessary to set sudo password-free for the current user. The setting method is as follows:

    sudo chmod u-w /etc/sudoers
    sudo visudo
    # Replace  #%admin ALL=(ALL) ALL  with  %admin ALL=(ALL) NOPASSWD: ALL
    # then save the file and exit

    3.13 Service Testing#

    Make sure that the above services are all successfully started, and then test and submit the shell script job in postman.

    First visit the login interface to generate a cookie:

    login

    Then submit the shell code for execution

    POST: http://127.0.0.1:9001/api/rest_j/v1/entrance/submit

    body parameter:

    {  "executionContent": {    "code": "echo 'hello'",    "runType": "shell"  },  "params": {    "variable": {      "testvar": "hello"    },    "configuration": {      "runtime": {},      "startup": {}    }  },  "source": {    "scriptPath": "file:///tmp/hadoop/test.sql"  },  "labels": {    "engineType": "shell-1",    "userCreator": "leojie-IDE"  }}

    Result of the execution:

    {    "method": "/api/entrance/submit",    "status": 0,    "message": "OK",    "data": {        "taskID": 1,        "execID": "exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0"    }}

    Finally, check the running status of the task and get the running result set:

    GET http://127.0.0.1:9001/api/rest_j/v1/entrance/exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0/progress

    {    "method": "/api/entrance/exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0/progress",    "status": 0,    "message": "OK",    "data": {        "progress": 1,        "progressInfo": [],        "execID": "exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0"    }}

    GET http://127.0.0.1:9001/api/rest_j/v1/jobhistory/1/get

    GET http://127.0.0.1:9001/api/rest_j/v1/filesystem/openFile?path=file:///Users/leojie/software/linkis/data/resultSetDir/leojie/linkis/2022-07-16/214859/IDE/1/1_0.dolphin

    {    "method": "/api/filesystem/openFile",    "status": 0,    "message": "OK",    "data": {        "metadata": "NULL",        "totalPage": 0,        "totalLine": 1,        "page": 1,        "type": "1",        "fileContent": [            [                "hello"            ]        ]    }}
    - + \ No newline at end of file diff --git a/docs/1.1.1/development/new_engine_conn/index.html b/docs/1.1.1/development/new_engine_conn/index.html index 44a624fba2e..8a81fdcc40a 100644 --- a/docs/1.1.1/development/new_engine_conn/index.html +++ b/docs/1.1.1/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ How To Quickly Implement A New Engine | Apache Linkis - + @@ -53,7 +53,7 @@ const NODEICON = { [NODETYPE.JDBC]: { icon: jdbc, class: {'jdbc': true} },}

    Add the icon of the new engine in the web/src/apps/workflows/module/process/images/newIcon/ directory

    web/src/apps/workflows/module/process/images/newIcon/jdbc

    Also, when contributing to the community, please consider the license or copyright of the svg file.

    3. Chapter Summary#

    The above content records the implementation process of the new engine, as well as some additional engine configurations that need to be done. At present, the expansion process of a new engine is still relatively cumbersome, and it is hoped that the expansion and installation of the new engine can be optimized in subsequent versions.

    - + \ No newline at end of file diff --git a/docs/1.1.1/development/web_build/index.html b/docs/1.1.1/development/web_build/index.html index ac8006df710..210b601b306 100644 --- a/docs/1.1.1/development/web_build/index.html +++ b/docs/1.1.1/development/web_build/index.html @@ -7,7 +7,7 @@ Linkis Console Compile | Apache Linkis - + @@ -17,7 +17,7 @@ When you run the project in this way, the effect of your code changes will be dynamically reflected in the browser.

    Note: Because the project is developed with the front end and back end separated, when running in a local browser, the browser needs to be set to allow cross-domain requests in order to access the back-end interface. For specific settings, please refer to solve the chrome cross domain problem.

    6. Common problem#

    6.1 npm install cannot succeed#

    If you encounter this situation, you can use the domestic Taobao npm mirror:

    npm install -g cnpm --registry=https://registry.npm.taobao.org

    Then, replace the npm install command by executing the following command

    cnpm install

    Note that when the project is started and packaged, you can still use the npm run build and npm run serve commands

    - + \ No newline at end of file diff --git a/docs/1.1.1/engine_usage/flink/index.html b/docs/1.1.1/engine_usage/flink/index.html index 3e937666619..95b692ab813 100644 --- a/docs/1.1.1/engine_usage/flink/index.html +++ b/docs/1.1.1/engine_usage/flink/index.html @@ -7,7 +7,7 @@ Flink Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ EngineConnPlugin Installation

    2.3 Flink engine tags#

    Linkis1.0 manages engines through tags, so we need to insert tag data into our database. The way to insert it is shown below.

    EngineConnPlugin Installation > 2.2 Configuration modification of management console (optional)

    3. The use of Flink engine#

    Preparation: queue settings#

    The Flink engine of Linkis 1.0 is started via Flink on YARN, so you need to specify the queue to be used by the user. The way to specify the queue is shown in Figure 3-1.

    Figure 3-1 Queue settings

    Prerequisite knowledge: the two ways to use the Flink engine#

    Linkis' Flink engine has two execution modes. One is the ComputationEngineConn mode, mainly used in DSS-Scriptis or Streamis-Datasource for debugging, sampling, and verifying the correctness of Flink code; the other is the OnceEngineConn mode, mainly used in the Streamis production center to start a streaming application.

    Prerequisite knowledge: the Connector plugins of FlinkSQL#

    FlinkSQL supports a variety of data sources, such as binlog, kafka, hive, etc. If you want to use these data sources in Flink code, you need to put the connector plugin jar packages into the lib directory of the flink engine and restart the Linkis EnginePlugin service. For example, to use binlog as a data source in your FlinkSQL, you need to put flink-connector-mysql-cdc-1.1.1.jar into the lib directory of the flink engine.

    cd ${LINKIS_HOME}/sbin
    sh linkis-daemon.sh restart cg-engineplugin

    3.1 ComputationEngineConn method#

    In order to facilitate sampling and debugging, we have added a script type of fql to Scriptis, which is specifically used to execute FlinkSQL. But you need to ensure that your DSS has been upgraded to DSS1.0.0. After upgrading to DSS1.0.0, you can directly enter Scriptis and create a new fql script for editing and execution.

    FlinkSQL writing example, taking binlog as an example

    CREATE TABLE mysql_binlog (
     id INT NOT NULL,
     name STRING,
     age INT
    ) WITH (
     'connector' = 'mysql-cdc',
     'hostname' = 'ip',
     'port' = 'port',
     'username' = 'username',
     'password' = 'password',
     'database-name' = 'dbname',
     'table-name' = 'tablename',
     'debezium.snapshot.locking.mode' = 'none' -- recommended to add, otherwise the table will be locked
    );

    select * from mysql_binlog where id > 10;

    When debugging with the select syntax in Scriptis, the Flink engine has an automatic cancel mechanism: when the specified time is reached or the number of sampled rows reaches the specified limit, the Flink engine proactively cancels the task and persists the result set already obtained; the front end then calls the open-result-set interface to display it.

    3.2 Task submission via Linkis-cli#

    After Linkis 1.0, a cli method is provided to submit tasks. We only need to specify the corresponding EngineConn and CodeType tag types. Its use with Flink is as follows:

    sh ./bin/linkis-cli -engineType flink-1.12.2 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to: Linkis CLI Manual.

    3.3 OnceEngineConn method#

    The OnceEngineConn mode is used to formally start Flink streaming applications. Specifically, it calls LinkisManager's createEngineConn interface through LinkisManagerClient and sends the code to the created Flink engine, which then starts executing. This mode can be called by other systems, such as Streamis. Using the Client is also very simple: first create a new maven project, or introduce the following dependency into your project.

    <dependency>
        <groupId>com.webank.wedatasphere.linkis</groupId>
        <artifactId>linkis-computation-client</artifactId>
        <version>${linkis.version}</version>
    </dependency>

    Then create a new scala test file and click Execute to complete the analysis: it reads binlog data from one table and inserts it into another MySQL table. Note that you must create a resources directory in the maven project and place a linkis.properties file in it, specifying the gateway address and API version of Linkis, for example:

    wds.linkis.server.version=v1
    wds.linkis.gateway.url=http://ip:9001/
    object OnceJobTest {
      def main(args: Array[String]): Unit = {
        val sql = """CREATE TABLE mysql_binlog (
                    | id INT NOT NULL,
                    | name STRING,
                    | age INT
                    |) WITH (
                    | 'connector' = 'mysql-cdc',
                    | 'hostname' = 'ip',
                    | 'port' = 'port',
                    | 'username' = '${username}',
                    | 'password' = '${password}',
                    | 'database-name' = '${database}',
                    | 'table-name' = '${tablename}',
                    | 'debezium.snapshot.locking.mode' = 'none'
                    |);
                    |CREATE TABLE sink_table (
                    | id INT NOT NULL,
                    | name STRING,
                    | age INT,
                    | primary key(id) not enforced
                    |) WITH (
                    |  'connector' = 'jdbc',
                    |  'url' = 'jdbc:mysql://${ip}:port/${database}',
                    |  'table-name' = '${tablename}',
                    |  'driver' = 'com.mysql.jdbc.Driver',
                    |  'username' = '${username}',
                    |  'password' = '${password}'
                    |);
                    |INSERT INTO sink_table SELECT id, name, age FROM mysql_binlog;
                    |""".stripMargin
        val onceJob = SimpleOnceJob.builder().setCreateService("Flink-Test")
          .addLabel(LabelKeyUtils.ENGINE_TYPE_LABEL_KEY, "flink-1.12.2")
          .addLabel(LabelKeyUtils.USER_CREATOR_LABEL_KEY, "hadoop-Streamis")
          .addLabel(LabelKeyUtils.ENGINE_CONN_MODE_LABEL_KEY, "once")
          .addStartupParam(Configuration.IS_TEST_MODE.key, true)
          //.addStartupParam("label." + LabelKeyConstant.CODE_TYPE_KEY, "sql")
          .setMaxSubmitTime(300000)
          .addExecuteUser("hadoop")
          .addJobContent("runType", "sql")
          .addJobContent("code", sql)
          .addSource("jobName", "OnceJobTest")
          .build()
        onceJob.submit()
        println(onceJob.getId)
        onceJob.waitForCompleted()
        System.exit(0)
      }
    }
    - + \ No newline at end of file diff --git a/docs/1.1.1/engine_usage/hive/index.html b/docs/1.1.1/engine_usage/hive/index.html index e423af9dac0..4f509b0afa8 100644 --- a/docs/1.1.1/engine_usage/hive/index.html +++ b/docs/1.1.1/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive Engine Usage | Apache Linkis - + @@ -26,7 +26,7 @@ </loggers></configuration>
    - + \ No newline at end of file diff --git a/docs/1.1.1/engine_usage/jdbc/index.html b/docs/1.1.1/engine_usage/jdbc/index.html index 2bc9b2cfe97..d6d67c3fe04 100644 --- a/docs/1.1.1/engine_usage/jdbc/index.html +++ b/docs/1.1.1/engine_usage/jdbc/index.html @@ -7,7 +7,7 @@ JDBC Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "jdbc-4"); // required engineType Label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "jdbc"); // required codeType

    3.2 How to use Linkis-cli#

    After Linkis 1.0, you can submit tasks through cli. We only need to specify the corresponding EngineConn and CodeType tag types. The use of JDBC is as follows:

    sh ./bin/linkis-cli -engineType jdbc-4 -codeType jdbc -code "show tables"  -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to the Linkis CLI Manual.

    3.3 How to use Scriptis#

    The way to use Scriptis is the simplest. You can go directly to Scriptis, right-click the directory and create a new JDBC script, write JDBC code and click Execute.

    The execution principle of JDBC is to load the JDBC driver, submit the SQL to the SQL server for execution, obtain the result set, and return it.
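    As an illustration of that principle, a minimal, hypothetical Java sketch; the connection URL, credentials, and query are placeholders, and the real engine additionally handles multi-datasource routing, pooling, and result-set serialization:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class JdbcPrincipleSketch {
        public static void main(String[] args) throws Exception {
            // 1. Load the JDBC driver (hypothetical MySQL target)
            Class.forName("com.mysql.jdbc.Driver");
            // 2. Connect to the SQL server (placeholder URL and credentials)
            try (Connection conn = DriverManager.getConnection(
                    "jdbc:mysql://ip:3306/db", "username", "password");
                 Statement stmt = conn.createStatement();
                 // 3. Submit the SQL and obtain the result set
                 ResultSet rs = stmt.executeQuery("show tables")) {
                // 4. Return the rows (here: simply print them)
                while (rs.next()) {
                    System.out.println(rs.getString(1));
                }
            }
        }
    }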

    Figure 3-2 Screenshot of the execution effect of JDBC

    4. JDBC EngineConn user settings#

    JDBC user settings mainly consist of JDBC connection information; it is recommended that users encrypt and manage the password and other sensitive information.

    - + \ No newline at end of file diff --git a/docs/1.1.1/engine_usage/openlookeng/index.html b/docs/1.1.1/engine_usage/openlookeng/index.html index 410eef15da0..c0a2edcba1e 100644 --- a/docs/1.1.1/engine_usage/openlookeng/index.html +++ b/docs/1.1.1/engine_usage/openlookeng/index.html @@ -7,7 +7,7 @@ OpenLookEng Engine | Apache Linkis - + @@ -19,7 +19,7 @@ For the openlookeng task, you only need to modify the EngineConnType and CodeType parameters in the Demo:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "openlookeng-1.5.0"); // required engineType Label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType

    3.2 Task submission via Linkis-cli#

    After Linkis 1.0, the cli method is provided to submit tasks. We only need to specify the corresponding EngineConn and CodeType tag types. The use of openlookeng is as follows:

    sh ./bin/linkis-cli -engineType openlookeng-1.5.0 -codeType sql -code 'show databases;' -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to: Linkis CLI Manual.

    - + \ No newline at end of file diff --git a/docs/1.1.1/engine_usage/overview/index.html b/docs/1.1.1/engine_usage/overview/index.html index 0d085129a21..08fbc97cf0f 100644 --- a/docs/1.1.1/engine_usage/overview/index.html +++ b/docs/1.1.1/engine_usage/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -16,7 +16,7 @@         The engine is a component that provides users with data processing and analysis capabilities. Currently, it has been connected to Linkis's engine, including mainstream big data computing engines Spark, Hive, Presto, etc. , There are also engines with the ability to process data in scripts such as python and Shell. DataSphereStudio is a one-stop data operation platform docked with Linkis. Users can conveniently use the engine supported by Linkis in DataSphereStudio to complete interactive data analysis tasks and workflow tasks.

    Engine | Supports Scriptis | Supports workflow
    Spark | Support | Support
    Hive | Support | Support
    Presto | Support | Support
    ElasticSearch | Support | Support
    Python | Support | Support
    Shell | Support | Support
    JDBC | Support | Support
    MySQL | Support | Support
    Flink | Support | Support

    2. Document structure#

    You can refer to the following documents for the related documents of the engines that have been accessed.

    - + \ No newline at end of file diff --git a/docs/1.1.1/engine_usage/pipeline/index.html b/docs/1.1.1/engine_usage/pipeline/index.html index 71c03eef1e0..f969ada0847 100644 --- a/docs/1.1.1/engine_usage/pipeline/index.html +++ b/docs/1.1.1/engine_usage/pipeline/index.html @@ -7,7 +7,7 @@ pipeline engine | Apache Linkis - + @@ -20,7 +20,7 @@

    - + \ No newline at end of file diff --git a/docs/1.1.1/engine_usage/python/index.html b/docs/1.1.1/engine_usage/python/index.html index 6563a4345ab..546d74ebb5d 100644 --- a/docs/1.1.1/engine_usage/python/index.html +++ b/docs/1.1.1/engine_usage/python/index.html @@ -7,7 +7,7 @@ Python Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Gateway, and then the Python EngineConn submits the code to the python executor for execution.

    Figure 3-1 Screenshot of the execution effect of python

    4. Python EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the version of python and some modules that python needs to load.

    Figure 4-1 User-defined configuration management console of python

    - + \ No newline at end of file diff --git a/docs/1.1.1/engine_usage/shell/index.html b/docs/1.1.1/engine_usage/shell/index.html index 66792d3cd3e..972721f74db 100644 --- a/docs/1.1.1/engine_usage/shell/index.html +++ b/docs/1.1.1/engine_usage/shell/index.html @@ -7,7 +7,7 @@ Shell Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "shell-1"); // required engineType Label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "shell"); // required codeType

    3.2 How to use Linkis-cli#

    After Linkis 1.0, you can submit tasks through cli. We only need to specify the corresponding EngineConn and CodeType tag types. The use of shell is as follows:

    sh ./bin/linkis-cli -engineType shell-1 -codeType shell -code "echo \"hello\" "  -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to the Linkis CLI Manual.

    3.3 How to use Scriptis#

    The use of Scriptis is the simplest. You can directly enter Scriptis, right-click the directory and create a new shell script, write shell code and click Execute.

    The execution principle of the shell engine is that the shell EngineConn starts a system process via Java's built-in ProcessBuilder, redirects the process output to the EngineConn, and writes it to the log.
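    For illustration, a minimal Java sketch of that mechanism; the real EngineConn additionally manages working directories, environment variables, and log files:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;

    public class ShellPrincipleSketch {
        public static void main(String[] args) throws Exception {
            // Start a system process for the shell code, as the EngineConn does via ProcessBuilder
            ProcessBuilder pb = new ProcessBuilder("sh", "-c", "echo 'hello'");
            pb.redirectErrorStream(true); // merge stderr into stdout
            Process process = pb.start();
            // Redirect the process output; the EngineConn writes this to its log
            try (BufferedReader reader = new BufferedReader(
                    new InputStreamReader(process.getInputStream()))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    System.out.println(line);
                }
            }
            int exitCode = process.waitFor();
            System.out.println("exit code: " + exitCode);
        }
    }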

    Figure 3-1 Screenshot of shell execution effect

    4. Shell EngineConn user settings#

    The shell EngineConn can generally set the maximum memory of the EngineConn JVM.

    - + \ No newline at end of file diff --git a/docs/1.1.1/engine_usage/spark/index.html b/docs/1.1.1/engine_usage/spark/index.html index 1d195d93d15..4e3d6e55855 100644 --- a/docs/1.1.1/engine_usage/spark/index.html +++ b/docs/1.1.1/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Figure 3-4 pyspark execution mode

    4. Spark EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the number of Spark session executors and the executor memory. These parameters allow users to set their own Spark parameters more freely; other Spark parameters can also be modified, such as the Python version used by pyspark.

    Figure 4-1 Spark user-defined configuration management console

    - + \ No newline at end of file diff --git a/docs/1.1.1/introduction/index.html b/docs/1.1.1/introduction/index.html index 2d4b6c82e6d..89e7bc9fb5d 100644 --- a/docs/1.1.1/introduction/index.html +++ b/docs/1.1.1/introduction/index.html @@ -7,7 +7,7 @@ Introduction | Apache Linkis - + @@ -20,7 +20,7 @@ Since the first release of Linkis in 2019, it has accumulated more than 700 trial companies and 1000+ sandbox trial users, which involving diverse industries, from finance, banking, tele-communication, to manufactory, internet companies and so on.

    - + \ No newline at end of file diff --git a/docs/1.1.1/release/index.html b/docs/1.1.1/release/index.html index 2c104302080..a9b0ece1ee6 100644 --- a/docs/1.1.1/release/index.html +++ b/docs/1.1.1/release/index.html @@ -7,7 +7,7 @@ Version overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Version overview

    Configuration Item#

    Module Name (Service Name) | Type | Parameter Name | Default Value | Description
    ec-openlookeng | New | linkis.openlookeng.engineconn.concurrent.limit | 100 | Concurrency limit
    ec-openlookeng | New | linkis.openlookeng.http.connectTimeout | 60L | Client connection timeout for HTTP requests based on OkHttp
    ec-openlookeng | New | linkis.openlookeng.http.readTimeout | 60L | Client read timeout for HTTP requests based on OkHttp
    ec-openlookeng | New | linkis.openlookeng.url | http://127.0.0.1:8080 | openlookeng service
    ec-openlookeng | New | linkis.openlookeng.catalog | system | catalog
    ec-openlookeng | New | linkis.openlookeng.schema |  | schema
    ec-openlookeng | New | linkis.openlookeng.source | global | source

    DB Table Changes#

    For details, see the upgrade schema in the db/upgrade/1.1.1_schema directory of the corresponding branch of the code repository (https://github.com/apache/incubator-linkis).

    - + \ No newline at end of file diff --git a/docs/1.1.1/table/udf-table/index.html b/docs/1.1.1/table/udf-table/index.html index fde38e9b5af..fdafdb716a8 100644 --- a/docs/1.1.1/table/udf-table/index.html +++ b/docs/1.1.1/table/udf-table/index.html @@ -7,7 +7,7 @@ UDF table structure | Apache Linkis - + @@ -16,7 +16,7 @@ udf_type 3: custom function - python functionudf_type 4: custom function - scala function

    2 linkis_ps_udf_manager#

    The administrator user table for UDF functions, which controls sharing permissions; only UDF administrators see the share entry on the front end.

    number | name | description | type | key | empty | extra | default value
    1 | id |  | bigint(20) | PRI | NO | auto_increment |
    2 | user_name |  | varchar(20) |  | YES |  |

    3 linkis_ps_udf_shared_info#

    udf shared record table

    number | name | description | type | key | empty | extra | default value
    1 | id |  | bigint(20) | PRI | NO | auto_increment |
    2 | udf_id | id of linkis_ps_udf_baseinfo | bigint(20) |  | NO |  |
    3 | user_name | username used by the share | varchar(50) |  | NO |  |

    4 linkis_ps_udf_tree#

    Tree-level record table for udf classification

    number | name | description | type | key | empty | extra | default value
    1 | id |  | bigint(20) | PRI | NO | auto_increment |
    2 | parent | parent category | bigint(20) |  | NO |  |
    3 | name | class name of the function | varchar(100) |  | YES |  |
    4 | user_name | username | varchar(50) |  | NO |  |
    5 | description | description information | varchar(255) |  | YES |  |
    6 | create_time |  | timestamp |  | NO | on update CURRENT_TIMESTAMP | CURRENT_TIMESTAMP
    7 | update_time |  | timestamp |  | NO |  | CURRENT_TIMESTAMP
    8 | category | category distinction udf / function | varchar(50) |  | YES |  |

    5 linkis_ps_udf_user_load#

    Configuration for whether a UDF is loaded by default

    number | name | description | type | key | empty | extra | default value
    1 | id |  | bigint(20) | PRI | NO | auto_increment |
    2 | udf_id | id of linkis_ps_udf_baseinfo | int(11) |  | NO |  |
    3 | user_name | owning user | varchar(50) |  | NO |  |

    6 linkis_ps_udf_version#

    udf version information table

    number | name | description | type | key | empty | extra | default value
    1 | id |  | bigint(20) | PRI | NO | auto_increment |
    2 | udf_id | id of linkis_ps_udf_baseinfo | bigint(20) |  | NO |  |
    3 | path | local path of the uploaded script/jar package | varchar(255) |  | NO |  |
    4 | bml_resource_id | material resource id in bml | varchar(50) |  | NO |  |
    5 | bml_resource_version | bml material version | varchar(20) |  | NO |  |
    6 | is_published | whether to publish | bit(1) |  | YES |  |
    7 | register_format | registration format | varchar(255) |  | YES |  |
    8 | use_format | use format | varchar(255) |  | YES |  |
    9 | description | version description | varchar(255) |  | NO |  |
    10 | create_time |  | timestamp |  | NO | on update CURRENT_TIMESTAMP | CURRENT_TIMESTAMP
    11 | md5 |  | varchar(100) |  | YES |  |

    ER diagram#

    image

    - + \ No newline at end of file diff --git a/docs/1.1.1/tags/index.html b/docs/1.1.1/tags/index.html index c0ea5f42dfa..0edaa6e7eb4 100644 --- a/docs/1.1.1/tags/index.html +++ b/docs/1.1.1/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -15,7 +15,7 @@

    Tags

    - + \ No newline at end of file diff --git a/docs/1.1.1/tuning_and_troubleshooting/configuration/index.html b/docs/1.1.1/tuning_and_troubleshooting/configuration/index.html index 4afd184c644..da09144afce 100644 --- a/docs/1.1.1/tuning_and_troubleshooting/configuration/index.html +++ b/docs/1.1.1/tuning_and_troubleshooting/configuration/index.html @@ -7,7 +7,7 @@ Configurations | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Linkis1.0 Configurations

    The configuration of Linkis1.0 is simplified on the basis of Linkis0.x. A public configuration file linkis.properties is provided in the conf directory to avoid the need for common configuration parameters to be configured in multiple microservices at the same time. This document will list the parameters of Linkis1.0 in modules.

    Please note: this article only lists the Linkis configuration parameters that affect operating performance or depend on the environment. Many parameters that users do not need to care about have been omitted; interested users can browse the source code.

    1 General configuration#

    The general configuration can be set in the global linkis.properties; set once, it takes effect for every microservice.

    1.1 Global configurations#

    Parameter name | Default value | Description
    wds.linkis.encoding | utf-8 | Linkis default encoding format
    wds.linkis.date.pattern | yyyy-MM-dd'T'HH:mm:ssZ | Default date format
    wds.linkis.test.mode | false | Whether to enable debugging mode; if set to true, all microservices support password-free login and all EngineConn open remote debugging ports
    wds.linkis.test.user | None | When wds.linkis.test.mode=true, the default login user for password-free login
    wds.linkis.home | /appcom/Install/LinkisInstall | Linkis installation directory; if it does not exist, the value of LINKIS_HOME is used
    wds.linkis.httpclient.default.connect.timeOut | 50000 | Linkis HttpClient default connection timeout
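    For example, a minimal sketch of overriding some of these global parameters in linkis.properties (the values below are illustrative, not recommendations):

    # enable debugging mode: password-free login, EngineConn remote debug ports
    wds.linkis.test.mode=true
    wds.linkis.test.user=hadoop

    # make the default encoding explicit
    wds.linkis.encoding=utf-8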

    1.2 LDAP configurations#

    Parameter name | Default value | Description
    wds.linkis.ldap.proxy.url | None | LDAP URL address
    wds.linkis.ldap.proxy.baseDN | None | LDAP baseDN address
    wds.linkis.ldap.proxy.userNameFormat | None |

    1.3 Hadoop configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.hadoop.root.user | hadoop | HDFS super user
    wds.linkis.filesystem.hdfs.root.path | None | User's HDFS default root path
    wds.linkis.keytab.enable | false | Whether to enable kerberos
    wds.linkis.keytab.file | /appcom/keytab | Kerberos keytab path, effective only when wds.linkis.keytab.enable=true
    wds.linkis.keytab.host.enabled | false |
    wds.linkis.keytab.host | 127.0.0.1 |
    hadoop.config.dir | None | If not configured, it is read from the environment variable HADOOP_CONF_DIR
    wds.linkis.hadoop.external.conf.dir.prefix | /appcom/config/external-conf/hadoop | hadoop additional configuration

    1.4 Linkis RPC configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.rpc.broadcast.thread.num | 10 | Linkis RPC broadcast thread number (recommended default value)
    wds.linkis.ms.rpc.sync.timeout | 60000 | Default processing timeout of the Linkis RPC Receiver
    wds.linkis.rpc.eureka.client.refresh.interval | 1s | Refresh interval of the Eureka client's microservice list (recommended default value)
    wds.linkis.rpc.eureka.client.refresh.wait.time.max | 1m | Maximum refresh waiting time (recommended default value)
    wds.linkis.rpc.receiver.asyn.consumer.thread.max | 10 | Maximum number of Receiver Consumer threads (if there are many online users, it is recommended to increase this parameter appropriately)
    wds.linkis.rpc.receiver.asyn.consumer.freeTime.max | 2m | Receiver Consumer maximum idle time
    wds.linkis.rpc.receiver.asyn.queue.size.max | 1000 | Maximum number of buffers in the receiver consumption queue (if there are many online users, it is recommended to increase this parameter appropriately)
    wds.linkis.rpc.sender.asyn.consumer.thread.max | 5 | Sender Consumer maximum number of threads
    wds.linkis.rpc.sender.asyn.consumer.freeTime.max | 2m | Sender Consumer maximum idle time
    wds.linkis.rpc.sender.asyn.queue.size.max | 300 | Sender consumption queue maximum buffer number

    2. Computation governance configuration parameters#

    2.1 Entrance configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.spark.engine.version | 2.4.3 | The default Spark version used when the user submits a script without specifying a version
    wds.linkis.hive.engine.version | 1.2.1 | The default Hive version used when the user submits a script without specifying a version
    wds.linkis.python.engine.version | python2 | The default Python version used when the user submits a script without specifying a version
    wds.linkis.jdbc.engine.version | 4 | The default JDBC version used when the user submits a script without specifying a version
    wds.linkis.shell.engine.version | 1 | The default shell version used when the user submits a script without specifying a version
    wds.linkis.appconn.engine.version | v1 | The default AppConn version used when the user submits a script without specifying a version
    wds.linkis.entrance.scheduler.maxParallelismUsers | 1000 | Maximum number of concurrent users supported by Entrance
    wds.linkis.entrance.job.persist.wait.max | 5m | Maximum time for Entrance to wait for JobHistory to persist a Job
    wds.linkis.entrance.config.log.path | None | If not configured, the value of wds.linkis.filesystem.hdfs.root.path is used by default
    wds.linkis.default.requestApplication.name | IDE | The default submission system when the submission system is not specified
    wds.linkis.default.runType | sql | The default script type when the script type is not specified
    wds.linkis.warn.log.exclude | org.apache,hive.ql,hive.metastore,com.netflix,com.webank.wedatasphere | Real-time WARN-level logs that are not output to the client by default
    wds.linkis.log.exclude | org.apache, hive.ql, hive.metastore, com.netflix, com.webank.wedatasphere, com.webank | Real-time INFO-level logs that are not output to the client by default
    wds.linkis.instance | 3 | User's default number of concurrent jobs per engine
    wds.linkis.max.ask.executor.time | 5m | Maximum time to apply to LinkisManager for an available EngineConn
    wds.linkis.hive.special.log.include | org.apache.hadoop.hive.ql.exec.Task | When pushing Hive logs to the client, which logs are not filtered by default
    wds.linkis.spark.special.log.include | org.apache.linkis.engine.spark.utils.JobProgressUtil | When pushing Spark logs to the client, which logs are not filtered by default
    wds.linkis.entrance.shell.danger.check.enabled | false | Whether to check and block dangerous shell syntax
    wds.linkis.shell.danger.usage | rm,sh,find,kill,python,for,source,hdfs,hadoop,spark-sql,spark-submit,pyspark,spark-shell,hive,yarn | Shell default dangerous syntax
    wds.linkis.shell.white.usage | cd,ls | Shell whitelist syntax
    wds.linkis.sql.default.limit | 5000 | SQL default maximum number of returned result-set rows

    2.2 EngineConn configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.engineconn.resultSet.default.store.path | hdfs:///tmp | Job result set default storage path
    wds.linkis.engine.resultSet.cache.max | 0k | When the result set size is below this value, EngineConn returns it to Entrance directly without writing to disk
    wds.linkis.engine.default.limit | 5000 |
    wds.linkis.engine.lock.expire.time | 120000 | The maximum idle time of the engine lock, i.e. how long after Entrance applies for the lock without submitting code to EngineConn the lock is released
    wds.linkis.engineconn.ignore.words | org.apache.spark.deploy.yarn.Client | Logs ignored by default when the Engine pushes logs to the Entrance side
    wds.linkis.engineconn.pass.words | org.apache.hadoop.hive.ql.exec.Task | Logs that must be pushed by default when the Engine pushes logs to the Entrance side
    wds.linkis.engineconn.heartbeat.time | 3m | Default heartbeat interval from EngineConn to LinkisManager
    wds.linkis.engineconn.max.free.time | 1h | EngineConn's maximum free time

    2.3 EngineConnManager configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.ecm.memory.max | 80g | Maximum memory ECM can use to start EngineConns
    wds.linkis.ecm.cores.max | 50 | Maximum number of CPUs ECM can use to start EngineConns
    wds.linkis.ecm.engineconn.instances.max | 50 | Maximum number of EngineConns that can be started; generally recommended to set the same as wds.linkis.ecm.cores.max
    wds.linkis.ecm.protected.memory | 4g | ECM protected memory; the memory used by ECM to start EngineConns cannot exceed wds.linkis.ecm.memory.max - wds.linkis.ecm.protected.memory
    wds.linkis.ecm.protected.cores.max | 2 | Number of protected CPUs of ECM; same meaning as wds.linkis.ecm.protected.memory
    wds.linkis.ecm.protected.engine.instances | 2 | Number of protected instances of ECM
    wds.linkis.engineconn.wait.callback.pid | 3s | Waiting time for EngineConn to return its pid

    2.4 LinkisManager configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.manager.am.engine.start.max.time | 10m | Maximum time for LinkisManager to start a new EngineConn
    wds.linkis.manager.am.engine.reuse.max.time | 5m | Maximum selection time for LinkisManager to reuse an existing EngineConn
    wds.linkis.manager.am.engine.reuse.count.limit | 10 | Maximum polling count for LinkisManager to reuse an existing EngineConn
    wds.linkis.multi.user.engine.types | jdbc,es,presto | When LinkisManager reuses an existing EngineConn, which engine types do not use the user as a reuse rule
    wds.linkis.rm.instance | 10 | The default maximum number of instances per user per engine
    wds.linkis.rm.yarnqueue.cores.max | 150 | Maximum number of cores per user in each engine's usage queue
    wds.linkis.rm.yarnqueue.memory.max | 450g | Maximum amount of memory per user in each engine's usage queue
    wds.linkis.rm.yarnqueue.instance.max | 30 | Maximum number of applications launched by each user in each engine's queue

    3. Per-engine configuration parameters#

    3.1 JDBC engine configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.jdbc.default.limit | 5000 | Default maximum number of returned result-set rows
    wds.linkis.jdbc.support.dbs | mysql=>com.mysql.jdbc.Driver,postgresql=>org.postgresql.Driver,oracle=>oracle.jdbc.driver.OracleDriver,hive2=>org.apache.hive.jdbc.HiveDriver,presto=>com.facebook.presto.jdbc.PrestoDriver | Drivers supported by the JDBC engine
    wds.linkis.engineconn.jdbc.concurrent.limit | 100 | Maximum number of concurrent SQL executions

    3.2 Python engine configuration parameters#

    Parameter name | Default value | Description
    pythonVersion | /appcom/Install/anaconda3/bin/python | Python command path
    python.path | None | Specifies an additional path for Python, which only accepts shared storage paths

    3.3 Spark engine configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.engine.spark.language-repl.init.time | 30s | Maximum initialization time for the Scala and Python command interpreters
    PYSPARK_DRIVER_PYTHON | python | Python command path
    wds.linkis.server.spark-submit | spark-submit | spark-submit command path

    4. PublicEnhancements configuration parameters#

    4.1 BML configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.bml.dws.version | v1 | Version number requested by Linkis Restful
    wds.linkis.bml.auth.token.key | Validation-Code | Password-free token-key for BML requests
    wds.linkis.bml.auth.token.value | BML-AUTH | Password-free token-value for BML requests
    wds.linkis.bml.hdfs.prefix | /tmp/linkis | Prefix path of BML files stored on HDFS

    4.2 Metadata configuration parameters#

    Parameter name | Default value | Description
    hadoop.config.dir | /appcom/config/hadoop-config | If it does not exist, the value of the environment variable HADOOP_CONF_DIR is used by default
    hive.config.dir | /appcom/config/hive-config | If it does not exist, the value of the environment variable HIVE_CONF_DIR is used by default
    hive.meta.url | None | URL of the HiveMetaStore database; must be configured if hive.config.dir is not configured
    hive.meta.user | None | User of the HiveMetaStore database
    hive.meta.password | None | Password of the HiveMetaStore database

    4.3 JobHistory configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.jobhistory.admin | None | The default Admin account, used to specify which users can view everyone's execution history

    4.4 FileSystem configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.filesystem.root.path | file:///tmp/linkis/ | User's Linux local root directory
    wds.linkis.filesystem.hdfs.root.path | hdfs:///tmp/ | User's HDFS root directory
    wds.linkis.workspace.filesystem.hdfsuserrootpath.suffix | /linkis/ | First-level suffix after the user's HDFS root directory; the user's actual root directory is ${hdfs.root.path}/${user}/${hdfsuserrootpath.suffix}
    wds.linkis.workspace.resultset.download.is.limit | true | Whether to limit the number of rows when the client downloads a result set
    wds.linkis.workspace.resultset.download.maxsize.csv | 5000 | Row limit when the result set is downloaded as a CSV file
    wds.linkis.workspace.resultset.download.maxsize.excel | 5000 | Row limit when the result set is downloaded as an Excel file
    wds.linkis.workspace.filesystem.get.timeout | 2000L | Maximum timeout for requests to the underlying filesystem (if your HDFS or Linux machine performs poorly, it is recommended to increase this appropriately)

    4.5 UDF configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.udf.share.path | /mnt/bdap/udf | Storage path of shared UDFs; it is recommended to set it to an HDFS path

    5. MicroService configuration parameters#

    5.1 Gateway configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.gateway.conf.enable.proxy.user | false | Whether to enable proxy-user mode; if enabled, the login user's requests are proxied to the proxy user for execution
    wds.linkis.gateway.conf.proxy.user.config | proxy.properties | Storage file of proxy rules
    wds.linkis.gateway.conf.proxy.user.scan.interval | 600000 | Proxy file refresh interval
    wds.linkis.gateway.conf.enable.token.auth | false | Whether to enable Token login mode; if enabled, access to Linkis via tokens is allowed
    wds.linkis.gateway.conf.token.auth.config | token.properties | Token rule storage file
    wds.linkis.gateway.conf.token.auth.scan.interval | 600000 | Token file refresh interval
    wds.linkis.gateway.conf.url.pass.auth | /dws/ | Requests released by default without login verification
    wds.linkis.gateway.conf.enable.sso | false | Whether to enable SSO user login mode
    wds.linkis.gateway.conf.sso.interceptor | None | If SSO login mode is enabled, the user needs to implement SSOInterceptor to jump to the SSO login page
    wds.linkis.admin.user | hadoop | Administrator user list
    wds.linkis.login_encrypt.enable | false | Whether the password is transmitted with RSA encryption when the user logs in
    wds.linkis.enable.gateway.auth | false | Whether to enable the Gateway IP whitelist mechanism
    wds.linkis.gateway.auth.file | auth.txt | IP whitelist storage file
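    For instance, a minimal sketch of enabling Token login mode with the parameters above; the token name and user list in token.properties are hypothetical, and the exact rule format should be checked against your version:

    # linkis.properties
    wds.linkis.gateway.conf.enable.token.auth=true
    wds.linkis.gateway.conf.token.auth.config=token.properties

    # token.properties (hypothetical rule: token => users allowed to use it)
    # TEST-AUTH=hadoop,user1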

    6. DataSource and Metadata Service configuration parameters#

    6.1 MetaData Service configuration parameters#

    From Version | Parameter name | Default value | Description
    v1.1.0 | wds.linkis.server.mdm.service.lib.dir | /lib/linkis-pulicxxxx-/linkis-metdata-manager/service | Specify the relative path of the service to be loaded
    v1.1.0 | wds.linkis.server.mdm.service.instance.expire-in-seconds | 60 | Set the service loading timeout; if it exceeds the specified time, the service will not be loaded
    v1.1.0 | wds.linkis.server.dsm.app.name | linkis-ps-data-source-manager | Set the service for obtaining data sources
    v1.1.0 | wds.linkis.server.mdm.service.kerberos.principle | hadoop/HOST@EXAMPLE.COM | Set the Kerberos principal for the linkis-metadata Hive service
    v1.1.0 | wds.linkis.server.mdm.service.user | hadoop | Set the user for the linkis-metadata Hive service
    v1.1.0 | wds.linkis.server.mdm.service.kerberos.krb5.path | "" | Set the Kerberos krb5 path for the linkis-metadata Hive service
    v1.1.0 | wds.linkis.server.mdm.service.temp.location | classpath:/tmp | Set the temp location for the linkis-metadata Hive and Kafka services
    v1.1.0 | wds.linkis.server.mdm.service.sql.driver | com.mysql.jdbc.Driver | Set the JDBC driver for the hive-metadata MySQL service
    v1.1.0 | wds.linkis.server.mdm.service.sql.url | jdbc:mysql://%s:%s/%s | Set the URL format for the hive-metadata MySQL service
    v1.1.0 | wds.linkis.server.mdm.service.sql.connect.timeout | 3000 | Set the connect timeout (ms) for the hive-metadata MySQL service
    v1.1.0 | wds.linkis.server.mdm.service.sql.socket.timeout | 6000 | Set the socket open timeout (ms) for the hive-metadata MySQL service
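    The three SQL parameters above work together: at runtime the %s placeholders in the URL are filled with host, port and database. A minimal sketch of overriding them (the concrete address in the comment is hypothetical):

        # Hypothetical overrides in linkis.properties; at runtime the URL expands to
        # something like jdbc:mysql://127.0.0.1:3306/metadata_db
        echo "wds.linkis.server.mdm.service.sql.driver=com.mysql.jdbc.Driver" >> conf/linkis.properties
        echo "wds.linkis.server.mdm.service.sql.url=jdbc:mysql://%s:%s/%s" >> conf/linkis.properties
        echo "wds.linkis.server.mdm.service.sql.connect.timeout=3000" >> conf/linkis.properties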
    Version: 1.1.1

    Error Code

    Error code 01001#

    error description

    An error occurs during task execution:error code : 01001,error msg : Your task is not routed to the background ECM, please contact the administrator

    The em of labels

    Reason 1

    Your task is not routed to the background ECM

    Solution 1

    1. Check whether the tenant label is correct

    2. If yes, contact the administrator


    Error code 01002#

    error description

    An error occurs during task execution:error code : 01002,error msg : The Linkis service load is too high. Contact the administrator to expand the capacity

    Unexpected end of file from server

    Reason 1

    The linkis service load is too high, resulting in abnormal service connection

    Solution 1

    Please contact the administrator


    Error code 01003#

    error description

    An error occurs during task execution:error code : 01003,error msg : The linkis service load is too high. Please contact the administrator for capacity expansion

    failed to ask linkis Manager Can be retried SocketTimeoutException

    Reason 1

    The Linkis service load is too high, resulting in a service connection timeout

    Solution 1

    Contact administrator


    Error code 01004#

    error description

    An error occurs during task execution:error code : 01004,error msg : The engine is killed at startup, please contact the administrator

    [0-9]+ Killed

    Reason 1

    The engine was killed at startup because the machine load was too high

    Solution 1

    1. you can choose to retry
    2. or contact the administrator

    Error code 01005#

    error description

    An error occurs during task execution:error code : 01005,error msg : Request Yarn to get the queue information and retry for 2 times still failed, please contact the administrator

    Failed to request external resource ClassCastException

    Reason 1

    Failed to request Yarn to obtain queue information; this is caused by a configuration problem

    Solution 1

    Please contact the administrator


    Error code 01101#

    error description

    An error occurs during task execution:error code : 01101,error msg : ECM resources are insufficient, please contact the administrator for capacity expansion

    ECM resources are insufficient

    Reason 1

    Due to insufficient server resources, possibly during peak hours

    Solution 1

    1. you can retry the task
    2. or contact the administrator

    Error code 01102#

    error description

    An error occurs during task execution:error code : 01102,error msg : ECM memory resources are insufficient. Please contact the administrator for capacity expansion

    ECM memory resources are insufficient

    Reason 1

    Insufficient server memory resources

    Solution 1

    1. you can retry the task
    2. or contact the administrator

    Error code 01103#

    error description

    An error occurs during task execution:error code : 01103,error msg : ECM CPU resources are insufficient. Please contact the administrator for capacity expansion

    ECM CPU resources are insufficient

    Reason 1

    Insufficient server CPU resources

    Solution 1

    1. you can retry the task
    2. or contact the administrator

    Error code 01104#

    error description

    An error occurs during task execution:error code : 01104,error msg : Instance resources are insufficient. Please contact the administrator for capacity expansion

    Insufficient number of ECM instances

    Reason 1

    Insufficient server instance resources

    Solution 1

    1. you can retry the task
    2. or contact the administrator

    Error code 01105#

    error description

    An error occurs during task execution:error code : 01105,error msg : The machine is out of memory. Please contact the administrator for capacity expansion

    Cannot allocate memory

    Reason 1

    Server machine out of memory

    Solution 1

    1. you can retry the task

    2. or contact the administrator


    Error code 12001#

    error description

    An error occurs during task execution:error code : 12001,error msg : The queue CPU resource is insufficient. You can adjust the number of Spark executors

    Queue CPU resources are insufficient

    Reason 1

    The queue CPU resource is insufficient, exceeding the limit you set

    Solution 1

    • Open the DSS platform and click management console -- parameter configuration -- ide -- spark -- display advanced settings -- Worker engine resource settings (2) -- adjust the number of concurrent executors [spark.executor.instances]

    • Or adjust the upper limit of queue resource usage on the management console -- parameter configuration -- global settings


    Error code 12002#

    error description

    An error occurs during task execution:error code : 12002,error msg : The queue memory resources are insufficient. You can adjust the number of Spark executors

    Insufficient queue memory

    Reason 1

    The queue memory resource is insufficient, which exceeds the queue memory resource value you set

    Solution 1

    • Open the DSS platform and click management console -- parameter configuration -- ide -- spark -- display advanced settings -- Worker engine resource settings (2) -- adjust the number of concurrent executors [spark.executor.instances]

    • Or adjust the upper limit of queue resource usage on the management console - parameter configuration - global settings


    Error code 12003#

    error description

    An error occurs during task execution:error code : 12003,error msg : The number of queue instances exceeds the limit

    Insufficient number of queue instances

    Reason 1

    The number of queue instances exceeds the limit

    Solution 1

    • Open the DSS platform and click management console - parameter configuration - global settings - queue resources - maximum number of yarn queue instances [wds.linkis.rm.yarnqueue.instance.max]

    Remarks

    Users are not advised to modify the global settings at will. If necessary, please contact the operation and maintenance department before modifying. Parameters that are not global settings can be modified by the user


    Error code 12004#

    error description

    An error occurs during task execution:error code : 12004,error msg : Global driver memory usage reached the limit, a lower driver memory can be set

    Driver memory resources are insufficient

    Reason 1

    Global driver memory exceeds the maximum

    Solution 1

    • Open the DSS platform and click management console - parameter configuration - global settings - queue resources - maximum number of yarn queue instances [wds.linkis.rm.yarnqueue.instance.max]

    Solution 2

    • If the queue is available and the number of application instances is too low, you can contact the administrator to adjust the setting

    Remarks

    Users are not advised to modify the global settings at will. If necessary, please contact the operation and maintenance department before modifying. Parameters that are not global settings can be modified by the user


    Error code 12005#

    error description

    An error occurs during task execution:error code : 12005,error msg : The maximum number of global driver CPUs is exceeded, idle engines can be cleaned up

    Driver core resources are insufficient

    Reason 1

    The number of global driver CPUs exceeds the maximum

    Solution 1

    • Open the DSS platform and click management console - parameter configuration - global settings - queue resources - upper limit of queue CPU [wds.linkis.rm.yarnqueue.cores.max]

    Solution 2

    • Clean up idle engines

    Remarks

    Users are not advised to modify the global settings at will. If necessary, please contact the operation and maintenance department before modifying. Parameters that are not global settings can be modified by the user


    Error code 12006#

    error description

    An error occurs during task execution:error code : 12006,error msg : If the maximum number of concurrent engines is exceeded, idle engines can be cleaned up

    Insufficient number of instances

    Reason 1

    Maximum engine concurrency exceeded

    Solution 1

    • Modify the global configuration: open the DSS platform and click management console - parameter configuration - global settings - queue resources - global maximum concurrency of each engine [wds.linkis.rm.instance]
    • spark engine
    • hive engine
    • python engine
    • pipeline engine

    Remarks

    Users are not advised to modify the global settings at will. If necessary, please contact the operation and maintenance department before modifying. Parameters that are not global settings can be modified by the user


    Error code 12008#

    error description

    An error occurs during task execution:error code : 12008,error msg : Exception in getting the yarn queue information. It may be that the yarn queue you set does not exist

    获取Yarn队列信息异常 (Exception in getting Yarn queue information)

    Reason 1

    Exception in getting Yarn queue information

    Solution 1

    • If the cluster is normal and the user queue is configured incorrectly:
    • Linkis management console / parameter configuration > global settings >yarn queue name [wds.linkis.rm.yarnqueue]

    Solution 2

    • If the cluster is a new cluster, first check the cluster configuration of linkismanager

      Hadoop cluster address: http://ip:8088/cluster

      yarn cluster address:http://ip:8888/cluster/scheduler
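    One way to verify that the configured queue actually exists is the standard YARN REST API on the cluster address above (host and port vary by deployment; 8088 follows the Hadoop cluster address listed):

        # List the queue names known to the YARN scheduler
        curl -s "http://ip:8088/ws/v1/cluster/scheduler" | grep -o '"queueName":"[^"]*"'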

      Remarks

    Users are not advised to modify the global settings at will. If necessary, please contact the operation and maintenance department before modifying. Parameters that are not global settings can be modified by the user


    Error code 12009#

    error description

    An error occurs during task execution:error code : 12009,error msg : Session creation failed. The %s queue does not exist. Please check whether the queue settings are correct

    queue (\S+) is not exists in YARN

    Reason 1

    The queue does not exist. Please check whether the queue settings are correct

    Solution 1

    • The user contacts the administrator to confirm whether the queue is correct

    Error code 12010#

    error description

    An error occurs during task execution:error code : 12010,error msg : The cluster queue memory resources are insufficient. You can contact people in the group to release resources

    Insufficient cluster queue memory

    Reason 1

    Insufficient cluster queue memory resources

    Solution 1

    • Check whether the resource memory is full. The user contacts the personnel in the group to release the resource, or applies for queue expansion

    Error code 12011#

    error description

    An error occurs during task execution:error code : 12011,error msg : Cluster queue CPU resources are insufficient. You can contact people in the group to release resources

    Insufficient cluster queue cpu

    Reason 1

    Insufficient cluster queue CPU resources

    Solution 1

    • Check whether the resource CPU is full. The user contacts the personnel in the group to release the resource, or applies for queue expansion

    Error code 12013#

    error description

    An error occurs during task execution:error code : 12013,error msg : Insufficient resources cause the engine to timeout. You can retry the task

    wait for DefaultEngineConn

    Reason 1

    Starting the engine timed out due to insufficient resources

    Solution 1

    The user retries the task. If it occurs repeatedly, please contact the administrator for troubleshooting


    Error code 12014#

    error description

    An error occurs during task execution:error code : 12014,error msg : The request engine timed out, which may be caused by insufficient queue resources. Please try again

    wait for engineConn initial timeout

    Reason 1

    Request engine timed out

    Solution 1

    The user retries the task. If it occurs repeatedly, please contact the administrator for troubleshooting


    Error code 13001#

    error description

    An error occurs during task execution:error code : 13001,error msg : Java process memory overflow, it is recommended to optimize the script content

    OutOfMemoryError

    Reason 1

    Java process memory overflow

    Solution 1

    • The user tries to increase the driver-side memory configuration on the management console. If it occurs repeatedly, please contact the administrator for troubleshooting
    • Modify the configuration: open the DSS platform and click management console -- parameter configuration -- ide -- spark -- spark engine resource settings (2) -- spark engine memory [spark.driver.memory]

    Error code 13002#

    error description

    An error occurs during task execution:error code : 13002,error msg : The use of resources is too large. Please tune SQL or increase resources

    Container killed by YARN for exceeding memory limits

    Reason 1

    Resource usage is too large

    Solution 1

    • Increase the memory of the executor in the management console or at task submission
    • Modify the configuration: open the DSS platform and click management console -- parameter configuration -- ide -- spark -- worker resource settings (2) -- worker memory size [spark.executor.memory]
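    As a hedged illustration of the first bullet, the executor memory can also be raised for a single submission; the flags follow the Linkis-Cli manual, and the engine version, code and users below are placeholders:

        # Submit a task with a larger executor memory
        sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql \
            -confMap spark.executor.memory=4g \
            -code "select 1" -submitUser hadoop -proxyUser hadoop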

    Error code 13003#

    error description

    An error occurs during task execution:error code : 13003,error msg : The use of resources is too large. Please tune SQL or increase resources

    read record exception

    Reason 1

    Resource usage is too large

    Solution 1

    • After confirming with the administrator, the user can increase the memory of the executor in the management console or at task submission
    • Modify the configuration: open the DSS platform and click management console -- parameter configuration -- ide -- spark -- worker resource settings (2) -- worker memory size [spark.executor.memory]

    Error code 13004#

    error description

    An error occurs during task execution:error code : 13004,error msg : The engine exited unexpectedly, which may be caused by excessive use of resources

    failed because the engine quitted unexpectedly

    Reason 1

    Unexpected engine exit

    Solution 1

    Contact the administrator for troubleshooting


    Error code 13005#

    error description

    An error occurs during task execution:error code : 13005,error msg : Spark app exit may be caused by complex tasks

    Spark application has already stopped

    Reason 1

    Spark app exit may be caused by complex tasks

    Solution 1

    • The user tries to increase the driver-side memory configuration on the management console. If it occurs repeatedly, please contact the administrator for troubleshooting
    • Modify the configuration: open the DSS platform and click management console -- parameter configuration -- ide -- spark -- spark engine resource settings (2) -- spark engine memory [spark.driver.memory]

    Solution 2

    • After confirming with the administrator, the user can increase the memory of the executor in the management console or at task submission
    • Modify the configuration: open the DSS platform and click management console -- parameter configuration -- ide -- spark -- worker resource settings (2) -- worker memory size [spark.executor.memory]

    Error code 13006#

    error description

    An error occurs during task execution:error code : 13006,error msg : Spark context exits, which may be caused by complex tasks

    Spark application has already stopped

    Reason 1

    Spark context exits, which may be caused by complex tasks

    Solution 1

    • The user tries to increase the driver-side memory configuration on the management console. If it occurs repeatedly, please contact the administrator for troubleshooting
    • Modify the configuration: open the DSS platform and click management console -- parameter configuration -- ide -- spark -- spark engine resource settings (2) -- spark engine memory [spark.driver.memory]

    Solution 2

    • After confirming with the administrator, the user can increase the memory of the executor in the management console or at task submission
    • Modify the configuration: open the DSS platform and click management console -- parameter configuration -- ide -- spark -- worker resource settings (2) -- worker memory size [spark.executor.memory]

    Error code 13007#

    error description

    An error occurs during task execution:error code : 13007,error msg : Pyspark child process exited unexpectedly, which may be caused by complex tasks

    Pyspark process has stopped

    Reason 1

    Pyspark child process exited unexpectedly, which may be caused by complex tasks

    Solution 1

    • The user tries to increase the driver-side memory configuration on the management console. If it occurs repeatedly, please contact the administrator for troubleshooting
    • Modify the configuration: open the DSS platform and click management console -- parameter configuration -- ide -- spark -- spark engine resource settings (2) -- spark engine memory [spark.driver.memory]

    Error code 21001#

    error description

    An error occurs during task execution:error code : 21001,error msg : Session creation failed, user %s cannot submit application to queue: %s, please contact the person who provided the queue to you

    User (\S+) cannot submit applications to queue (\S+)

    Reason 1

    Session creation failed; user %s cannot submit an application to the queue

    Solution 1

    The user queue does not have permission. Please check whether the queue configuration is wrong or apply for queue permission


    Error code 21002#

    error description

    An error occurs during task execution:error code : 21002,error msg : Failed to create Python interpreter, please contact the administrator

    initialize python executor failed

    Reason 1

    Failed to create Python interpreter, please contact the administrator

    Solution 1

    Contact the operation and maintenance personnel for troubleshooting


    Error code 21003#

    error description

    An error occurs during task execution:error code : 21003,error msg : Failed to create stand-alone Python interpreter, please contact the administrator

    PythonSession process cannot be initialized

    Reason 1

    Failed to create Python interpreter, please contact the administrator

    Solution 1

    Contact the operation and maintenance personnel for troubleshooting


    Error code 22001#

    error description

    An error occurs during task execution:error code : 22001,error msg : %s has no permission to access. Please apply for permission on the data table, or contact your data management personnel

    Permission denied:\suser=[a-zA-Z0-9_]+,\saccess=[A-Z]+\s,\sinode="([a-zA-Z0-9/_.]+)

    Reason 1

    Unauthorized access

    Solution 1

    • The database table permission needs to be applied for

    Error code 22003#

    error description

    An error occurs during task execution:error code : 22003,error msg : The checked database table has no permission

    Authorization failed:No privilege

    Reason 1

    Unauthorized access

    Solution 1

    • The database table permission needs to be applied for

    Error code 22004#

    error description

    An error occurs during task execution:error code : 22004,error msg : The user %s does not exist on the machine. Please confirm whether you have applied for relevant permissions

    user (\S+) does not exist

    Reason 1

    Unauthorized access

    Solution 1

    • The user files a ticket to apply for the relevant permissions

    Error code 22005#

    error description

    An error occurs during task execution:error code : 22005,error msg : The user does not exist on the machine. Please confirm whether you have applied for relevant permissions

    engineConnExec.sh: Permission denied

    Reason 1

    Unauthorized access

    Solution 1

    • The user files a ticket to apply for the relevant permissions

    Error code 22006#

    error description

    An error occurs during task execution:error code : 22006,error msg : The user does not exist on the machine. Please confirm whether you have applied for relevant permissions

    at com.sun.security.auth.UnixPrincipal

    Reason 1

    Unauthorized access

    Solution 1

    • The user files a ticket to apply for the relevant permissions

    Error code 22007#

    error description

    An error occurs during task execution:error code : 22007,error msg : The user does not exist on the machine. Please confirm whether you have applied for relevant permissions

    LoginException: java.lang.NullPointerException: invalid null input: name

    Reason 1

    Unauthorized access

    Solution 1

    • The user files a ticket to apply for the relevant permissions

    Error code 22008#

    error description

    An error occurs during task execution:error code : 22008,error msg : The user does not exist on the machine. Please confirm whether you have applied for relevant permissions

    User not known to the underlying authentication module

    Reason 1

    Unauthorized access

    Solution 1

    • The user files a ticket to apply for the relevant permissions

    Error code 30001#

    error description

    An error occurs during task execution:error code : 30001,error msg : Library exceeds limit

    is exceeded

    Reason 1

    The database exceeds its capacity limit

    Solution 1

    Users clean up data by themselves

    Solution 2

    Apply for database expansion


    Error code 31001#

    error description

    An error occurs during task execution:error code : 31001,error msg : User active kill task

    is killed by user

    Reason 1

    The user actively killed the task

    Solution 1

    • If it is confirmed that the user has not actively killed, please contact the operation and maintenance personnel for troubleshooting

    Error code 31002#

    error description

    An error occurs during task execution:error code : 31002,error msg : The engineTypeLabel you submitted does not have a corresponding engine version

    EngineConnPluginNotFoundException

    Reason 1

    The engineTypeLabel has no corresponding engine version

    Solution 1

    • The user checks whether the engineTypeLabel passed is correct. If it is correct, please contact the operation and maintenance personnel for troubleshooting
    • Inspection method for the operation and maintenance personnel: lib/linkis-engineconn-plugins/ on the Linkis ECP node is the local cache of all available engine plugins. The error occurs either because the corresponding engine version is missing there, or because the engine directory contains files in an unexpected format, such as .bak files or zip packages that should not have been placed there
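    A hedged sketch of that inspection on the ECP node (the spark subdirectory and the dist/plugin layout are assumptions about a typical deployment):

        # List the cached engine plugins and look for stray .bak/.zip files or a missing version
        ls -l lib/linkis-engineconn-plugins/
        ls -l lib/linkis-engineconn-plugins/spark/dist/ lib/linkis-engineconn-plugins/spark/plugin/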

    Error code 41001#

    error description

    An error occurs during task execution:error code : 41001,error msg : The database %s does not exist. Please check whether the referenced database is correct

    Database '([a-zA-Z_0-9]+)' not found

    Reason 1

    Database %s does not exist

    Solution 1

    • The user checks whether the database exists and whether they have permission on it
    • show databases


    Error code 41001#

    error description

    An error occurs during task execution:error code : 41001,error msg : The database %s does not exist. Please check whether the referenced database is correct

    Database does not exist: ([a-zA-Z_0-9]+)

    Reason 1

    Database %s does not exist

    Solution 1

    • The user checks whether the database exists and whether they have permission on it
    • show databases


    Error code 41003#

    error description

    An error occurs during task execution:error code : 41003,error msg : The field %s does not exist. Please check whether the referenced field is correct

    cannot resolve '(.+)' given input columns

    Reason 1

    Field %s does not exist

    Solution 1

    • User checks whether the field exists

      desc table_name


    Error code 41003#

    error description

    An error occurs during task execution:error code : 41003,error msg : The field %s does not exist. Please check whether the referenced field is correct

    Column '(.+)' cannot be resolved

    Reason 1

    Field %s does not exist

    Solution 1

    • User checks whether the field exists

      desc table_name


    Error code 41003#

    error description

    An error occurs during task execution:error code : 41003,error msg : The field %s does not exist. Please check whether the referenced field is correct

    Invalid table alias or column reference '(.+)':

    Reason 1

    Field %s does not exist

    Solution 1

    • User checks whether the field exists

      desc table_name


    Error code 41004#

    error description

    An error occurs during task execution:error code : 41004,error msg : Partition field %s does not exist. Please check whether the referenced table is a partition table or the partition field is incorrect

    Partition spec {(\S+)} contains non-partition columns

    Reason 1

    Partition field %s does not exist

    Solution 1

    • The user checks whether the partition field is filled in correctly
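    One way to double-check the table's real partition columns before retrying (my_db.my_table is a placeholder):

        # Show the table definition and its existing partitions
        hive -e "desc my_db.my_table; show partitions my_db.my_table"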

    Error code 41004#

    error description

    An error occurs during task execution:error code : 41004,error msg : Partition field %s does not exist. Please check whether the referenced table is a partition table or the partition field is incorrect

    table is not partitioned but partition spec exists:{(.+)}

    Reason 1

    Partition field %s does not exist

    Solution 1

    • The user checks whether the partition field is filled in correctly

    Error code 41004#

    error description

    An error occurs during task execution:error code : 41004,error msg : The path corresponding to the table does not exist. Please contact your data manager

    Path does not exist: viewfs

    Reason 1

    Partition path does not exist

    Solution 1

    • Please try refresh table XXX, or kill the engine and run again; if the exception persists, please contact the data management personnel for troubleshooting

    Error code 41004#

    error description

    An error occurs during task execution:error code : 41004,error msg : Field %s does not exist, please check whether the referenced table %s is a partition table or the partition field is incorrect

    ([a-zA-Z_0-9]+) is not a valid partition column in table ([`.a-zA-Z_0-9]+)

    Reason 1

    Field %s does not exist

    Solution 1

    • The user checks whether the partition field is filled in correctly

    Error code 41005#

    error description

    An error occurs during task execution:error code : 41005,error msg : File %s does not exist

    Caused by:\s*java.io.FileNotFoundException

    Reason 1

    File %s does not exist

    Solution 1

    • Please try refresh table XXX, or kill the engine and run again; if the exception persists, please contact the data management personnel for troubleshooting

    Error code 42003#

    error description

    An error occurs during task execution:error code : 42003,error msg : Unknown function %s, please check whether the function referenced in the code is correct

    Undefined function: '(\S+)'

    Reason 1

    Error in referenced function

    Solution 1

    • If it is a UDF, please check the function. If it is a public function, please contact the operation and maintenance personnel for troubleshooting
    • udf address

    Error code 42003#

    error description

    An error occurs during task execution:error code : 42003,error msg : Unknown function %s, please check whether the function referenced in the code is correct

    Invalid function '(\S+)'

    Reason 1

    Error in referenced function

    Solution 1

    • If it is a UDF, please check the function. If it is a public function, please contact the operation and maintenance personnel for troubleshooting
    • udf address

    Error code 42004#

    error description

    An error occurs during task execution:error code : 42004,error msg : There is a name conflict in the field %s, please check whether there is a field with the same name in the subquery

    Ambiguous column Reference '(\S+)' in subquery

    Reason 1

    Name conflict in field %s

    Solution 1

    • User checks whether there is a duplicate name field

    Error code 42004#

    error description

    An error occurs during task execution:error code : 42004,error msg : There is a name conflict in the field %s, please check whether there is a field with the same name in the subquery

    Reference '(\S+)' is ambiguous

    Reason 1

    Name conflict in field %s

    Solution 1

    • User checks whether there is a duplicate name field

    Error code 42005#

    error description

    An error occurs during task execution:error code : 42005,error msg : The field %s must specify a table or subquery alias. Please check the source of the field

    Column '(\S+)' Found in more than One Tables/Subqueries

    Reason 1

    Field does not specify a table

    Solution 1

    • The user adds the field's source (table or subquery alias)

    Error code 42006#

    error description

    An error occurs during task execution:error code : 42006,error msg : The table %s already exists in the database. Please delete the corresponding table and try again

    Table already exists

    Reason 1

    Table %s already exists in the database

    Solution 1

    • The user needs to clean up the table and try again

    Error code 42006#

    error description

    An error occurs during task execution:error code : 42006,error msg : Table %s already exists in the database, please delete the corresponding table and try again

    AnalysisException: (\S+) already exists

    Reason 1

    Table %s already exists in the database

    Solution 1

    • The user needs to clean up the table and try again

    Error code 42006#

    error description

    An error occurs during task execution:error code : 42006,error msg : Table %s already exists in the database, please delete the corresponding table and try again

    Table (\S+) already exists

    Reason 1

    Table %s already exists in the database

    Solution 1

    • The user needs to clean up the table and try again

    Error code 42006#

    error description

    An error occurs during task execution:error code : 42006,error msg : Table %s already exists in the database, please delete the corresponding table and try again

    Table or view '(\S+)' already exists in database '(\S+)'

    Reason 1

    Table %s already exists in the database

    Solution 1

    • The user needs to clean up the table and try again

    Error code 42007#

    error description

    An error occurs during task execution:error code : 42007,error msg : The number of fields in the inserted target table does not match, please check the code!

    requires that the data to be inserted have the same number of columns as the target table

    Reason 1

    The number of fields in the inserted data does not match the target table

    Solution 1

    • The user checks the code

    Error code 42008#

    error description

    An error occurs during task execution:error code : 42008,error msg : Data type does not match, please check the code!

    due to data type mismatch: differing types in

    Reason 1

    data type mismatch

    Solution 1

    • The user checks the code

    Error code 42009#

    error description

    An error occurs during task execution:error code : 42009,error msg : The reference of field %s is incorrect. Please check whether the field exists!

    Invalid column reference (\S+)

    Reason 1

    Incorrect reference to field %s

    Solution 1

    • User checks whether the field exists

    Error code 42010#

    error description

    An error occurs during task execution:error code : 42010,error msg : Failed to extract data for field %s

    Can't extract value from (\S+): need

    Reason 1

    Failed to extract data for field %s

    Solution 1

    • Check whether the selected field is incorrect

    Error code 42012#

    error description

    An error occurs during task execution:error code : 42012,error msg : Group by position 2 is not in the select list, please check the code!

    GROUP BY position (\S+) is not in select list

    Reason 1

    The field of group by is not in the select list

    Solution 1

    • The user checks the code

    Error code 42014#

    error description

    An error occurs during task execution:error code : 42014,error msg : Insert data does not specify target table field %s, please check the code!

    Cannot insert into target table because column number/types are different '(\S+)'

    Reason 1

    The inserted data does not correspond to the fields of the target table

    Solution 1

    • The user checks the code

    Error code 42016#

    error description

    An error occurs during task execution:error code : 42016,error msg : UDF function does not specify a parameter, please check the code!

    UDFArgumentException Argument expected

    Reason 1

    The UDF function does not specify all required parameters

    Solution 1

    • The user checks the code

    Error code 42017#

    error description

    An error occurs during task execution:error code : 42017,error msg : Aggregate function %s cannot be written in group by, please check the code!

    aggregate functions are not allowed in GROUP BY

    Reason 1

    Aggregate function %s cannot be written in group by, please check the code!

    Solution 1

    • The user checks the code

    Error code 43007#

    error description

    An error occurs during task execution:error code : 43007,error msg : Pyspark execution failed, possibly due to syntax error or stage failure

    Py4JJavaError: An error occurred

    Reason 1

    Syntax error or stage failure

    Solution 1

    • If it is a syntax error, you need to check the code for modification
    • If the stage fails, you can choose to retry

    Error code 43011#

    error description

    An error occurs during task execution:error code : 43011,error msg : Export excel table exceeds the maximum limit of 1048575

    Invalid row number

    Reason 1

    Data volume exceeds the limit of a single sheet

    Solution 1

    • Reduce the amount of data to export, or export to CSV format

    Error code 43040#

    error description

    An error occurs during task execution:error code : 43040,error msg : Presto queries must specify data source and database information

    Schema must be specified when session schema is not set

    Reason 1

    Data source configuration error

    Solution 1

    • Check management console Presto data source configuration
    • Modify the configuration: open the DSS platform and click management console -- parameter configuration -- ide -- presto -- data source configuration

    Error code 46001#

    error description

    An error occurs during task execution:error code : 46001,error msg : Import file address not found: %s

    java.io.FileNotFoundException: (\S+) (No such file or directory)

    Reason 1

    file does not exist

    Solution 1

    • Please check the workspace, or check whether the files in the HDFS directory exist

    Error code 46002#

    error description

    An error occurs during task execution:error code : 46002,error msg : Exception of temporary file directory permission when exporting to excel

    java.io.IOException: Permission denied(.+) at org.apache.poi.xssf.streaming.SXSSFWorkbook.createAndRegisterSXSSFSheet

    Reason 1

    Abnormal file directory or insufficient file read / write permission

    Solution 1

    • Please confirm that the file has read-write permission. If there is any abnormality, please contact the operation and maintenance personnel for handling

    Error code 46003#

    error description

    An error occurs during task execution:error code : 46003,error msg : Unable to create directory while exporting file: %s

    java.io.IOException: Mkdirs failed to create (\S+) (.+)

    Reason 1

    Unable to create directory

    Solution 1

    • Contact the operation and maintenance personnel for troubleshooting

    Error code 46004#

    error description

    An error occurs during task execution:error code : 46004,error msg : Error importing module. The system does not have a %s module. Please contact the operation and maintenance personnel to install it

    ImportError: No module named (\S+)

    Reason 1

    The system does not have a %s module

    Solution 1

    • Contact the operation and maintenance personnel for troubleshooting

    Interface error troubleshooting

        ...lready. , ip: bdpujes110003 ,port: 9101 ,serviceKind: linkis-cg-linkismanager

    Here, ip and port are the corresponding service address, and serviceKind is the corresponding service name. If an RPC call fails, you can use this information to find the corresponding service in the logs

    Overview

    The compatibility of the OS version matters: some system versions may have command incompatibilities. For example, poor yum compatibility on Ubuntu may cause yum-related errors during installation and deployment. In addition, it is recommended not to use Windows to deploy Linkis, as currently no script is fully compatible with .bat commands.

  • Missing configuration items: there are two configuration files that need to be modified in Linkis 1.0, linkis-env.sh and db.sh

    The former contains the environment parameters that Linkis needs to load during execution, and the latter holds the database information for the tables Linkis itself needs. Under normal circumstances, if a corresponding configuration is missing, the error message will show an exception related to the key. For example, when db.sh does not contain the database configuration, an "unknown mysql server host '-P'" exception will appear, which is caused by the missing host.

  • Report error when starting microservice

    Linkis puts the log files of all microservices into the logs directory. The log directory levels are as follows:

    ├── linkis-computation-governance
    │   ├── linkis-cg-engineconnmanager
    │   ├── linkis-cg-engineplugin
    │   ├── linkis-cg-entrance
    │   └── linkis-cg-linkismanager
    ├── linkis-public-enhancements
    │   ├── linkis-ps-bml
    │   ├── linkis-ps-cs
    │   ├── linkis-ps-datasource
    │   └── linkis-ps-publicservice
    └── linkis-spring-cloud-services
        ├── linkis-mg-eureka
        └── linkis-mg-gateway

    It includes three microservice modules: computing governance, public enhancement, and microservice management. Each microservice contains three logs, linkis-gc.log, linkis.log, and linkis.out, corresponding to the service's GC log, service log, and service System.out log.

    Under normal circumstances, when an error occurs when starting a microservice, you can cd to the corresponding service in the log directory to view the related log to troubleshoot the problem. Generally, the most frequently occurring problems can also be divided into three categories:

    1. Port occupation: since the default ports of Linkis microservices are mostly concentrated around 9000, check whether the port of each microservice is occupied by another process before starting. If it is occupied, change the corresponding microservice port in the conf/linkis-env.sh file (see the port check sketch after this list)

    2. Necessary configuration parameters are missing: some microservices must load certain user-defined parameters before they can start normally. For example, the linkis-cg-engineplugin microservice loads the configuration related to wds.linkis.engineconn.* in conf/linkis.properties when it starts. If the user changes the Linkis path after installation and the configuration is not modified accordingly, an error will be reported when the linkis-cg-engineplugin microservice starts.

    3. System environment is not compatible: when deploying and installing, it is recommended that users follow the recommended system and application versions in the official documents as much as possible, and install necessary system tools such as expect and yum. If an application version is not compatible, it may cause application-related errors. For example, SQL statement incompatibilities in MySQL 5.7 may cause errors in the linkis.ddl and linkis.dml files when initializing the database during installation; refer to the "Q&A Problem Summary" or the deployment documentation for the corresponding settings.
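    A quick sketch of the port check from item 1 (9101 is only an example port; use the ports from your conf/linkis-env.sh):

        # Check whether a microservice port is already occupied
        ss -lntp | grep -w 9101
        # or, equivalently:
        lsof -i :9101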

  • Report error during microservice execution period

    The situation of error reporting during the execution of microservices is more complicated, and the situations encountered are also different depending on the environment, but the troubleshooting methods are basically the same. Starting from the corresponding microservice error catalog, we can roughly divide it into three situations:

    1. Manually installed and deployed microservices report errors: The logs of this type of microservice are unified under the log/ directory. After locating the microservice, enter the corresponding directory to view it.

    2. Engine start failure (insufficient resources, request engine failure): when this type of error occurs, it is not necessarily due to insufficient resources, because the front end can only grab logs after the Spring project has started, so errors from before the engine started cannot be fetched well. Three kinds of high-frequency problems were found during actual use by internal test users:

      a. The engine cannot be created because there is no engine directory permission: the log is printed to the linkis.out file under the cg-engineconnmanager microservice; you need to open that file to view the specific reason.

      b. There is a dependency conflict in the engine lib package, or the server cannot start normally because of insufficient memory resources: since the engine directory has already been created, the log is printed to the stdout file under the engine directory; the engine path can be found as described in item c.

      c. Errors reported during engine execution: each started engine is a microservice that is dynamically loaded and started at runtime. If an error occurs when the engine starts, find the engine's log under the corresponding startup user's directory; the root path is the ENGINECONN_ROOT_PATH filled in linkis-env before installation. If you need to modify the path after installation, modify wds.linkis.engineconn.root.dir in linkis.properties.
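      A hedged way to locate the newest engine logs described in item c (the /appcom/tmp fallback is an assumption; substitute your own ENGINECONN_ROOT_PATH):

          # Find the most recently modified engine stdout files under the engine root path
          find "${ENGINECONN_ROOT_PATH:-/appcom/tmp}" -name stdout 2>/dev/null | xargs ls -lt | head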

    Ⅴ. Community user group consultation and communication#

    For problems that cannot be located and resolved by the above process during installation and deployment, you can send error messages in our community group. To help community members and developers solve them efficiently, it is recommended that when you ask a question, you describe the problem, attach the related log information, and list the places you have already checked. If you think it may be an environment problem, also list the corresponding application versions. We provide two online groups: a WeChat group and a QQ group. The communication channels and specific contact information can be found at the bottom of the Linkis GitHub homepage.

    Ⅵ. Locate the source code by remote debugging#

    Under normal circumstances, remote debugging of the source code is the most effective way to locate problems, but compared with document review it requires some understanding of the source code structure. It is recommended to study the detailed source-code structure of Linkis in the Linkis WIKI before remote debugging. After gaining some familiarity with the project's source code structure, you can refer to How to Debug Linkis.

    Tuning

        override def getOrCreateGroup(groupName: String): Group = {
          if (!groupNameToGroups.containsKey(groupName)) synchronized {
            val initCapacity = 100
            val maxCapacity = 100
            // other codes...
          }
        }

    4. Resource settings related to task runtime#

    When a task is submitted to run on Yarn, Yarn provides a configurable interface; Linkis, as a highly extensible framework, can likewise be configured to set the resource allocation.

    The related configuration of Spark and Hive are as follows:

    Part of the Spark configuration lives in linkis-engineconn-plugins/engineconn-plugins; you can adjust it to change the runtime environment of tasks submitted to Yarn. Due to limited space, for more details such as Hive and Yarn configuration, please refer to the source code and the parameters documentation.

        "spark.driver.memory" = 2 //Unit is G    "wds.linkis.driver.cores" = 1    "spark.executor.memory" = 4 //Unit is G    "spark.executor.cores" = 2    "spark.executor.instances" = 3    "wds.linkis.rm.yarnqueue" = "default"

    Upgrade From 0.X To 1.0 Guide

        Please input the choice: ## choice 1

    3. Database upgrade#

         After the service is installed, the database structure needs to be modified, including table structure changes and new tables and data:

    3.1 Table structure modification part:#

         linkis_task: The submit_user and label_json fields are added to the table. The update statement is:

    ALTER TABLE linkis_task ADD submit_user varchar(50) DEFAULT NULL COMMENT 'submitUser name';
    ALTER TABLE linkis_task ADD `label_json` varchar(200) DEFAULT NULL COMMENT 'label json';

    3.2 Need newly executed sql:#

    cd db/module
    ## Add the tables that the enginePlugin service depends on:
    source linkis_ecp.sql
    ## Add the table that the public service-instanceLabel service depends on
    source linkis_instance_label.sql
    ## Add the tables that the linkis-manager service depends on
    source linkis_manager.sql

    3.3 Publicservice-Configuration table modification#

         In order to support the full labeling capability of Linkis 1.X, all the data tables related to the configuration module have been upgraded to labeling, which is completely different from the 0.X Configuration table. It is necessary to re-execute the table creation statement and the initialization statement.

         This means that Linkis0.X users' existing engine configuration parameters can no longer be migrated to Linkis1.0 (it is recommended that users reconfigure the engine parameters once).

         The execution of the table building statement is as follows:

    source linkis_configuration.sql

         Because Linkis 1.0 supports multiple versions of the engine, it is necessary to modify the version of the engine when executing the initialization statement, as shown below:

    vim linkis_configuration_dml.sql
    ## Modify the default version of the corresponding engine
    SET @SPARK_LABEL="spark-2.4.3";
    SET @HIVE_LABEL="hive-1.2.1";
    ## Execute the initialization statement
    source linkis_configuration_dml.sql

    4. Installation and startup Linkis1.0#

         Start Linkis 1.0 to verify whether the service has been started normally and provide external services. For details, please refer to: Quick Deployment Linkis1.0


    Version upgrades above 1.0.3

    Linkis' nginx configuration file is by default in /etc/nginx/conf.d/dss.conf

    #Example
    server {
        ......
        location dss/linkis {
            alias /appcom/Install/linkis-web-newversion/dist; # static file directory
            index index.html index.html;
        }
        ......
    }

    Reload nginx configuration

    sudo nginx -s reload

    5.3 Notes#

    • After the management console is upgraded, because the browser may have a cache, if you want to verify the effect, it is best to clear the browser cache
    Version: 1.1.1

    Console User Manual

    Linkis 1.0 has added a new Computation Governance Console page, which provides users with an interactive UI for viewing the execution of Linkis tasks, custom parameter configuration, engine health status, resource surplus, and so on, thereby simplifying user development and management efforts.

    1. Structure of Computation Governance Console#

    The Computatoin Governance Console is mainly composed of the following functional pages:

    • Global History
    • Resource Management
    • Parameter Configuration
    • Global Variables
    • ECM Management (Only visible to linkis computing management console administrators)
    • Microservice Management (Only visible to linkis computing management console administrators)

    Global history, resource management, parameter configuration, and global variables are visible to all users, while ECM management and microservice management are only visible to linkis computing management console administrators.

    The administrators of the Linkis Computation Governance Console can be configured through the following parameter in linkis.properties:

    wds.linkis.governance.station.admin=hadoop (multiple administrator usernames are separated by ',')

    2. Global history#

    The global history interface provides the user's own linkis task submission record. The execution status of each task can be displayed here, and the reason for the failure of task execution can also be queried by clicking the view button on the left side of the task

    For linkis computing management console administrators, the administrator can view the historical tasks of all users by clicking the switch administrator view on the page.

    3. Resource management#

    In the resource management interface, the user can see the status of the engine currently started and the status of resource occupation, and can also stop the engine through the page.

    4. Parameter configuration#

    The parameter configuration interface provides the function of user-defined parameter management. The user can manage the related configuration of the engine in this interface, and the administrator can add application types and engines here.

    The user can expand all the configuration information in the directory by clicking on the application type at the top and then select the engine type in the application, modify the configuration information and click "Save" to take effect.

    Editing the catalog and adding application types are only visible to the administrator. Click the edit button to delete an existing application and engine configuration (note: deleting an application directly deletes all engine configurations under it and cannot be restored), to add an engine, or click "New Application" to add a new application type.

    5. Global variable#

    In the global variable interface, users can customize variables for code writing, just click the edit button to add parameters.

    6. ECM management#

    The ECM management interface is used by the administrator to manage the ECMs and all engines. On this interface you can view the status information of each ECM, modify its label information, modify its status, and query all engine information under it. Only the administrator can see this page; the administrator configuration method is described in Chapter 2 of this article.

    Click the edit button to edit the label information of the ECM (only part of the labels are allowed to be edited) and modify the status of the ECM.

    Click the instance name of the ECM to view all engine information under the ECM.

    Similarly, you can stop the engine on this interface, and edit the label information of the engine.

    7. Microservice management#

    The microservice management interface shows all microservice information under Linkis, and it is only visible to the administrator. Linkis's own microservices can be viewed by clicking on the Eureka registry; the microservices associated with Linkis are listed directly on this interface.

    DataSource Client SDK (fragment):

        def testMetadataGetDatabases(client: LinkisMetaDataRemoteClient): Unit = {
          client.getDatabases(MetadataGetDatabasesAction.builder()
            .setUser("hadoop")
            .setDataSourceId(9L)
            .setUser("hadoop")
            .setSystem("client")
            .build()
          ).getDbs
        }

    Linkis-Cli Manual

    Note:

    1. variableMap does not support configuration

    2. When there is a conflict between the configured key and the key entered in the command parameter, the priority is as follows:

  Instruction parameters > keys in instruction map-type parameters > user configuration > default configuration. For example, if the same key is set both in the user configuration file and directly as a command parameter, the command parameter's value wins.

    Example:

    Configure engine startup parameters:

   wds.linkis.client.param.conf.spark.executor.instances=3
   wds.linkis.client.param.conf.wds.linkis.yarnqueue=q02

    Configure labelMap parameters:

       wds.linkis.client.label.myLabel=label123

6. Output result set to file#

Use the -outPath parameter to specify an output directory; linkis-cli will then write the result sets to files, one file per result set. Output files are named as follows:

        task-[taskId]-result-[idx].txt    

For example:

    task-906-result-1.txt
    task-906-result-2.txt
    task-906-result-3.txt
Use of UDFs

Prerequisite: the sharing function can only be used by an administrator; otherwise the front-end page does not provide an entry for this operation.

Click the share button of a UDF: a dialog box pops up; enter the list of users to share it with (comma separated).

Note: after a UDF is shared, the recipients still need to actively load it before they can use it.

After sharing, the recipients can find the UDF under "Shared Function", check it to load it, and then use it.

    5 Introduction of other functions#

    5.1 UDF handover#

When a user leaves the company, for example, it may be necessary to hand over personal UDFs to someone else. Click the Handover button, select the recipient, and click OK.

    5.2 UDF Expiration#

A UDF shared with others cannot be deleted directly once any recipient has loaded it; it can only be marked as expired. For the time being this is only a marker and does not affect use.

    5.3 UDF version list#

    Click the "version list" button of a udf to view all versions of the udf. The following features are provided for each version:

Create a new version: copy the selected version as the latest version.

Download: download the UDF file from BML to the local machine.

View the source code: for the python/scala script types the source code can be viewed directly; the jar type is not supported.

Publish: for a shared UDF, a specific version can be published so that it takes effect for the recipients. Note: recipients use the latest published version of the UDF, while the owner always uses the latest version.

    Version: Next(1.1.3)

    Engine Plugin Api

    EnginePluginRestful class

    refresh#

    Interface address:/api/rest_j/v1/engineplugin/refresh

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Refresh a single resource

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
ecType | type | query | false | string |
version | version | query | false | string |

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    refresh all#

    Interface address:/api/rest_j/v1/engineplugin/refreshAll

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Refresh all ec resources

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
Engine Material Refresh Interface

Request Parameters:

none

    Response parameters:

parameter name | parameter description | type | schema
data | | object |
message | | string |
method | | string |
status | | integer(int32) | integer(int32)

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "msg": "Refresh successfully"    }}
    Version: Next(1.1.3)

    Task Management

    EntranceMetricRestfulApi class

    Task management

    start task#

    Interface address:/api/rest_j/v1/entrance/api/metrics/runningtask

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Start task

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    task info#

    Interface address:/api/rest_j/v1/entrance/api/metrics/taskinfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Task information

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
creator | Creator | query | false | string |
engineTypeLabel | Engine Type Label | query | false | string |
user | user | query | false | string |

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    Version: Next(1.1.3)

    Task Action

    EntranceRestfulApi class

    process task request#

    Interface address:/api/rest_j/v1/entrance/execute

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    The execute function handles the request submitted by the user to execute the task

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
json | json | body | true | object |

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Submit the execute function#

    Interface address:/api/rest_j/v1/entrance/submit

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Submit execute function

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
Submit | json | body | true | Submit | Submit

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    end task#

    Interface address: /api/rest_j/v1/entrance/{id}/kill

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    kill task

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
id | ID | path | false | string |
taskID | taskID | query | false | string |

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    End Jobs#

    Interface address: /api/rest_j/v1/entrance/{id}/killJobs

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    End Jobs

    Request example:

    {    "taskIDList": [],    "idList": []}

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
id | id, generated from the request path | true | string | string |
taskIDList | collection of task IDs | false | String | String |
idList | ID collection | false | String | String |

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/entrance/#id/killJobs",    "status": 0,    "message": "success",    "data": {        "messages": [{            "method": "",            "status": 0,            "message": "",            "data": {                "execID": ""            }        }]    }}

    task log#

    Interface address: /api/rest_j/v1/entrance/{id}/log

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get task log

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
id | Task ID | path | false | string |

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Pause task#

    Interface address:/api/rest_j/v1/entrance/{id}/pause

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Pause task

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
id | Task ID | path | false | string |

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Mission progress#

    Interface address:/api/rest_j/v1/entrance/{id}/progress

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Task progress

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
id | Task ID | path | false | string |

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Resource progress#

    Interface address:/api/rest_j/v1/entrance/{id}/progressWithResource

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Resource progress

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
id | ID | path | false | string |

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    task status#

    Interface address:/api/rest_j/v1/entrance/{id}/status

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Task status

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
id | ID | path | false | string |
taskID | taskID | query | false | string |

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    Version: Next(1.1.3)

    EC Resource Information Management

    ECResourceInfoRestfulApi class

    delete EC info#

Interface address: /api/rest_j/v1/linkisManager/ecinfo/delete/{ticketid}

    Request method: DELETE

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Delete EC information

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
ticketid | ticketid | path | true | string |

Response Status:

Status code | Description | schema
200 | OK | Message
204 | No Content |
401 | Unauthorized |
403 | Forbidden |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get EC information#

    Interface address: /api/rest_j/v1/linkisManager/ecinfo/get

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Get EC information

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
ticketid | ticketid | query | true | string |

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    Version: Next(1.1.3)

    ECM Resource Information Management

    ECResourceInfoRestfulApi class

    delete EC info#

Interface address: /api/rest_j/v1/linkisManager/ecinfo/delete/{ticketid}

    Request method: DELETE

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Delete EC information

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
ticketid | ticketid | path | true | string |

Response Status:

Status code | Description | schema
200 | OK | Message
204 | No Content |
401 | Unauthorized |
403 | Forbidden |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get EC information#

    Interface address: /api/rest_j/v1/linkisManager/ecinfo/get

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Get EC information

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
ticketid | ticketid | query | true | string |

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0    }

    ECM resource list#

    Interface address: /api/rest_j/v1/linkisManager/listAllEMs

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

Get a detailed list of all ECM resources; the list can be filtered by conditions, and all resources are returned by default.

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
instance | instance name | query | false | string |
nodeHealthy | status, with enumeration values 'Healthy', 'UnHealthy', 'WARN', 'StockAvailable', 'StockUnavailable' | query | false | string |
owner | Creator | query | false | string |

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/linkisManager/listAllEMs",    "status": 0,    "message": "OK",    "data": {        "EMs": [{            "labels": [{                "stringValue": "",                "labelKey": "",                "feature": "",                "instance": "",                "serviceInstance": {                    "instance": "",                    "applicationName": ""                },                "serviceName": "",                "featureKey": "",                "empty":            }],            "applicationName": "",            "instance": ":",            "resourceType": "",            "maxResource": {                "memory": ,                "cores": ,                "instance":            },            "minResource": {                "memory": ,                "cores": ,                "instance":            },            "usedResource": {                "memory": ,                "cores": ,                "instance":            },            "lockedResource": {                "memory": 0,                "cores": 0,                "instance": 0            },            "expectedResource": {                "memory": 0,                "cores": 0,                "instance": 0            },            "leftResource": {                "memory": ,                "cores": ,                "instance":            },            "owner": "",            "runningTasks": null,            "pendingTasks": null,            "succeedTasks": null,            "failedTasks": null,            "maxMemory": ,            "usedMemory": ,            "systemCPUUsed": null,            "systemLeftMemory": ,            "nodeHealthy": "",            "msg": "",            "startTime":        }]    }}

Edit ECM instance#

    Interface address: /api/rest_j/v1/linkisManager/modifyEMInfo

    Request method: PUT

    Request data type: application/json

    Response data type: application/json

    Interface description:

Edit or modify an instance under ECM management

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
applicationName | Engine Label | false | String | String |
emStatus | instance status, with enumeration values 'Healthy', 'UnHealthy', 'WARN', 'StockAvailable', 'StockUnavailable' | false | String | String |
instance | Engine instance name | false | String | String |
labelKey | the key of a label in the added content, i.e. the key of a map in the labels collection | false | String | String |
labels | the update content for the engine instance; a collection of map entries | false | List | List |
stringValue | the value of a label in the added content, i.e. the value of a map in the labels collection | false | String | String |

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/linkisManager/modifyEMInfo",    "status": 0,    "message": "success",    "data": {}}

    Open engine log#

    Interface address: /api/rest_j/v1/linkisManager/openEngineLog

    Request method: POST

    Request data type: application/json

    Response data type: application/json

    Interface description:

Open the engine log; the stdout log is opened by default.

    Request example:

    {    applicationName: ""    emInstance: ""    instance: ""    parameters: {        pageSize: ,        fromLine: ,        logType: ""    }}

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
applicationName | Engine Label | String | false | String |
emInstance | Instance name | String | false | String |
fromLine | From Line | String | false | String |
instance | Engine instance name | String | false | String |
logType | Log type, default stdout type, belonging to parameters | String | false | String |
pageSize | Page Size | String | false | String |
parameters | Pagination information | Map | false | Map |

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/linkisManager/openEngineLog",    "status": 0,    "message": "OK",    "data": {        "result": {            "logPath": "",            "logs": [""],            "endLine": ,            "rows":        },        "isError": false,        "errorMsg": ""    }}
Engine Management

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
applicationName | the application name; the outermost layer is an array, at the same level as the engineInstance parameter | false | String | String |
engineInstance | the name of the engine instance; the outermost layer is an array, at the same level as the applicationName parameter | false | String | String |

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    Version: Next(1.1.3)

    Resource Management

    RMMonitorRest class

    All user resources#

    Interface address:/api/rest_j/v1/linkisManager/rm/allUserResource

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    All user resources

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
creator | creator | query | false | string |
engineType | engineType | query | false | string |
page | page | query | false | integer(int32) |
size | size | query | false | integer(int32) |
username | username | query | false | string |

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "total": 34,        "resources": [{            "id": ,            "username": "",            "creator": "",            "engineTypeWithVersion": "",            "resourceType": "",            "maxResource": {                "memory": ,                "cores": ,                "instance":            },            "minResource": {                "memory": ,                "cores": "instance": 0            },            "usedResource": {                "memory": ,                "cores": ,                "instance":            },            "lockedResource": {                "memory": 0,                "cores": 0,                "instance": 0            },            "expectedResource": null,            "leftResource": {                "memory": ,                "cores": ,                "instance":            },            "createTime": ,            "updateTime": ,            "loadResourceStatus": "",            "queueResourceStatus":        }]    }}

    Application List#

    Interface address: /api/rest_j/v1/linkisManager/rm/applicationlist

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Get the list of application engines in resource management

    Request example:

{    "userCreator": ""}

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
userCreator | userCreator | query | true | String |

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": ,    "status": ,    "message": "",    "data": {        "applications": [{            "creator": "",            "applicationList": {                "usedResource": {                    "memory": ,                    "cores": ,                    "instance":                },                "maxResource": {                    "memory": ,                    "cores": ,                    "instance":                },                "lockedResource": {                    "memory": ,                    "cores": ,                    "instance":                },                "minResource": {                    "memory": ,                    "cores": ,                    "instance":                },                "engineInstances": [{                    "resource": {                        "resourceType": "",                        "maxResource": {                            "memory": ,                            "cores": ,                            "instance":                        },                        "minResource": {                            "memory": ,                            "cores": ,                            "instance":                        },                        "usedResource": {                            "memory": ,                            "cores": ,                            "instance":                        },                        "lockedResource": {                            "memory": ,                            "cores": ,                            "instance":                        },                        "expectedResource": null,                        "leftResource": {                            "memory": ,                            "cores": ,                            "instance":                        }                    },                    "engineType": "",                    "owner": "",                    "instance": "",                    "creator": "",                    "startTime": "",                    "status": "",                    "label": ""                }]            }        }]    }}

    EngineType#

    Interface address: /api/rest_j/v1/linkisManager/rm/engineType

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface Description:

    Engine Type

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Engine manager#

    Interface address: /api/rest_j/v1/linkisManager/rm/engines

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface Description:

    Engine Manager

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
param | param | body | false | object |

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    queue manager#

    Interface address: /api/rest_j/v1/linkisManager/rm/queueresources

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Queue Manager

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
param | param | body | true | object |

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    queue#

    Interface address: /api/rest_j/v1/linkisManager/rm/queues

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Queue

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
param | param | body | false | object |

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    reset resources#

    Interface address:/api/rest_j/v1/linkisManager/rm/resetResource

    Request method: DELETE

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Reset resources

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
resourceId | resourceId | query | false | integer(int32) |

Response Status:

Status code | Description | schema
200 | OK | Message
204 | No Content |
401 | Unauthorized |
403 | Forbidden |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Resource information#

    Interface address: /api/rest_j/v1/linkisManager/rm/userresources

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Query resource list and detailed resource data such as usage percentage

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
param | param | body | false | object |

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {            "userResources": [{            "userCreator": "",            "engineTypes": [{            "engineType": "",            "percent": ""            }],    "percent": ""        }]    }}
    Version: Next(1.1.3)

    Context History Service

    ContextHistoryRestfulApi class

    create history#

    Interface address:/api/rest_j/v1/contextservice/createHistory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface Description:

    Create History

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
contextHistory | History context | false | String | String |
contextID | context id | false | String | String |

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get multiple histories#

    Interface address:/api/rest_j/v1/contextservice/getHistories

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Get multiple history records

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
contextID | context id | false | String | String |

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get history#

    Interface address:/api/rest_j/v1/contextservice/getHistory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Get history records

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
contextID | ContextId | false | String | String |
source | Context Source | false | String | String |

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete history#

    Interface address:/api/rest_j/v1/contextservice/removeHistory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete history records

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
contextHistory | History context | false | String | String |
contextID | context id | false | String | String |

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    search history#

    Interface address:/api/rest_j/v1/contextservice/searchHistory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Search history

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
contextID | ContextId | false | String | String |
keywords | Keywords | false | String | String |

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    Version: Next(1.1.3)

    Context Listening Service

    ContextListenerRestfulApi class

    Context listener service

    heartbeat#

    Interface address:/api/rest_j/v1/contextservice/heartbeat

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
jsonNode | jsonNode | body | true | JsonNode | JsonNode

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    onBindIDListener#

    Interface address:/api/rest_j/v1/contextservice/onBindIDListener

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
jsonNode | jsonNode | body | true | JsonNode | JsonNode

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    onBindKeyListener#

    Interface address:/api/rest_j/v1/contextservice/onBindKeyListener

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
jsonNode | jsonNode | body | true | JsonNode | JsonNode

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    Version: Next(1.1.3)

    Context Logging Service

    ContextIDRestfulApi class

create context record#

    Interface address: /api/rest_j/v1/contextservice/createContextID

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Create context record

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
jsonNode | jsonNode | true | body | JsonNode | JsonNode
contextID | ContextId | false | String | String |

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

Get context ID#

    Interface address: /api/rest_j/v1/contextservice/getContextID

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Get context ID

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
contextId | ContextId | false | query | string |

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

delete context ID#

    Interface address: /api/rest_j/v1/contextservice/removeContextID

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Delete context ID

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
jsonNode | jsonNode | true | body | JsonNode | JsonNode
contextId | ContextId | false | String | String |

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

reset context ID#

    Interface address: /api/rest_j/v1/contextservice/resetContextID

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface Description:

Reset context ID

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
jsonNode | jsonNode | true | body | JsonNode | JsonNode
contextId | ContextId | false | String | String |

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

Search context ID by execution time#

    Interface address:/api/rest_j/v1/contextservice/searchContextIDByTime

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Search context ID by execution time

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
accessTimeEnd | Access end time | false | query | string |
accessTimeStart | Access start time | false | query | string |
createTimeEnd | Create end time | false | query | string |
createTimeStart | Create start time | false | query | string |
pageNow | page number | false | query | string |
pageSize | page size | false | query | string |
updateTimeEnd | Update end time | false | query | string |
updateTimeStart | Update start time | false | query | string |

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

Modify context ID#

    Interface address: /api/rest_j/v1/contextservice/updateContextID

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Modify context ID

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
jsonNode | jsonNode | true | body | JsonNode | JsonNode
contextId | ContextId | false | String | String |

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
Context API

contextKey | contextKey | false | String | String |

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    Version: Next(1.1.3)

    BM Project Operation Management

    BmlProjectRestful class

Attach resource and project#

    Interface address:/api/rest_j/v1/bml/attachResourceAndProject

    Request mode:POST

    Request data type:application/json

    Response data type:*/*

    Interface description:

Attach resource and project

    Request parameters:

parameter name | parameter description | request type | required | data type | schema
projectName | project name | string | false | string |
resourceid | resource ID | string | false | string |

Response status:

Status code | description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | dataset | object |
message | description | string |
method | request url | string |
status | status | integer | integer

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Create BML project#

    Interface address:/api/rest_j/v1/bml/createBmlProject

    Request mode:POST

    Request data type:application/json

    Response data type:*/*

    Interface description:

    Create BML project

    Request parameters:

parameter name | parameter description | request type | required | data type | schema
accessusers | access users | string | false | string |
editusers | edit user | string | false | string |
projectName | project name | string | false | string |

Response status:

Status code | description | schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | parameter description | type | schema
Data | dataset | object |
Message | description | string |
Method | request url | string |
Status | status | integer | integer

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Download shared resources#

    Interface address:/api/rest_j/v1/bml/downloadShareResource

    Request mode:GET

    Request data type:application/x-www-form-urlencoded

    Response data type:*/*

    Interface description:

    Download shared resources

    Request parameters:

Parameter name | parameter description | request type | required | data type | schema
Resourceid | resource ID | query | false | string |
Version | version | query | false | string |

Response status:

Status code | description | schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

parameter name | parameter description | type | schema
data | dataset | object |
message | description | string |
method | request url | string |
status | status | integer | integer

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Project information#

    Interface address:/api/rest_j/v1/bml/getProjectInfo

    Request mode:GET

    Request data type:application/x-www-form-urlencoded

    Response data type:*/*

    Interface description:

    Project information

    Request parameters:

|Parameter name|parameter description|request type|must be|data type|schema|
|ProjectName|project name|query|false|string||

Response status:

|Status code|description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|Parameter name|parameter description|type|schema|
|Data|dataset|object||
|Message|description|string||
|Method|request url|string||
|Status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Update project user#

    Interface address:/api/rest_j/v1/bml/updateProjectUsers

    Request mode:POST

    Request data type:application/json

    Response data type:*/*

    Interface description:

    Update project users

    Request parameters:

|parameter name|parameter description|whether it is required|request type|data type|schema|
|accessusers|access users|false|string|string||
|editusers|edit user|false|string|string||
|projectName|project name|false|string|string||

Response status:

|Status code|description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|Parameter name|parameter description|type|schema|
|Data|dataset|object||
|Message|description|string||
|Method|request url|string||
|Status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Update shared resources#

    Interface address:/api/rest_j/v1/bml/updateShareResource

    Request mode:POST

    Request data type:multipart/form-data

    Response data type:*/*

    Interface description:

    Update shared resources

    Request parameters:

|parameter name|parameter description|request type|must be|data type|schema|
|file|file|formdata|false|ref||
|resourceid|resource ID|query|false|string||

Response status:

|Status code|description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|Parameter name|parameter description|type|schema|
|Data|dataset|object||
|Message|description|string||
|Method|request url|string||
|Status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Upload shared resources#

    Interface address:/api/rest_j/v1/bml/uploadShareResource

    Request mode:POST

    Request data type:application/json

    Response data type:*/*

    Interface description:

    Upload shared resources

    Request parameters:

|parameter name|parameter description|request type|must be|data type|schema|
|expireTime|expiration time|query|false|string||
|expiretype|expiration type|query|false|string||
|file|file set|formdata|false|ref||
|isexpire|whether it expires|query|false|string||
|maxversion|max version|query|false|ref||
|projectName|project name|query|false|string||
|resourceheader|resource header|query|false|string||
|system|system|query|false|string||

Response status:

|Status code|description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|Parameter name|parameter description|type|schema|
|Data|dataset|object||
|Message|description|string||
|Method|request url|string||
|Status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html index 3181f29281e..3850367de76 100644 --- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html +++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html @@ -7,7 +7,7 @@ BML Resource Management | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    BML Resource Management

    BmlRestfulApi class

    update owner#

    Interface address:/api/rest_j/v1/bml/changeOwner

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Update owner

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|newOwner|New Owner||false|String|String|
|oldOwner|Old Owner||false|String|String|
|resourceId|ResourceId||false|String|String|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Copy resources to other users#

    Interface address:/api/rest_j/v1/bml/copyResourceToAnotherUser

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Copy resources to specified user

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|anotherUser|specified user||false|String|String|
|resourceId|ResourceId||false|String|String|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete resource#

    Interface address:/api/rest_j/v1/bml/deleteResource

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Delete resource

Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|resourceId|ResourceId||true|String|String|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete multiple resources#

    Interface address:/api/rest_j/v1/bml/deleteResources

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete multiple resources

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|resourceIds|Collection of resource IDs, delete multiple resources||true|List|List|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete version#

    Interface address:/api/rest_j/v1/bml/deleteVersion

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete version

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|resourceId|ResourceId||true|String|String|
|version|version||true|String|String|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Download resources#

    Interface address:/api/rest_j/v1/bml/download

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Download the corresponding resource via the two parameters resourceId and version

Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|resourceId|ResourceId|query|false|string||
|version|Resource version; if not specified, defaults to latest|query|false|string||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get Basic#

    Interface address:/api/rest_j/v1/bml/getBasic

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get Basic

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|resourceId|ResourceId|query|true|string||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get resource information#

    Interface address:/api/rest_j/v1/bml/getResourceInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get resource information

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|resourceId|ResourceId|query|false|string||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get resource information#

    Interface address:/api/rest_j/v1/bml/getResources

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get resource information

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|currentPage|page number|query|false|string||
|pageSize|page size|query|false|string||
|system|system|query|false|string||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get version information#

    Interface address: /api/rest_j/v1/bml/getVersions

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get bml version information

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|currentPage|page number|query|false|string||
|pageSize|page size|query|false|string||
|resourceId|Resource ID|query|false|string||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    rollback version#

    Interface address:/api/rest_j/v1/bml/rollbackVersion

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Rollback version

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|resourceId|ResourceId||false|String|String|
|version|Rollback version||false|String|String|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    update resource#

    Interface address:/api/rest_j/v1/bml/updateVersion

    Request method: POST

    Request data type: multipart/form-data

    Response data type: */*

    Interface description:

    Users update resource files through http

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|file|file|formData|true|ref||
|resourceId|resourceId|query|true|string||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    upload resources#

    Interface address:/api/rest_j/v1/bml/upload

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Upload resources

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|file|file|formData|true|array|file|
|expireTime|expireTime|query|false|string||
|expireType|expireType|query|false|string||
|isExpire|isExpire|query|false|string||
|maxVersion|maxVersion|query|false|integer(int32)||
|resourceHeader|resourceHeader|query|false|string||
|system|system|query|false|string||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html index 5d3e2752594..1a97780e597 100644 --- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html +++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html @@ -7,7 +7,7 @@ BMLFS Management | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    BMLFS Management

    BMLFsRestfulApi class

    Open ScriptFromBML#

    Interface address:/api/rest_j/v1/filesystem/openScriptFromBML

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    openScriptFromBML

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|fileName|File name|query|true|string||
|creator|Creator|query|false|string||
|projectName|Project name|query|false|string||
|resourceId|ResourceId|query|false|string||
|version|version|query|false|string||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

product openScriptFromBML#

    Interface address:/api/rest_j/v1/filesystem/product/openScriptFromBML

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    /product/openScriptFromBML

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|fileName|File name|query|true|string||
|creator|Creator|query|false|string||
|resourceId|ResourceId|query|false|string||
|version|version|query|false|string||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

Save script to BML#

    Interface address:/api/rest_j/v1/filesystem/saveScriptToBML

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Save script to BML

Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|creator|Creator|true||String|String|
|fileName|File name|true||String|String|
|metadata|metadata|true||String|String|
|projectName|Project Name|true||String|String|
|resourceId|Resource ID|true||String|String|
|scriptContent|Content|true||String|String|
|SaveScriptToBML|json body|true||SaveScriptToBML|SaveScriptToBML|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/currency-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/currency-api/index.html index 8e575fa190c..103b00a8e20 100644 --- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/currency-api/index.html +++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/currency-api/index.html @@ -7,7 +7,7 @@ Ceneric Api | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

Generic Api

    CommonRestfulApi class

    offline#

    Interface address:/api/rest_j/v1/offline

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Offline

    Request Parameters:

    No

    Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html index b5d88d12a5e..3679a9d726e 100644 --- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html +++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html @@ -7,7 +7,7 @@ DataSourceAdminRestfulApi | Apache Linkis - + @@ -20,7 +20,7 @@ Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|dataSourceId|dataSourceId|path|true|integer(int64)||

Response parameters:

|parameter name|parameter description|type|schema|
|data||object||
|message||string||
|method||string||
|status||integer(int32)|integer(int32)|

    Sample Response:

    {    "method": "/api/data-source-manager/3/connect-params",    "status": 0,    "message": "OK",    "data": {        "connectParams": {            "host": "127.0.0.1",            "password": "xxxxx",            "port": "9600",            "username": "linkis"        }    }}

    getVersionList#

    Interface address: /api/rest_j/v1/data-source-manager/{dataSourceId}/versions

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|dataSourceId|dataSourceId|path|true|integer(int64)||

Response parameters:

|parameter name|parameter description|type|schema|
|data||object||
|message||string||
|method||string||
|status||integer(int32)|integer(int32)|

    Sample Response:

    {    "method": "/api/data-source-manager/1/versions",    "status": 0,    "message": "OK",    "data": {        "versions": [            {                "versionId": 1,                "datasourceId": 1,                "connectParams": {                    "host": "127.0.0.1",                    "password": "xxxxx",                    "port": "9600",                    "username": "linkis"                },                "parameter": "{\"host\":\"127.0.0.1\",\"port\":\"9600\",\"username\":\"linkis\",\"password\": \"rO0ABXQACUFiY2RAMjAyMg==\"}",                "comment": "Initialization Version",                "createUser": "hadoop"            }        ]    }}

    connectDataSource#

    Interface address: /api/rest_j/v1/data-source-manager/{dataSourceId}/{version}/op/connect

    Request method: PUT

    Request data type: application/json

    Response data type: application/json

    Interface description:

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|dataSourceId|dataSourceId|path|true|integer(int64)||
|version|version|path|true|integer(int64)||

Response parameters:

|parameter name|parameter description|type|schema|
|data||object||
|message||string||
|method||string||
|status||integer(int32)|integer(int32)|

    Sample Response:

    {    "method": "/api/data-source-manager/1/1/op/connect",    "status": 0,    "message": "OK",    "data": {        "ok": true    }}

    data-source-operate-restful-api

    connect#

    Interface address:/api/rest_j/v1/data-source-manager/op/connect/json

    Request method: POST

    Request data type: application/json

    Response data type: application/json

    Interface description:

    Request example:

    {  "connectParams": {},  "createIdentify": "",  "createSystem": "",  "createTime": "",  "createUser": "",  "dataSourceDesc": "",  "dataSourceEnv": {    "connectParams": {},    "createTime": "",    "createUser": "",    "dataSourceType": {      "classifier": "",      "description": "",      "icon": "",      "id": "",      "layers": 0,      "name": "",      "option": ""    },    "dataSourceTypeId": 0,    "envDesc": "",    "envName": "",    "id": 0,    "modifyTime": "",    "modifyUser": ""  },  "dataSourceEnvId": 0,  "dataSourceName": "",  "dataSourceType": {    "classifier": "",    "description": "",    "icon": "",    "id": "",    "layers": 0,    "name": "",    "option": ""  },  "dataSourceTypeId": 0,  "expire": true,  "id": 0,  "labels": "",  "modifyTime": "",  "modifyUser": "",  "publishedVersionId": 0,  "versionId": 0,  "versions": [    {      "comment": "",      "connectParams": {},      "createTime": "",      "createUser": "",      "datasourceId": 0,      "parameter": "",      "versionId": 0    }  ]}

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|dataSource|dataSource|body|true|DataSource|DataSource|
|connectParams|||false|object||
|createIdentify|||false|string||
|createSystem|||false|string||
|createTime|||false|string(date-time)||
|createUser|||false|string||
|dataSourceDesc|||false|string||
|dataSourceEnv|||false|DataSourceEnv|DataSourceEnv|
|connectParams|||false|object||
|createTime|||false|string||
|createUser|||false|string||
|dataSourceType|||false|DataSourceType|DataSourceType|
|classifier|||false|string||
|description|||false|string||
|icon|||false|string||
|id|||false|string||
|layers|||false|integer||
|name|||false|string||
|option|||false|string||
|dataSourceTypeId|||false|integer||
|envDesc|||false|string||
|envName|||false|string||
|id|||false|integer||
|modifyTime|||false|string||
|modifyUser|||false|string||
|dataSourceEnvId|||false|integer(int64)||
|dataSourceName|||false|string||
|dataSourceType|||false|DataSourceType|DataSourceType|
|classifier|||false|string||
|description|||false|string||
|icon|||false|string||
|id|||false|string||
|layers|||false|integer||
|name|||false|string||
|option|||false|string||
|dataSourceTypeId|||false|integer(int64)||
|expire|||false|boolean||
|id|||false|integer(int64)||
|labels|||false|string||
|modifyTime|||false|string(date-time)||
|modifyUser|||false|string||
|publishedVersionId|||false|integer(int64)||
|versionId|||false|integer(int64)||
|versions|||false|array|DatasourceVersion|
|comment|||false|string||
|connectParams|||false|object||
|createTime|||false|string||
|createUser|||false|string||
|datasourceId|||false|integer||
|parameter|||false|string||
|versionId|||false|integer||

Response parameters:

|parameter name|parameter description|type|schema|
|data||object||
|message||string||
|method||string||
|status||integer(int32)|integer(int32)|

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}
    - + \ No newline at end of file diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/file-system-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/file-system-api/index.html index 867c51514ed..da15159da5b 100644 --- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/file-system-api/index.html +++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/file-system-api/index.html @@ -7,7 +7,7 @@ Filesystem | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Filesystem

    FsRestfulApi class

    create new Dir#

    Interface address:/api/rest_j/v1/filesystem/createNewDir

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Create a new Dir

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|path|path||true|String|String|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    create new file#

    Interface address: /api/rest_j/v1/filesystem/createNewFile

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Create a new file

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|path|path||true|String|String|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

delete dir or file#

    Interface address: /api/rest_j/v1/filesystem/deleteDirOrFile

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Delete dir or file

Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|path|address||true|String|String|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0    }

    download#

    Interface address:/api/rest_j/v1/filesystem/download

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Download

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|charset|Charset|true||String|String|
|path|address|true||String|String|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    file info#

    Interface address:/api/rest_j/v1/filesystem/fileInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface Description:

    File Information

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|path|address|query|true|string||
|pageSize|page size|query|false|ref||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    format#

    Interface address:/api/rest_j/v1/filesystem/formate

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    resultsets converted to Excel

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|encoding|encoding|query|true|string||
|escapeQuotes|escapeQuotes|query|true|string||
|fieldDelimiter|Field Delimiter|query|true|string||
|hasHeader|whether it has a header|query|true|boolean||
|quote|quote|query|true|string||
|path|address|query|false|string||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

dir file trees#

    Interface address:/api/rest_j/v1/filesystem/getDirFileTrees

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Get the directory file tree under the requested path

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|path|request path|query|true|string||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": "/api/filesystem/getDirFileTrees",    "status": 0,    "message": "OK",    "data": {        "dirFileTrees": {            "name": "",            "path": "",            "properties": null,            "children": [{                "name": "",                "path": "",                "properties": {                    "size": "",                    "modifytime": ""                },                "children": ,                "isLeaf": ,                "parentPath": ""            }],            "isLeaf": ,            "parentPath":        }    }}

    root path#

    Interface address:/api/rest_j/v1/filesystem/getUserRootPath

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get root path

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|pathType|FileType|query|false|string||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    does it exist#

    Interface address: /api/rest_j/v1/filesystem/isExist

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Whether it exists

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|path|address|query|true|string||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    open a file#

    Interface address: /api/rest_j/v1/filesystem/openFile

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Open file

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|path|address|query|true|string||
|charset|Charset|query|false|string||
|page|page number|query|false|ref||
|pageSize|page size|query|false|ref||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": "/api/filesystem/openFile",    "status": 0,    "message": "OK",    "data": {        "metadata": [{            "columnName": "_c0",            "comment": "NULL",            "dataType": ""        }],        "totalPage": ,        "totalLine": ,        "page": ,        "type": "",        "fileContent": [            [""]        ]    }}

Open log#

    Interface address:/api/rest_j/v1/filesystem/openLog

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Open log

Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|path|address|query|false|string||
|proxyUser|Proxy User|query|false|string||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": "/api/filesystem/openLog",    "status": 0,    "message": "OK",    "data": {        "log": ["", ""]    }}

    Rename#

    Interface address:/api/rest_j/v1/filesystem/rename

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Rename the file

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|newDest|new name||false|String|String|
|oldDest|old name||false|String|String|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Convert the result set to Excel#

    Interface address: /api/rest_j/v1/filesystem/resultsetToExcel

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Convert the result set to Excel

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|autoFormat|Auto|query|false|boolean||
|charset|charset|query|false|string||
|csvSeerator|csv Separator|query|false|string||
|limit|limit|query|false|ref||
|nullValue|null value|query|false|string||
|outputFileName|Output file name|query|false|string||
|outputFileType|Output file type|query|false|string||
|path|address|query|false|string||
|quoteRetouchEnable|Whether to enable quote retouching|query|false|boolean||
|sheetName|sheet name|query|false|string||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Convert resultsets to Excel#

    Interface address:/api/rest_j/v1/filesystem/resultsetsToExcel

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    resultsets converted to Excel

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|autoFormat|Auto|query|true|boolean||
|limit|limit|query|true|ref||
|nullValue|null value|query|true|string||
|outputFileName|Output file name|query|true|string||
|path|address|query|false|string||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    save the script#

    Interface address:/api/rest_j/v1/filesystem/saveScript

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Save script

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|path|address|true||String|String|
|SaveScript|json body|true||SaveScript|SaveScript|
|charset|Charset|false||String|String|
|params|params|false||Object|Object|
|scriptContent|script content|false||String|String|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    upload#

    Interface address:/api/rest_j/v1/filesystem/upload

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Upload files, multiple files can be uploaded

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|file|file|formData|false|ref||
|path|address|query|false|string||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/global-variable-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/global-variable-api/index.html index 5ada78589a0..2690bd599cf 100644 --- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/global-variable-api/index.html +++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/global-variable-api/index.html @@ -7,7 +7,7 @@ Add Global Variable | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Add Global Variable

    VariableRestfulApi class

    add global variables#

    Interface address:/api/rest_j/v1/variable/saveGlobalVariable

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Add global variables

    Request example:

    {    globalVariables: [{        keyID: ,        key: "",        valueID: ,        value: ""    }]}

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|globalVariables|Added parameter data, one-to-many; key: globalVariables, value: List|Map|true|Map||
|key|Parameter name, belonging to globalVariables|String|true|String||
|value|Variable value; together with key it forms the key-value pair contained by globalVariables|List|true|List||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": "/api/variable/saveGlobalVariable",    "status": 0,    "message": "OK",    "data": {}}
    - + \ No newline at end of file diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html index 9ec7ef12cb8..a2a3627b6fb 100644 --- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html +++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html @@ -7,7 +7,7 @@ Admin Console Home Page Interface | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Admin Console Home Page Interface

    QueryRestfulApi class

    admin authentication#

    Interface address:/api/rest_j/v1/jobhistory/governanceStationAdmin

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Used to verify whether the current user is an administrator; returns true if so, otherwise false

    Request Parameters:

    No

    Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    data: {        solution: null,        admin: true    }    message: "OK"    method: "/api/jobhistory/governanceStationAdmin"    status: 0}

    global history#

    Interface address:/api/rest_j/v1/jobhistory/list

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Get the global history list according to the given conditions; all records are returned by default

Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|creator|Creator|query|false|string||
|endDate|End time|query|false|integer(int64)||
|executeApplicationName|operator|query|false|string||
|isAdminView|Whether it is in administrator mode or normal mode|query|false|boolean||
|pageSize|page size|query|false|ref||
|proxyUser|Proxy User|query|false|string||
|startDate|Start time|query|false|integer(int64)||
|status|status|query|false|string||
|taskID|ID|query|false|integer(int64)||
|tpageNow|page number|query|false|ref||
|pageNow|pageNow|query|false|integer(int32)||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

{    {        "method": "/api/jobhistory/list",        "status": 0,        "message": "OK",        "data": {            "solution": null,            "totalPage": 90,            "tasks": [{                "taskID": ,                "instance": "",                "execId": "",                "umUser": "",                "engineInstance": null,                "executionCode": "",                "progress": "",                "logPath": "",                "resultLocation": "",                "status": "",                "createdTime": ,                "updatedTime": ,                "engineType": "",                "errCode": 0,                "errDesc": "",                "executeApplicationName": "",                "requestApplicationName": "",                "runType": "datachecker",                "paramsJson": "",                "costTime": 1000,                "strongerExecId": "",                "sourceJson": "",                "sourceTailor": "",                "engineStartTime": null,                "labels": [],                "canRetry": ,                "subJobs":            }]        }    }}
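
A minimal sketch of querying yesterday's failed jobs; the gateway address and authenticated session are assumptions, and startDate/endDate are assumed to be epoch milliseconds based on their integer(int64) type:

import requests
from datetime import datetime, timedelta

GATEWAY = "http://127.0.0.1:9001"  # assumed Linkis gateway address
session = requests.Session()       # assumed to already carry a valid login cookie

now = datetime.now()
params = {
    "startDate": int((now - timedelta(days=1)).timestamp() * 1000),
    "endDate": int(now.timestamp() * 1000),
    "status": "FAILED",
    "pageNow": 1,
    "pageSize": 20,
}
resp = session.get(GATEWAY + "/api/rest_j/v1/jobhistory/list", params=params)
for task in resp.json()["data"]["tasks"]:
    print(task["taskID"], task["status"], task["costTime"])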

list undone#

    Interface address:/api/rest_j/v1/jobhistory/listundone

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Undone task list

Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|creator|creator|query|false|string||
|endDate|End time|query|false|integer(int64)||
|engineType|engineType|query|false|string||
|pageNow|pageNow|query|false|ref||
|pageSize|pageSize|query|false|ref||
|startDate|Start time|query|false|ref||
|startTaskID|startTaskID|query|false|integer(int64)||
|status|status|query|false|string||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    History details#

    Interface address:/api/rest_j/v1/jobhistory/{id}/get

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get the detailed information of a history through the history ID

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|id|HistoryId|query|false|integer(int64)||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": "/api/jobhistory/1928730/get",    "status": 0,    "message": "OK",    "data": {        "task": {            "taskID": ,            "instance": "",            "execId": "",            "umUser": "",            "engineInstance": "",            "executionCode": "",            "progress": "",            "logPath": "",            "resultLocation": "",            "status": "",            "createdTime":,            "updatedTime": ,            "engineType": "",            "errCode": ,            "errDesc": "",            "executeApplicationName": "",            "requestApplicationName": "",            "runType": "hql",            "paramsJson": "",            "costTime": ,            "strongerExecId": "",            "sourceJson": "",            "sourceTailor": "",            "engineStartTime": null,            "labels": [""],            "canRetry": false,            "subJobs": null        }    }}
    - + \ No newline at end of file diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/instance-management-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/instance-management-api/index.html index cf07a0c9e8d..7f1719b251b 100644 --- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/instance-management-api/index.html +++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/instance-management-api/index.html @@ -7,7 +7,7 @@ Instance Management | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Instance Management

    InstanceRestful class

    Microservice instance list#

    Interface address: /api/rest_j/v1/microservice/allInstance

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Get the list of instances of the microservice management module; one or more can be fetched, all by default

    Request Parameters:

    No

    Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "instances": [{            "id": ,            "updateTime": ,            "createTime": ,            "applicationName": ",            "instance": "",            "labels": [{                "stringValue": "",                "labelKey": "",                "feature": "",                "id": 5,                "labelValueSize": 0,                "modifiable": true,                "updateTime": ,                "createTime": ,                "featureKey": "",                "empty":            }]        }]    }}

    Get eurekaURL#

    Interface address: /api/rest_j/v1/microservice/eurekaURL

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    return eurekaURL

    Request Parameters:

    No

    Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "url": ""    }}

    Edit the microservice instance#

    Interface address: /api/rest_j/v1/microservice/instanceLabel

    Request method: PUT

    Request data type: application/json

    Response data type: */*

    Interface description:

    Edit or modify the instance in microservice management

    Request example:

{    applicationName: "linkis-ps-cs",    instance: "bdpdws110004:9108",    labels: [{        labelKey: "route",        stringValue: "cs_2_dev"    }]}

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|applicationName|Engine Label|String|false|String||
|instance|Engine instance name|String|false|String||
|labelKey|The label key in the added content; the key of the map in the labels collection|String|false|String||
|labels|The update content for the engine instance; a collection of maps|List|false|List||
|stringValue|The label value in the added content; the value of the map in the labels collection|String|false|String||

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": null,    "status": 0,    "message": "success",    "data": {        "labels": [{            "stringValue": "",            "labelKey": "",            "feature": null,            "modifiable": ,            "featureKey": "",            "empty":        }]    }}

    Modifiable label types#

    Interface address:/api/rest_j/v1/microservice/modifiableLabelKey

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get a list of label types that can be modified, such as 'userCreator, route'

    Request Parameters:

    No

    Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {    "keyList": []    }}
    - + \ No newline at end of file diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html index 9b74f3971b2..d7448ff25f6 100644 --- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html +++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html @@ -7,7 +7,7 @@ History Job Interface | Apache Linkis - + @@ -16,7 +16,7 @@ none

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "admin": true    }}

    getHistoryTask#

    Interface address:/api/rest_j/v1/jobhistory/{id}/get

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

Interface description: Get the details of a history task by its ID

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|id|id|path|true|string||

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "task": {                "taskID": 1,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "FAILED",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"        }    }}

    listHistoryTask#

    Interface address:/api/rest_j/v1/jobhistory/list

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
startDate | startDate | path | false | Long
endDate | endDate | path | false | Long
status | status | path | false | string
pageNow | pageNow | path | false | Integer
pageSize | pageSize | path | false | Integer
taskID | taskID | path | false | Long
executeApplicationName | executeApplicationName | path | false | string
creator | creator | path | false | string
proxyUser | proxyUser | path | false | string
isAdminView | isAdminView | path | false | Boolean

    Sample Response:

    {    "method": null,        "status": 0,        "message": "OK",        "data": {            "tasks": [{                "taskID": 1,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "FAILED",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"            },            {                "taskID": 2,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "FAILED",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"            }],            "totalPage": 1    }}

    listUndoneHistoryTask#

    Interface address:/api/rest_j/v1/jobhistory/listundone

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
startDate | startDate | path | false | Long
endDate | endDate | path | false | Long
status | status | path | false | string
pageNow | pageNow | path | false | Integer
pageSize | pageSize | path | false | Integer
startTaskID | startTaskID | path | false | Long
engineType | engineType | path | false | string
creator | creator | path | false | string

    Sample Response:

    {    "method": null,        "status": 0,        "message": "OK",        "data": {            "tasks": [{                "taskID": 1,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "Running",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"            },            {                "taskID": 2,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "Running",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"            }],            "totalPage": 1    }}
diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/link-error-code/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/link-error-code/index.html (Linkis Error Codes | Apache Linkis)
    Version: Next(1.1.3)

    Linkis Error Codes

    LinkisErrorCodeRestful class

    Get Linkis error code#

    Interface address:/api/rest_j/v1/errorcode/getAllErrorCodes

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get Linkis error code list

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html (Mdq Table Interface | Apache Linkis)
    Version: Next(1.1.3)

    Mdq Table Interface

    MdqTableRestfulApi class

    Activate table operations#

    Interface address:/api/rest_j/v1/datasource/active

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Activate table operation

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
tableId | Table ID | query | false | string

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Generate the DDL statement for the new library table#

    Interface address:/api/rest_j/v1/datasource/displaysql

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Generate DDL statement for new library table

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
table | Table | String | false | String

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get partition statistics#

    Interface address:/api/rest_j/v1/datasource/getPartitionStatisticInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get partition statistics

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
database | Datasource | query | false | string
partitionSort | Partition Sort | String | false | String
tableName | table name | query | false | string
partitionPath | partitionPath | query | false | string

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get table information#

    Interface address:/api/rest_j/v1/datasource/getTableBaseInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get table information

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
database | Datasource | query | false | string
tableName | table name | query | false | string

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get table field information#

    Interface address:/api/rest_j/v1/datasource/getTableFieldsInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get table field information

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
database | Datasource | query | false | string
tableName | table name | query | false | string

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get table statistics#

    Interface address:/api/rest_j/v1/datasource/getTableStatisticInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get table statistics

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
database | Datasource | query | false | string
pageNow | page number | query | false | string
pageSize | page size | query | false | string
partitionSort | Partition Sort | query | false | string
tableName | table name | query | false | string

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Active ID#

    Interface address:/api/rest_j/v1/datasource/persistTable

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Persist the table (activation flag)

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
table | Table | false | String | String

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html (MetadataCoreRestful | Apache Linkis)
    Version: Next(1.1.3)

    MetadataCoreRestful

    getColumns#

    Interface address: /api/rest_j/v1/metadatamanager/columns/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description: Get the column information of the data table

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
dataSourceId | dataSourceId | path | true | string
database | database | path | true | string
system | system | query | true | string
table | table | path | true | string

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "columns": [            {                "index": 1,                "primaryKey": true,                "name": "id",                "type": "INT"            },            {                "index": 2,                "primaryKey": false,                "name": "datasource_name",                "type": "VARCHAR"            },            {                "index": 3,                "primaryKey": false,                "name": "datasource_desc",                "type": "VARCHAR"            },            {                "index": 4,                "primaryKey": false,                "name": "datasource_type_id",                "type": "INT"            },            {                "index": 5,                "primaryKey": false,                "name": "create_identify",                "type": "VARCHAR"            },            {                "index": 6,                "primaryKey": false,                "name": "create_system",                "type": "VARCHAR"            },            {                "index": 7,                "primaryKey": false,                "name": "parameter",                "type": "VARCHAR"            },            {                "index": 8,                "primaryKey": false,                "name": "create_time",                "type": "DATETIME"            },            {                "index": 9,                "primaryKey": false,                "name": "modify_time",                "type": "DATETIME"            },            {                "index": 10,                "primaryKey": false,                "name": "create_user",                "type": "VARCHAR"            },            {                "index": 11,                "primaryKey": false,                "name": "modify_user",                "type": "VARCHAR"            },            {                "index": 12,                "primaryKey": false,                "name": "labels",                "type": "VARCHAR"            },            {                "index": 13,                "primaryKey": false,                "name": "version_id",                "type": "INT"            },            {                "index": 14,                "primaryKey": false,                "name": "expire",                "type": "TINYINT"            },            {                "index": 15,                "primaryKey": false,                "name": "published_version_id",                "type": "INT"            }        ]    }}

    getDatabases#

    Interface address:/api/rest_j/v1/metadatamanager/dbs/{dataSourceId}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description: Get the list of database names of the data source

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
dataSourceId | dataSourceId | path | true | string
system | system | query | true | string

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "dbs": [            "information_schema",            "linkis",            "linkis_sit"        ]    }}

    getPartitions#

    Interface address:/api/rest_j/v1/metadatamanager/partitions/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
dataSourceId | dataSourceId | path | true | string
database | database | path | true | string
system | system | query | true | string
table | table | path | true | string
traverse | traverse | query | false | boolean

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "props": {            "partKeys": [                "ds"            ],            "root": {}        }    }}

    getTableProps#

    Interface address:/api/rest_j/v1/metadatamanager/props/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
dataSourceId | dataSourceId | path | true | string
database | database | path | true | string
system | system | query | true | string
table | table | path | true | string

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "props": {            "skip.header.line.count": "1",            "columns.types": "int:int:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string",            "columns": "id,age,job,marital,education,default,balance,housing,loan,contact,day,month,duration,campaign,pdays,previous,poutcome,y",            "field.delim": ",",            "transient_lastDdlTime": "1646732554",            "partition_columns.types": "string",            "columns.comments": "\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000",            "bucket_count": "-1",            "serialization.ddl": "struct demo_data { i32 id, i32 age, string job, string marital, string education, string default, string balance, string housing, string loan, string contact, string day, string month, string duration, string campaign, string pdays, string previous, string poutcome, string y}",            "file.outputformat": "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat",            "partition_columns": "ds",            "colelction.delim": "-",            "serialization.lib": "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",            "name": "dss_autotest.demo_data",            "location": "hdfs://bdpdev01/user/hive/warehouse/hadoop/dss_autotest.db/demo_data",            "mapkey.delim": ":",            "file.inputformat": "org.apache.hadoop.mapred.TextInputFormat",            "serialization.format": ",",            "column.name.delimiter": ","        }    }}
diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html (Parameter Configuration | Apache Linkis)
    Version: Next(1.1.3)

    Parameter Configuration

    ConfigurationRestfulApi class

    Add KeyForEngine#

    Interface address:/api/rest_j/v1/configuration/addKeyForEngine

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Add KeyForEngine

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
engineType | engineType | query | false | string
keyJson | keyJson | query | false | string
token | token | query | false | string
version | version | query | false | string

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Add application type#

    Interface address:/api/rest_j/v1/configuration/createFirstCategory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Add application type tag

    Request example:

    {    "categoryName": "",    "description": ""}

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
categoryName | Reference type label name | false | String | String
description | Description | false | String | String

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/createFirstCategory",    "status": 0,    "message": "OK",    "data": {}}

    Add parameter configuration#

    Interface address:/api/rest_j/v1/configuration/createSecondCategory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Add parameter configuration

    Request example:

{    categoryId: "",    description: "",    engineType: "",    version: ""}

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
categoryId | Parameter Configuration Id | true | String | String
description | Description | true | String | String
engineType | Engine Type | true | String | String
version | version number | true | String | String

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/createSecondCategory",    "status": 0,    "message": "OK",    "data": {}}

    delete configuration#

    Interface address: /api/rest_j/v1/configuration/deleteCategory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete parameter configuration

    Request example:

{    categoryId: ""}

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
categoryId | Parameter Configuration Id | String | true | String

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/deleteCategory",    "status": 0,    "message": "OK",    "data": {}}

    Engine type list#

    Interface address:/api/rest_j/v1/configuration/engineType

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get a list of engine types

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/engineType",    "status": 0,    "message": "OK",    "data": {    "engineType": []    }}

    App types#

    Interface address: /api/rest_j/v1/configuration/getCategory

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Application type tags in the parameter configuration

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/getCategory",    "status": 0,    "message": "OK",    "data": {        "Category": [{            "categoryId": ,            "labelId": ,            "categoryName": "",            "childCategory": [],            "description": null,            "tag": null,            "createTime": ,            "updateTime": ,            "level": ,            "fatherCategoryName": ""        }],        "description": null,        "tag": null,        "createTime": ,        "updateTime": ,        "level": ,        "fatherCategoryName":    }]}}

    queue resources#

    Interface address:/api/rest_j/v1/configuration/getFullTreesByAppName

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    The queue resource module in the parameter configuration returns the column and value of the queue resource

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
creator | label name | query | false | string
engineType | engineType | query | false | string
version | version | query | false | string

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/getFullTreesByAppName",    "status": 0,    "message": "OK",    "data": {        "fullTree": [{            "name": "Queue Resource",            "description": null,            "settings": [{                "id": ,                "key": "",                "description": "",                "name": "",                "defaultValue": "",                "validateType": "",                "validateRange": "[]",                "level": 1,                "engineType": ,                "treeName": "",                "valueId": ,                "configValue": "",                "configLabelId": ,                "unit": null,                "isUserDefined": ,                "hidden": ,                "advanced":            }]        }]    }}

    Get key value#

    Interface address:/api/rest_j/v1/configuration/keyvalue

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get key value

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
configKey | configKey | query | true | string
creator | creator | query | false | string
engineType | engineType | query | false | string
version | version | query | false | string

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    save key value#

    Interface address:/api/rest_j/v1/configuration/keyvalue

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Save key value

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
configKey | configKey | true | String | String
configValue | configValue | true | String | String
creator | creator | true | String | String
engineType | engineType | true | String | String
version | version | true | String | String
SaveKeyValue | json | true | body | SaveKeyValue | SaveKeyValue

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete key value#

    Interface address:/api/rest_j/v1/configuration/keyvalue

    Request method: DELETE

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Delete key value

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
configKey | configKey | true | String | String
creator | creator | true | String | String
engineType | engineType | true | String | String
version | version | true | String | String

    Response Status:

Status code | Description | schema
200 | OK | Message
204 | No Content
401 | Unauthorized
403 | Forbidden

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    rpc test#

    Interface address: /api/rest_j/v1/configuration/rpcTest

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    rpc test

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
creator | creator | query | false | string
engineType | engineType | query | false | string
username | username | query | false | string
version | version | query | false | string

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Save queue resources#

    Interface address:/api/rest_j/v1/configuration/saveFullTree

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Save queue resources

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
creator | App Type Name | String | true | String
description | Description, belonging to the content in fullTree | String | true | String
engineType | Engine Type | String | true | String
fullTree | Details under Application Type | List | true | List
name | Queue resource name, which belongs to the content in fullTree | String | true | String
settings | Detailed content in the queue resource, belonging to the content in fullTree | List | true | List

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/saveFullTree",    "status": 0,    "message": "OK",    "data": {}}

    Update category information#

    Interface address: /api/rest_j/v1/configuration/updateCategoryInfo

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Update category information

Request example:

{    description: "",    categoryId: ""}

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
categoryId | categoryId | String | true | String
description | description | String | true | String

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/updateCategoryInfo",    "status": 0,    "message": "OK",    "data": {}}
diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/udf-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/udf-api/index.html (UDF Operations Management | Apache Linkis)
    Version: Next(1.1.3)

    UDF Operations Management

    UDFApi class

    new#

    Interface address:/api/rest_j/v1/udf/add

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Add a UDF

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
clusterName | clusterName | false | String | String
createTime | CreateTime | false | Date | Date
createUser | Creator | false | String | String
description | Description | false | String | String
directory | Category, personal function first-level directory | false | String | String
isExpire | is invalid | false | Boolean | Boolean
isLoad | Whether to load | false | Boolean | Boolean
isShared | Shared | false | Boolean | Boolean
path | Only store the last uploaded path of the user for prompting | false | String | String
registerFormat | register execution address | false | String | String
sys | sys | false | String | String
treeId | treeId | false | Long | Long
udfName | udfName | false | String | String
udfType | udfType | false | Integer | Integer
updateTime | Update time | false | Date | Date
useFormat | Use Format | false | String | String

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    udf tree menu#

    Interface address:/api/rest_j/v1/udf/all

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Get detailed information of udf tree menu

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
path | Request Path | false | String | String
jsonString | jsonString | false | string | string

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    Get udf user list#

    Interface address:/api/rest_j/v1/udf/allUdfUsers

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get udf user list

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

authenticate#

    Interface address: /api/rest_j/v1/udf/authenticate

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Authenticate the user

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    delete#

    Interface address:/api/rest_j/v1/udf/delete/{id}

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
id | id | false | integer | integer(int64)

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    udf file download to local#

    Interface address:/api/rest_j/v1/udf/downloadToLocal

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Download UDF file to local according to version parameters

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | integer
version | version | false | String | String

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    UDF View source code#

    Interface address:/api/rest_j/v1/udf/downloadUdf

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    UDF view source code

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | integer
version | version | false | String | String

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    Publish#

    Interface address:/api/rest_j/v1/udf/publish

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Publish a UDF version

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | integer
version | version | false | String | String

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

rollback version#

    Interface address:/api/rest_j/v1/udf/rollback

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Roll back to a previous version

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | integer
version | version | false | String | String

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    set expiration#

    Interface address:/api/rest_j/v1/udf/setExpire

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Set the UDF as expired

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | Long | Long

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    UDF sharing#

    Interface address: /api/rest_j/v1/udf/shareUDF

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    UDF sharing

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
sharedUsers | sharedUsers | false | List | List
udfInfo | udfInfo | false | UDFInfo | UDFInfo

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    tree new#

    Interface address:/api/rest_j/v1/udf/tree/add

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Add a tree node

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
category | category | false | String | String
childrens | childrens | false | List | List
clusterName | clusterName | false | String | String
createTime | createTime | false | Date | Date
description | description | false | String | String
id | id | false | Long | Long
name | name | false | String | String
parent | parent | false | Long | Long
udfInfos | udfInfos | false | List | List
updateTime | updateTime | false | Date | Date
userName | userName | false | String | String

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    tree delete#

    Interface address:/api/rest_j/v1/udf/tree/delete/{id}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    tree delete

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
id | id | false | integer | integer(int64)

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    tree update#

    Interface address:/api/rest_j/v1/udf/tree/update

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    tree update

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
category | category | false | String | String
childrens | childrens | false | List | List
clusterName | clusterName | false | String | String
createTime | createTime | false | Date | Date
description | description | false | String | String
id | id | false | Long | Long
name | name | false | String | String
parent | parent | false | Long | Long
udfInfos | udfInfos | false | List | List
updateTime | updateTime | false | Date | Date
userName | userName | false | String | String

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

update#

    Interface address:/api/rest_j/v1/udf/update

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    UDF modification

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
description | Description | false | String | String
id | id | false | Long | Long
isLoad | Whether to load | false | Boolean | Boolean
path | Only store the last uploaded path of the user for prompting | false | String | String
registerFormat | register execution address | false | String | String
udfName | udfName | false | String | String
udfType | udfType | false | Integer | Integer
useFormat | Use Format | false | String | String

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    Get user directory#

    Interface address: /api/rest_j/v1/udf/userDirectory

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get the first-level classification of the user's personal function

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
category | Get the user directory of the specified collection type; if the type is UDF, get the user directory under this type | false | string | string

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    version list#

    Interface address:/api/rest_j/v1/udf/versionList

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    View version list

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | integer | integer(int64)

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}
diff --git a/docs/1.1.3/api/jdbc_api/index.html b/docs/1.1.3/api/jdbc_api/index.html (Task Submission And Execution Of JDBC API | Apache Linkis)

    //3. Create statement and execute query
    Statement st = connection.createStatement();
    ResultSet rs = st.executeQuery("show tables");
    //4. Process the returned results of the database (using the ResultSet class)
    while (rs.next()) {
        ResultSetMetaData metaData = rs.getMetaData();
        for (int i = 1; i <= metaData.getColumnCount(); i++) {
            System.out.print(metaData.getColumnName(i) + ":" + metaData.getColumnTypeName(i) + ": " + rs.getObject(i) + " ");
        }
        System.out.println();
    }
    // close resources
    rs.close();
    st.close();
    connection.close();
}
diff --git a/docs/1.1.3/api/linkis_task_operator/index.html b/docs/1.1.3/api/linkis_task_operator/index.html (Task Submission and Execution Rest Api | Apache Linkis)
    Version: Next(1.1.3)

    Linkis Task submission and execution Rest API document

• The Linkis Restful interfaces return responses in the following standard format:
    { "method": "", "status": 0, "message": "", "data": {}}

    Convention:

    • method: Returns the requested Restful API URI, which is mainly used in WebSocket mode.
    • status: return status information, where: -1 means no login, 0 means success, 1 means error, 2 means verification failed, 3 means no access to the interface.
    • data: return specific data.
    • message: return the requested prompt message. If the status is not 0, the message returned is an error message, and the data may have a stack field, which returns specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification

    1. Submit task#

    • Interface /api/rest_j/v1/entrance/submit

    • Submission method POST

    • Request Parameters

    {  "executionContent": {    "code": "show tables",    "runType": "sql"  },  "params": {    "variable": {// task variable       "testvar": "hello"     },    "configuration": {      "runtime": {// task runtime params         "jdbc.url": "XX"      },      "startup": { // ec start up params         "spark.executor.cores": "4"      }    }  },  "source": { //task source information    "scriptPath": "file:///tmp/hadoop/test.sql"  },  "labels": {    "engineType": "spark-2.4.3",    "userCreator": "hadoop-IDE"  }}

• Sample Response

    { "method": "/api/rest_j/v1/entrance/submit", "status": 0, "message": "Request executed successfully", "data": {   "execID": "030418IDEhivebdpdwc010004:10087IDE_hadoop_21",   "taskID": "123" }}
• execID is the unique execution ID generated for the task after it is submitted to Linkis. It is of type String. This ID is only useful while the task is running, similar to the concept of a PID. The ExecID is designed as: (requestApplicationName length)(executeAppName length)(Instance length)${requestApplicationName}${executeApplicationName}${entranceInstance information ip+port}${requestApplicationName}_${umUser}_${index}

• taskID is the unique ID that represents the task submitted by the user. This ID is generated by database auto-increment and is of type Long
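As an illustration, the following is a minimal sketch of calling this interface from Java with the JDK's built-in HTTP client. The gateway address and the session cookie value are assumptions; in a real deployment, obtain the cookie from the Login interface first:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class SubmitExample {
        public static void main(String[] args) throws Exception {
            // Minimal payload: executionContent plus the two mandatory labels
            String payload = "{\"executionContent\": {\"code\": \"show tables\", \"runType\": \"sql\"},"
                    + " \"labels\": {\"engineType\": \"spark-2.4.3\", \"userCreator\": \"hadoop-IDE\"}}";
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create("http://127.0.0.1:9001/api/rest_j/v1/entrance/submit")) // assumed gateway address
                    .header("Content-Type", "application/json")
                    .header("Cookie", "...") // session cookie returned by the Login interface
                    .POST(HttpRequest.BodyPublishers.ofString(payload))
                    .build();
            HttpResponse<String> response = HttpClient.newHttpClient()
                    .send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println(response.body()); // on success, data.execID and data.taskID
        }
    }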

    2. Get Status#

    • Interface /api/rest_j/v1/entrance/${execID}/status

    • Submission method GET

    • Sample Response

    { "method": "/api/rest_j/v1/entrance/{execID}/status", "status": 0, "message": "Get status successful", "data": {   "execID": "${execID}",   "status": "Running" }}

    3. Get Logs#

    • Interface /api/rest_j/v1/entrance/${execID}/log?fromLine=${fromLine}&size=${size}

    • Submission method GET

• The request parameter fromLine refers to the line number from which to start fetching, and size refers to the number of log lines returned by this request

    • Sample Response, where the returned fromLine needs to be used as a parameter for the next request of this interface

    {  "method": "/api/rest_j/v1/entrance/${execID}/log",  "status": 0,  "message": "Return log information",  "data": {    "execID": "${execID}",  "log": ["error log","warn log","info log", "all log"],  "fromLine": 56  }}

    4. Get Progress and resource#

    • Interface /api/rest_j/v1/entrance/${execID}/progressWithResource

    • Submission method GET

    • Sample Response

    {  "method": "/api/entrance/exec_id018017linkis-cg-entrance127.0.0.1:9205IDE_hadoop_spark_2/progressWithResource",  "status": 0,  "message": "OK",  "data": {    "yarnMetrics": {      "yarnResource": [        {          "queueMemory": 9663676416,          "queueCores": 6,          "queueInstances": 0,          "jobStatus": "COMPLETED",          "applicationId": "application_1655364300926_69504",          "queue": "default"        }      ],      "memoryPercent": 0.009,      "memoryRGB": "green",      "coreRGB": "green",      "corePercent": 0.02    },    "progress": 0.5,    "progressInfo": [      {        "succeedTasks": 4,        "failedTasks": 0,        "id": "jobId-1(linkis-spark-mix-code-1946915)",        "totalTasks": 6,        "runningTasks": 0      }    ],    "execID": "exec_id018017linkis-cg-entrance127.0.0.1:9205IDE_hadoop_spark_2"  }}

    5. Kill Task#

    • Interface /api/rest_j/v1/entrance/${execID}/kill

    • Submission method POST

    • Sample Response

    { "method": "/api/rest_j/v1/entrance/{execID}/kill", "status": 0, "message": "OK", "data": {   "execID":"${execID}"  }}

    6. Get task info#

    • Interface /api/rest_j/v1/jobhistory/{id}/get

    • Submission method GET

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
id | task id | path | true | string |
    • Sample Response
    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "task": {                "taskID": 1,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "FAILED",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"        }    }}

    7. Get result set info#

    Support for multiple result sets

    • Interface /api/rest_j/v1/filesystem/getDirFileTrees

    • Submission method GET

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
path | result directory | query | true | string |
    • Sample Response
    {  "method": "/api/filesystem/getDirFileTrees",  "status": 0,  "message": "OK",  "data": {    "dirFileTrees": {      "name": "1946923",      "path": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923",      "properties": null,      "children": [        {          "name": "_0.dolphin",          "path": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923/_0.dolphin",//result set 1          "properties": {            "size": "7900",            "modifytime": "1657113288360"          },          "children": null,          "isLeaf": true,          "parentPath": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923"        },        {          "name": "_1.dolphin",          "path": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923/_1.dolphin",//result set 2          "properties": {            "size": "7900",            "modifytime": "1657113288614"          },          "children": null,          "isLeaf": true,          "parentPath": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923"        }      ],      "isLeaf": false,      "parentPath": null    }  }}

    8. Get result content#

    • Interface /api/rest_j/v1/filesystem/openFile

    • Submission method GET

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
path | result path | query | true | string |
charset | Charset | query | false | string |
page | page number | query | false | ref |
pageSize | page size | query | false | ref |
    • Sample Response
    {  "method": "/api/filesystem/openFile",  "status": 0,  "message": "OK",  "data": {    "metadata": [      {        "columnName": "count(1)",        "comment": "NULL",        "dataType": "long"      }    ],    "totalPage": 0,    "totalLine": 1,    "page": 1,    "type": "2",    "fileContent": [      [        "28"      ]    ]  }}

    9. Get Result by stream#

    Get the result as a CSV or Excel file

    • Interface /api/rest_j/v1/filesystem/resultsetToExcel

    • Submission method GET

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
autoFormat | Auto format | query | false | boolean |
charset | charset | query | false | string |
csvSeperator | csv separator | query | false | string |
limit | row limit | query | false | ref |
nullValue | null value | query | false | string |
outputFileName | Output file name | query | false | string |
outputFileType | Output file type (csv or excel) | query | false | string |
path | result path | query | false | string |
quoteRetouchEnable | Whether to enable quote retouching | query | false | boolean |
sheetName | sheet name | query | false | string |
    • Response
    binary stream
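A minimal Java sketch of saving the stream to a local file follows; the gateway address, cookie, and output choices are assumptions:

    import java.net.URI;
    import java.net.URLEncoder;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Path;

    public class ResultDownload {
        public static void main(String[] args) throws Exception {
            String path = "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923/_0.dolphin";
            String url = "http://127.0.0.1:9001/api/rest_j/v1/filesystem/resultsetToExcel"
                    + "?outputFileType=csv&outputFileName=result&path="
                    + URLEncoder.encode(path, StandardCharsets.UTF_8);
            HttpRequest req = HttpRequest.newBuilder()
                    .uri(URI.create(url))
                    .header("Cookie", "...") // session cookie returned by the Login interface
                    .GET().build();
            // The response body is a binary stream; write it straight to a local file
            HttpClient.newHttpClient().send(req, HttpResponse.BodyHandlers.ofFile(Path.of("result.csv")));
        }
    }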

    10. Compatible with 0.x task submission interface#

    • Interface /api/rest_j/v1/entrance/execute

    • Submission method POST

    • Request Parameters
    {    "executeApplicationName": "hive", //Engine type    "requestApplicationName": "dss", //Client service type    "executionCode": "show tables",    "params": {      "variable": {// task variable         "testvar": "hello"      },      "configuration": {        "runtime": {// task runtime params           "jdbc.url": "XX"        },        "startup": { // ec start up params           "spark.executor.cores": "4"        }      }    },    "source": { //task source information      "scriptPath": "file:///tmp/hadoop/test.sql"    },    "labels": {      "engineType": "spark-2.4.3",      "userCreator": "hadoop-IDE"    },    "runType": "hql", //The type of script to run    "source": {"scriptPath":"file:///tmp/hadoop/1.hql"}}
    • Sample Response
    { "method": "/api/rest_j/v1/entrance/execute", "status": 0, "message": "Request executed successfully", "data": {   "execID": "030418IDEhivebdpdwc010004:10087IDE_hadoop_21",   "taskID": "123" }}
    - + \ No newline at end of file diff --git a/docs/1.1.3/api/login_api/index.html b/docs/1.1.3/api/login_api/index.html index bb76e7a707d..dee839b2b9e 100644 --- a/docs/1.1.3/api/login_api/index.html +++ b/docs/1.1.3/api/login_api/index.html @@ -7,7 +7,7 @@ Login Api | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Login Document

    1. Docking With LDAP Service#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Add LDAP related configuration:

wds.linkis.ldap.proxy.url=ldap://127.0.0.1:389/  # LDAP service URL
wds.linkis.ldap.proxy.baseDN=dc=webank,dc=com    # Configuration of LDAP service

2. How To Enable Test Mode For Login-Free Access#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Turn on the test mode and the parameters are as follows:

    wds.linkis.test.mode=true    # Open test mode
    wds.linkis.test.user=hadoop  # Specify which user to delegate all requests to in test mode

3. Login Interface Summary#

    We provide the following login-related interfaces:

    • Login In

    • Login Out

    • Heart Beat

    4. Interface details#

• The return of the Linkis Restful interface follows this standard format:
{ "method": "", "status": 0, "message": "", "data": {}}

Protocol

• method: returns the requested Restful API URI, mainly used in WebSocket mode.
• status: returns status information, where -1 means not logged in, 0 means success, 1 means error, 2 means verification failed, and 3 means no access to the interface.
• data: returns the specific data.
• message: returns the prompt message of the request. If the status is not 0, the message is an error message, and data may contain a stack field carrying the specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification

    1). Login In#

    • Interface /api/rest_j/v1/user/login

    • Submission method POST

          {        "userName": "",        "password": ""      }
• Response example
        {        "method": null,        "status": 0,        "message": "login successful(登录成功)!",        "data": {            "isAdmin": false,            "userName": ""        }     }

    Among them:

• isAdmin: Linkis has only admin users and non-admin users. The only privilege of admin users is support for viewing the historical tasks of all users in the Linkis management console.
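A minimal Java sketch of logging in and keeping the session follows. A CookieManager stores the session cookie set by the gateway so that subsequent requests through the same client are authenticated; the gateway address and credentials are placeholders:

    import java.net.CookieManager;
    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class LoginExample {
        public static void main(String[] args) throws Exception {
            // The CookieManager keeps the session cookie for subsequent calls
            HttpClient client = HttpClient.newBuilder()
                    .cookieHandler(new CookieManager())
                    .build();
            String payload = "{\"userName\": \"hadoop\", \"password\": \"***\"}"; // placeholder credentials
            HttpRequest login = HttpRequest.newBuilder()
                    .uri(URI.create("http://127.0.0.1:9001/api/rest_j/v1/user/login")) // assumed gateway address
                    .header("Content-Type", "application/json")
                    .POST(HttpRequest.BodyPublishers.ofString(payload))
                    .build();
            HttpResponse<String> resp = client.send(login, HttpResponse.BodyHandlers.ofString());
            System.out.println(resp.body()); // status 0 and data.isAdmin on success
        }
    }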

    2). Login Out#

    • Interface /api/rest_j/v1/user/logout

    • Submission method POST

      No parameters

• Response example

        {        "method": "/api/rest_j/v1/user/logout",        "status": 0,        "message": "Logout successful(退出登录成功)!"    }

    3). Heart Beat#

    • Interface /api/rest_j/v1/user/heartbeat

    • Submission method POST

      No parameters

• Response example

        {         "method": "/api/rest_j/v1/user/heartbeat",         "status": 0,         "message": "Maintain heartbeat success(维系心跳成功)!"    }
    - + \ No newline at end of file diff --git a/docs/1.1.3/api/overview/index.html b/docs/1.1.3/api/overview/index.html index 276591bbd37..f1f3384f667 100644 --- a/docs/1.1.3/api/overview/index.html +++ b/docs/1.1.3/api/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Overview

    1. Document description#

Linkis 1.0 has been refactored and optimized on the basis of Linkis 0.x, and it is also compatible with the 0.x interfaces. However, to prevent compatibility problems when using version 1.0, please read the following documents carefully:

1. When doing customized development with Linkis 1.0, you need to use Linkis's authorization and authentication interface. Please read the Login API Document carefully.

2. Linkis 1.0 provides a JDBC interface. If you need to access Linkis via JDBC, please read Task Submit and Execute JDBC API Document.

3. Linkis 1.0 provides a Rest interface. If you need to develop upper-layer applications on top of Linkis, please read Task Submit and Execute Rest API Document.

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/commons/rpc/index.html b/docs/1.1.3/architecture/commons/rpc/index.html index 25e7e2aee2c..25429558b53 100644 --- a/docs/1.1.3/architecture/commons/rpc/index.html +++ b/docs/1.1.3/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC Module | Apache Linkis - + @@ -16,7 +16,7 @@ At the same time, because Feign only supports simple service selection rules, it cannot forward the request to the specified microservice instance, and cannot broadcast a request to all instances of the recipient microservice.

    2. Architecture description#

    2.1. Architecture design diagram#

    Linkis RPC architecture diagram

    2.2. Module description#

    The functions of the main modules are introduced as follows:

• Eureka: service registry, used for service registration and discovery.
• Sender: the service request interface; the sender uses the Sender to request services from the receiver.
• Receiver: the interface for receiving service requests; the receiver responds to requests through this interface.
• Interceptor: the Sender passes the user's request to the interceptor, which intercepts the request and performs additional processing on it: the broadcast interceptor broadcasts the request, the retry interceptor retries failed requests, the cache interceptor reads and caches simple, unchanging requests, and the default interceptor provides the default implementation.
• Decoder, Encoder: used for request encoding and decoding.
• Feign: a lightweight framework for HTTP request calls; a declarative WebService client used for the underlying communication of Linkis-RPC.
• Listener: the listening module, mainly used to listen for broadcast requests.
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/commons/variable/index.html b/docs/1.1.3/architecture/commons/variable/index.html index be79af1919b..f4d3b726c17 100644 --- a/docs/1.1.3/architecture/commons/variable/index.html +++ b/docs/1.1.3/architecture/commons/variable/index.html @@ -7,7 +7,7 @@ Custom Variable Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Custom Variable Design

    1. General#

    Requirements Background#

     Users want to be able to define some common variables when writing code and have them replaced during execution. For example, a user runs the same sql in batches every day and needs to specify the partition time of the previous day. Writing this purely in sql would be complicated, whereas a system-provided run_date variable is very convenient to use.

    Target#

1. Support variable substitution in task code
2. Support custom variables: users can define custom variables in scripts and in the task parameters submitted to Linkis, with support for simple calculations such as + and -
3. Preset system variables: run_date, run_month, run_today and other system variables

    2. Overall Design#

     During the execution of a Linkis task, custom variable substitution is carried out in Entrance, mainly by Entrance interceptors before the task is submitted for execution. The interceptors parse the variables and expressions used in the code, replace them with the initial values of the custom variables passed in with the task, and produce the final executable code.

    2.1 Technical Architecture#

     The overall structure of custom variables is as follows. After a task is submitted, it goes through the variable-replacement interceptor. First, all variables and expressions used in the code are parsed; then they are replaced with the system and user-defined initial values of the variables; finally, the parsed code is submitted to EngineConn for execution, so the underlying engine already receives the replaced code.

    var_arc

    3 Function introduction#

     The variable types supported by Linkis are divided into custom variables and built-in system variables. The built-in variables are predefined by Linkis and can be used directly. Different variable types support different calculation operators: String supports +; integers and decimals support +-*/; dates support +-.

    3.1 Built-in variables#

    The currently supported built-in variables are as follows:

variable name | variable type | variable meaning | variable value example
run_date | String | Data statistics time (supports user-defined setting; the default is the day before the current time). If yesterday's data is processed today, this is yesterday's date. Format: yyyyMMdd | 20180129
run_date_std | String | Data statistics time (standard date format). If yesterday's data is processed today, this is yesterday's date. Format: yyyy-MM-dd | 2018-01-29
run_today | String | The day after run_date (the data statistics time). Format: yyyyMMdd | 20211210
run_today_std | String | The day after run_date (standard format). Format: yyyy-MM-dd | 2021-12-10
run_mon | String | The month of the data statistics time. Format: yyyyMM | 202112
run_mon_std | String | The month of the data statistics time (standard format). Format: yyyy-MM | 2021-12
run_month_begin | String | The first day of the month of the data statistics time. Format: yyyyMMdd | 20180101
run_month_begin_std | String | The first day of the month of the data statistics time (standard date format). Format: yyyy-MM-dd | 2018-01-01
run_month_now_begin | String | The first day of the month of run_today. Format: yyyyMMdd | 20211201
run_month_now_begin_std | String | The first day of the month of run_today (standard format). Format: yyyy-MM-dd | 2021-12-01
run_month_end | String | The last day of the month of the data statistics time. Format: yyyyMMdd | 20180131
run_month_end_std | String | The last day of the month of the data statistics time (standard date format). Format: yyyy-MM-dd | 2018-01-31
run_month_now_end | String | The last day of the month of run_today. Format: yyyyMMdd | 20211231
run_month_now_end_std | String | The last day of the month of run_today (standard date format). Format: yyyy-MM-dd | 2021-12-31
run_quarter_begin | String | The first day of the quarter of the data statistics time. Format: yyyyMMdd | 20210401
run_quarter_end | String | The last day of the quarter of the data statistics time. Format: yyyyMMdd | 20210630
run_half_year_begin | String | The first day of the half year of the data statistics time. Format: yyyyMMdd | 20210101
run_half_year_end | String | The last day of the half year of the data statistics time. Format: yyyyMMdd | 20210630
run_year_begin | String | The first day of the year of the data statistics time. Format: yyyyMMdd | 20210101
run_year_end | String | The last day of the year of the data statistics time. Format: yyyyMMdd | 20211231
run_quarter_begin_std | String | The first day of the quarter of the data statistics time (standard format). Format: yyyy-MM-dd | 2021-10-01
run_quarter_end_std | String | The last day of the quarter of the data statistics time (standard format). Format: yyyy-MM-dd | 2021-12-31
run_half_year_begin_std | String | The first day of the half year of the data statistics time (standard format). Format: yyyy-MM-dd | 2021-07-01
run_half_year_end_std | String | The last day of the half year of the data statistics time (standard format). Format: yyyy-MM-dd | 2021-12-31
run_year_begin_std | String | The first day of the year of the data statistics time (standard format). Format: yyyy-MM-dd | 2021-01-01
run_year_end_std | String | The last day of the year of the data statistics time (standard format). Format: yyyy-MM-dd | 2021-12-31

    details:

1. run_date is the core built-in date variable. It supports user-defined setting; if not specified, it defaults to the day before the current system time.
2. Definition of other derived built-in date variables: the other built-in date variables are calculated relative to run_date. Once run_date changes, the other variable values change automatically. The other date variables do not support setting initial values and can only be changed by modifying run_date.
3. Built-in variables support rich usage scenarios: ${run_date-1} is the day before run_date; ${run_month_begin-1} is the first day of the month before run_month_begin, where -1 means minus one month. (A sketch of this date arithmetic follows this list.)
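The following is a minimal, illustrative Java sketch of the date arithmetic these variables imply; it is not the Linkis parser, and the default run_date rule and the -1 semantics come from the points above:

    import java.time.LocalDate;
    import java.time.format.DateTimeFormatter;

    public class RunDateDemo {
        public static void main(String[] args) {
            DateTimeFormatter compact = DateTimeFormatter.ofPattern("yyyyMMdd");
            // Default run_date: the day before the current system time
            LocalDate runDate = LocalDate.now().minusDays(1);
            System.out.println("run_date          = " + runDate.format(compact));
            // ${run_date-1}: the day before run_date
            System.out.println("run_date-1        = " + runDate.minusDays(1).format(compact));
            // run_month_begin: the first day of run_date's month
            LocalDate monthBegin = runDate.withDayOfMonth(1);
            System.out.println("run_month_begin   = " + monthBegin.format(compact));
            // ${run_month_begin-1}: the first day of the previous month (-1 means minus one month here)
            System.out.println("run_month_begin-1 = " + monthBegin.minusMonths(1).format(compact));
        }
    }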

    3.2 Custom variables#

     What are custom variables? Variables that the user defines first and uses afterwards. User-defined variables currently support string, integer, and floating-point types: strings support the + operator, while integers and floating-point numbers support the +-*/ operators. User-defined variables do not conflict with the set-variable syntax supported by SparkSQL and HQL, but the same name is not allowed. How are custom variables defined and used? As follows:

## Defined in the code, specified before the task code
sql type definition method:
--@set f=20.1
The python/shell types are defined as follows:
#@set f=20.1
Note: Only one variable can be defined on one line

Variables are used directly in the code through ${varName expression}, such as ${f*2}

    3.3 Variable scope#

Custom variables in Linkis also have scope. The priority is: variables defined in the script take precedence over variables defined in the task parameters, which take precedence over the built-in run_date variable. Task parameters are defined as follows:

## restful
{
    "executionContent": {"code": "select \"${f-1}\";", "runType": "sql"},
    "params": {
        "variable": {"f": "20.1"},
        "configuration": {
            "runtime": {
                "linkis.openlookeng.url": "http://127.0.0.1:9090"
            }
        }
    },
    "source": {"scriptPath": "file:///mnt/bdp/hadoop/1.sql"},
    "labels": {
        "engineType": "spark-2.4.3",
        "userCreator": "hadoop-IDE"
    }
}
## java SDK
JobSubmitAction.builder
  .addExecuteCode(code)
  .setStartupParams(startupMap)
  .setUser(user) // submit user
  .addExecuteUser(user) // execute user
  .setLabels(labels)
  .setVariableMap(varMap) // set variables
  .build
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/engine/add_an_engine_conn/index.html b/docs/1.1.3/architecture/computation_governance_services/engine/add_an_engine_conn/index.html index 42c3e95c47f..7d3d088274c 100644 --- a/docs/1.1.3/architecture/computation_governance_services/engine/add_an_engine_conn/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/engine/add_an_engine_conn/index.html @@ -7,7 +7,7 @@ Start engineConn | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    How to add an EngineConn

Adding an EngineConn is one of the core processes of the computing task preparation phase of Linkis computing governance. It mainly includes the following steps: first, the client side (Entrance or a user client) initiates a request for a new EngineConn to LinkisManager; then LinkisManager initiates a request to EngineConnManager to start the EngineConn based on the demands and label rules; finally, LinkisManager returns the usable EngineConn to the client side.

    Based on the figure below, let's explain the whole process in detail:

    Process of adding a EngineConn

1. LinkisManager receives the request from the client side#

    Glossary:

    • LinkisManager: The management center of Linkis computing governance capabilities. Its main responsibilities are:

      1. Based on multi-level combined tags, provide users with available EngineConn after complex routing, resource management and load balancing.

      2. Provide EC and ECM full life cycle management capabilities.

  3. Provide users with multi-Yarn cluster resource management functions based on multi-level combined tags. It is mainly divided into three modules: AppManager, ResourceManager and LabelManager, which can support multi-active deployment and have the characteristics of high availability and easy expansion.

After the AM module receives the client's request for a new EngineConn, it first checks the request parameters to determine their validity. Second, it selects the most suitable EngineConnManager (ECM) through complex rules for the subsequent EngineConn startup. Next, it applies to RM for the resources needed to start the EngineConn. Finally, it requests the ECM to create the EngineConn.

    The four steps will be described in detail below.

    1. Request parameter verification#

After the AM module receives the engine creation request, it checks the parameters. First, it checks the permissions of the requesting user and the creating user, and then checks the Labels attached to the request. Since Labels are used in AM's subsequent creation process to find the ECM and to record resource information, you need to ensure that the necessary Labels are present. At this stage, the request must carry the UserCreatorLabel (for example: hadoop-IDE) and the EngineTypeLabel (for example: spark-2.4.3).

2. Select an EngineConnManager (ECM)#

ECM selection uses the Labels passed by the client to select a suitable ECM service to start the EngineConn. In this step, the LabelManager first searches the registered ECMs using the Labels passed by the client and returns them ordered by label matching degree. After the registered ECM list is obtained, these ECMs are filtered by rules; at this stage, rules such as availability check, resource surplus, and machine load have been implemented. After rule filtering, the ECM with the best label match, the most idle resources, and the lowest load is returned.

    3. Apply resources required for EngineConn#

1. After obtaining the assigned ECM, AM determines how many resources the client's engine creation request will use by calling the EngineConnPluginServer service. Here, the resource request is encapsulated, mainly including the Labels, the EngineConn startup parameters passed by the client, and the user configuration parameters obtained from the Configuration module. The resource information is obtained by calling the ECP service through RPC.

    2. After the EngineConnPluginServer service receives the resource request, it will first find the corresponding engine tag through the passed tag, and select the EngineConnPlugin of the corresponding engine through the engine tag. Then use EngineConnPlugin's resource generator to calculate the engine startup parameters passed in by the client, calculate the resources required to apply for a new EngineConn this time, and then return it to LinkisManager.

      Glossary:

• EngineConnPlugin: the interface that Linkis must implement when connecting to a new computing storage engine. It mainly includes the capabilities that the EngineConn must provide during startup: the EngineConn resource generator, the EngineConn startup command generator, and the EngineConn connector. Please refer to the Spark engine implementation class for the specific implementation: SparkEngineConnPlugin.
    • EngineConnPluginServer: It is a microservice that loads all the EngineConnPlugins and provides externally the required resource generation capabilities of EngineConn and EngineConn's startup command generation capabilities.
    • EngineConnResourceFactory: Calculate the total resources needed when EngineConn starts this time through the parameters passed in.
    • EngineConnLaunchBuilder: Through the incoming parameters, a startup command of the EngineConn is generated to provide the ECM to start the engine.
3. After AM obtains the engine resources, it calls the RM service to apply for the resources. The RM service uses the incoming Labels, the ECM, and the resources applied for to make a resource judgment. First it judges whether the resources of the client corresponding to the Labels are sufficient, then whether the resources of the ECM service are sufficient. If the resources are sufficient, the resource application is approved and the resources of the corresponding Labels are added or subtracted.

    4. Request ECM for engine creation#

    1. After completing the resource application for the engine, AM will encapsulate the engine startup request, send it to the corresponding ECM via RPC for service startup, and obtain the instance object of EngineConn.
    2. AM will then determine whether EngineConn is successfully started and become available through the reported information of EngineConn. If it is, the result will be returned, and the process of adding an engine this time will end.

    2. ECM initiates EngineConn#

    Glossary:

    • EngineConnManager: EngineConn's manager. Provides engine life-cycle management, and at the same time reports load information and its own health status to RM.
    • EngineConnBuildRequest: The start engine command passed by LinkisManager to ECM, which encapsulates all tag information, required resources and some parameter configuration information of the engine.
    • EngineConnLaunchRequest: Contains the BML materials, environment variables, ECM required local environment variables, startup commands and other information required to start an EngineConn, so that ECM can build a complete EngineConn startup script based on this.

    After ECM receives the EngineConnBuildRequest command passed by LinkisManager, it is mainly divided into three steps to start EngineConn:

    1. Request EngineConnPluginServer to obtain EngineConnLaunchRequest encapsulated by EngineConnPluginServer.
    2. Parse EngineConnLaunchRequest and encapsulate it into EngineConn startup script.
    3. Execute startup script to start EngineConn.

    2.1 EngineConnPluginServer encapsulates EngineConnLaunchRequest#

    Get the EngineConn type and corresponding version that actually needs to be started through the label information of EngineConnBuildRequest, get the EngineConnPlugin of the EngineConn type from the memory of EngineConnPluginServer, and convert the EngineConnBuildRequest into EngineConnLaunchRequest through the EngineConnLaunchBuilder of the EngineConnPlugin.

    2.2 Encapsulate EngineConn startup script#

    After the ECM obtains the EngineConnLaunchRequest, it downloads the BML materials in the EngineConnLaunchRequest to the local, and checks whether the local necessary environment variables required by the EngineConnLaunchRequest exist. After the verification is passed, the EngineConnLaunchRequest is encapsulated into an EngineConn startup script.

    2.3 Execute startup script#

    Currently, ECM only supports Bash commands for Unix systems, that is, only supports Linux systems to execute the startup script.

Before startup, the sudo command is used to switch to the corresponding requesting user to execute the script, ensuring that the startup user (i.e., the JVM user) is the requesting user on the client side.

After the startup script is executed, ECM monitors the execution status and execution log of the script in real time. Once the execution status returns non-zero, it immediately reports EngineConn startup failure to LinkisManager and the entire process is complete; otherwise, it keeps monitoring the log and status of the startup script until the script execution is complete.
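The pattern described here (run the generated script, stream its log, and treat a non-zero exit code as startup failure) can be sketched in Java as follows; the script path is illustrative, since ECM generates the real script from the EngineConnLaunchRequest:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;

    public class StartupScriptRunner {
        public static void main(String[] args) throws Exception {
            // Illustrative script path; ECM generates the real one from EngineConnLaunchRequest
            ProcessBuilder pb = new ProcessBuilder("bash", "/tmp/engineConnExec.sh");
            pb.redirectErrorStream(true); // merge stderr into stdout, like a combined startup log
            Process process = pb.start();
            // Stream the startup log in real time, as ECM does while monitoring the script
            try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    System.out.println(line);
                }
            }
            int exit = process.waitFor();
            if (exit != 0) {
                // Non-zero exit status: report EngineConn startup failure to LinkisManager
                System.err.println("EngineConn startup failed, exit code " + exit);
            }
        }
    }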

    3. EngineConn initialization#

After ECM executes EngineConn's startup script, the EngineConn microservice is officially launched.

    Glossary:

    • EngineConn microservice: Refers to the actual microservices that include an EngineConn and one or more Executors to provide computing power for computing tasks. When we talk about adding an EngineConn, we actually mean adding an EngineConn microservice.
    • EngineConn: The engine connector is the actual connection unit with the underlying computing storage engine, and contains the session information with the actual engine. The difference between it and Executor is that EngineConn only acts as a connection and a client, and does not actually perform calculations. For example, SparkEngineConn, its session information is SparkSession.
    • Executor: As a real computing storage scenario executor, it is the actual computing storage logic execution unit. It abstracts the various capabilities of EngineConn and provides multiple different architectural capabilities such as interactive execution, subscription execution, and responsive execution.

    The initialization of EngineConn microservices is generally divided into three stages:

1. Initialize the EngineConn of the specific engine. First, the command-line parameters of the Java main method are encapsulated into an EngineCreationContext containing the relevant label information, startup information, and parameter information, and the EngineConn is initialized through the EngineCreationContext to establish the connection between the EngineConn and the underlying engine. For example, SparkEngineConn initializes a SparkSession at this stage to establish a connection with a Spark application.
2. Initialize the Executor. After the EngineConn is initialized, the corresponding Executor is initialized according to the actual usage scenario to provide service capabilities for subsequent users. For example, the SparkEngineConn in the interactive computing scenario initializes a series of Executors that can be used to submit and execute SQL, PySpark, and Scala code, supporting the client in submitting and executing SQL, PySpark, Scala and other code to the SparkEngineConn.
3. Report the heartbeat to LinkisManager regularly, and wait for EngineConn to exit. When the underlying engine corresponding to the EngineConn becomes abnormal, the maximum idle time is exceeded, the Executor finishes executing, or the user manually kills it, the EngineConn automatically ends and exits.

At this point, the process of adding a new EngineConn is basically complete. Finally, let's summarize:

    • The client initiates a request for adding EngineConn to LinkisManager.
    • LinkisManager checks the legitimacy of the parameters, first selects the appropriate ECM according to the label, then confirms the resources required for this new EngineConn according to the user's request, applies for resources from the RM module of LinkisManager, and requires ECM to start a new EngineConn as required after the application is passed.
    • ECM first requests EngineConnPluginServer to obtain an EngineConnLaunchRequest containing BML materials, environment variables, ECM required local environment variables, startup commands and other information needed to start an EngineConn, and then encapsulates the startup script of EngineConn, and finally executes the startup script to start the EngineConn.
    • EngineConn initializes the EngineConn of a specific engine, and then initializes the corresponding Executor according to the actual usage scenario, and provides service capabilities for subsequent users. Finally, report the heartbeat to LinkisManager regularly, and wait for the normal end or termination by the user.
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn/index.html b/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn/index.html index c63a14c1c28..626658ecc07 100644 --- a/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    EngineConn architecture design

EngineConn: Engine connector, the module used to create and manage the connection session with the underlying computing storage engine; it contains the session information between the engine and the specific cluster and is the client that communicates with the specific engine.

    EngineConn architecture diagram

    EngineConn

    Introduction to the second-level module:

    linkis-computation-engineconn interactive engine connector#

Provides the ability to execute interactive computing tasks.

Core class | Core function
EngineConnTask | Defines the interactive computing tasks submitted to EngineConn
ComputationExecutor | Defines the interactive Executor, with interactive capabilities such as status query and task kill
TaskExecutionService | Provides management functions for interactive computing tasks

    linkis-engineconn-common engine connector common module#

    Define the most basic entity classes and interfaces in the engine connector. EngineConn is used to create a connection session for the underlying computing storage engine, which contains the session information between the engine and the specific cluster, and is the client that communicates with the specific engine.

Core Service | Core function
EngineCreationContext | Contains the context information of EngineConn during startup
EngineConn | Contains the specific information of EngineConn, such as its type and the specific connection information with the underlying computing storage engine
EngineExecution | Provides Executor creation logic
EngineConnHook | Defines the operations before and after each phase of engine startup

    The core logic of linkis-engineconn-core engine connector#

    Defines the interfaces involved in the core logic of EngineConn.

Core class | Core function
EngineConnManager | Provides related interfaces for creating and obtaining EngineConn
ExecutorManager | Provides related interfaces for creating and obtaining Executor
ShutdownHook | Defines the operation of the engine shutdown phase

    linkis-engineconn-launch engine connector startup module#

    Defines the logic of how to start EngineConn.

Core class | Core function
EngineConnServer | EngineConn microservice startup class

    The core logic of the linkis-executor-core executor#

Defines the core classes related to the executor. The executor is the real computing scene executor, responsible for submitting user code to EngineConn.

Core class | Core function
Executor | The actual computational logic execution unit; provides a top-level abstraction of the various capabilities of the engine
EngineConnAsyncEvent | Defines EngineConn-related asynchronous events
EngineConnSyncEvent | Defines EngineConn-related synchronization events
EngineConnAsyncListener | Defines the EngineConn-related asynchronous event listener
EngineConnSyncListener | Defines the EngineConn-related synchronization event listener
EngineConnAsyncListenerBus | Defines the listener bus for EngineConn asynchronous events
EngineConnSyncListenerBus | Defines the listener bus for EngineConn synchronization events
ExecutorListenerBusContext | Defines the context of the EngineConn event listener
LabelService | Provides the label reporting function
ManagerService | Provides the function of information transfer with LinkisManager

    linkis-callback-service callback logic#

Core Class | Core Function
EngineConnCallback | Defines EngineConn's callback logic

linkis-accessible-executor accessible executor module#

An Executor that can be accessed: you can interact with it through RPC requests to get basic metrics data such as its status, load, and concurrency.

Core Class | Core Function
LogCache | Provides the log cache function
AccessibleExecutor | The Executor that can be accessed; you can interact with it through RPC requests
NodeHealthyInfoManager | Manages the health information of the Executor
NodeHeartbeatMsgManager | Manages the heartbeat information of the Executor
NodeOverLoadInfoManager | Manages the load information of the Executor
Listener | Provides events related to the Executor and the corresponding listener definitions
EngineConnTimedLock | Defines the Executor-level lock
AccessibleService | Provides the start-stop and status acquisition functions of the Executor
ExecutorHeartbeatService | Provides heartbeat-related functions of the Executor
LockService | Provides the lock management function
LogService | Provides log management functions
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_history/index.html b/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_history/index.html index 9543cd4a34c..eed606c8fa4 100644 --- a/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_history/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_history/index.html @@ -7,7 +7,7 @@ EngineConn History Features | Apache Linkis - + @@ -16,7 +16,7 @@ engineconn-history-02.png

    4. Data structure:#

# EC information resource record table
DROP TABLE IF EXISTS `linkis_cg_ec_resource_info_record`;
CREATE TABLE `linkis_cg_ec_resource_info_record` (
    `id` INT(20) NOT NULL AUTO_INCREMENT,
    `label_value` VARCHAR(255) NOT NULL COMMENT 'ec labels stringValue',
    `create_user` VARCHAR(128) NOT NULL COMMENT 'ec create user',
    `service_instance` varchar(128) COLLATE utf8_bin DEFAULT NULL COMMENT 'ec instance info',
    `ecm_instance` varchar(128) COLLATE utf8_bin DEFAULT NULL COMMENT 'ecm instance info',
    `ticket_id` VARCHAR(100) NOT NULL COMMENT 'ec ticket id',
    `log_dir_suffix` varchar(128) COLLATE utf8_bin DEFAULT NULL COMMENT 'log path',
    `request_times` INT(8) COMMENT 'resource request times',
    `request_resource` VARCHAR(255) COMMENT 'request resource',
    `used_times` INT(8) COMMENT 'resource used times',
    `used_resource` VARCHAR(255) COMMENT 'used resource',
    `release_times` INT(8) COMMENT 'resource released times',
    `released_resource` VARCHAR(255) COMMENT 'released resource',
    `release_time` datetime DEFAULT NULL COMMENT 'released time',
    `used_time` datetime DEFAULT NULL COMMENT 'used time',
    `create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
    PRIMARY KEY (`id`),
    KEY (`ticket_id`),
    UNIQUE KEY `label_value_ticket_id` (`ticket_id`, `label_value`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;

    5. Interface Design#

For the API of the engine history management page, refer to the document Add history engine page to the management console

    6. Non-functional design#

    6.1 Security#

No security issues are involved; the restful interfaces require login authentication

    6.2 Performance#

Little impact on engine life-cycle performance

    6.3 Capacity#

    Requires regular cleaning

    6.4 High Availability#

Not involved

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html b/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html index c119b4e8f84..05ad2d12060 100644 --- a/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html @@ -7,7 +7,7 @@ EngineConnManager Design | Apache Linkis - + @@ -16,7 +16,7 @@ Core Service and Features module are as follows:

Core service | Core function
EngineConnLaunchService | Contains the core methods for generating an EngineConn and starting the process
BmlResourceLocallizationService | Used to download BML engine-related resources and generate the localized file directory
ECMHealthService | Reports its own healthy heartbeat to AM regularly
ECMMetricsService | Reports its own metrics status to AM regularly
EngineConnKillSerivce | Provides related functions to stop the engine
EngineConnListService | Provides caching and management functions for engines
EngineConnCallBackService | Provides the callback function for engines
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_metrics/index.html b/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_metrics/index.html index 34dfa173b42..96a455f417e 100644 --- a/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_metrics/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_metrics/index.html @@ -7,7 +7,7 @@ EngineConn Metrics reporting feature | Apache Linkis - + @@ -21,7 +21,7 @@ The callback method parses the resource, progress, and engine metrancs information in TaskRunningInfo and persists them respectively.

    engineconn-mitrics-2.png

    4. Data structure#

The RPC protocol TaskRunningInfo has been added for this requirement; no db table has been added

    5. Interface Design#

    No external interface

    6. Non-functional design:#

    6.1 Security#

The RPC interface uses internal authentication and does not involve external security issues

    6.2 Performance#

The two RPC interfaces were combined to reduce the number of reports and improve performance

    6.3 Capacity#

The metrics information is small, so there is no impact

    6.4 High Availability#

Not involved

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html b/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html index 64a4b0aaf02..bbf2e3b0e84 100644 --- a/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html @@ -7,7 +7,7 @@ EngineConnPlugin (ECP) Design | Apache Linkis - + @@ -17,7 +17,7 @@ Other services such as Manager call the logic of the corresponding plug-in in Plugin Server through RPC requests.

Core Class | Core Function
EngineConnLaunchService | Responsible for building the engine connector launch request
EngineConnResourceFactoryService | Responsible for generating engine resources
EngineConnResourceService | Responsible for downloading the resource files used by the engine connector from BML

    EngineConn-Plugin-Loader Engine Connector Plugin Loader#

The engine connector plug-in loader dynamically loads engine connector plug-ins according to request parameters and supports caching. The loading process consists of two parts: 1) plug-in resources such as the main program package and program dependency packages are loaded locally (not expanded here); 2) plug-in resources are dynamically loaded from the local file system into the service process environment, for example, loaded into the JVM virtual machine through a class loader.

Core Class | Core Function
EngineConnPluginsResourceLoader | Loads engine connector plug-in resources
EngineConnPluginsLoader | Loads the engine connector plug-in instance, or loads an existing one from the cache
EngineConnPluginClassLoader | Dynamically instantiates an engine connector instance from a jar

    EngineConn-Plugin-Cache engine plug-in cache module#

The engine connector plug-in cache is a cache service specially used to cache loaded engine connectors, supporting read, update, and remove operations. A plug-in that has been loaded into the service process is cached together with its class loader to prevent repeated loading from affecting efficiency; at the same time, the cache module periodically notifies the loader to update the plug-in resources. If changes are found, the plug-in is reloaded and the cache refreshed automatically.

Core Class | Core Function
EngineConnPluginCache | Caches loaded engine connector instances
RefreshPluginCacheContainer | Engine connector that refreshes the cache regularly

    EngineConn-Plugin-Core: Engine connector plug-in core module#

    The engine connector plug-in core module is the core module of the engine connector plug-in. Contains the implementation of the basic functions of the engine plug-in, such as the construction of the engine connector start command, the construction of the engine resource factory and the implementation of the core interface of the engine connector plug-in.

Core Class | Core Function
EngineConnLaunchBuilder | Builds the engine connector launch request
EngineConnFactory | Creates the engine connector
EngineConnPlugin | The engine connector plug-in interface, including resources, commands, and instance construction methods
EngineResourceFactory | Engine resource creation factory

    EngineConn-Plugins: Engine connection plugin collection#

    The engine connection plug-in collection is used to place the default engine connector plug-in library that has been implemented based on the plug-in interface defined by us. Provides the default engine connector implementation, such as jdbc, spark, python, shell, etc. Users can refer to the implemented cases based on their own needs to implement more engine connectors.

Core Class | Core Function
engineplugin-jdbc | jdbc engine connector
engineplugin-shell | shell engine connector
engineplugin-spark | spark engine connector
engineplugin-python | python engine connector
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/entrance/index.html b/docs/1.1.3/architecture/computation_governance_services/entrance/index.html index 99b2c916184..c62f55ac74f 100644 --- a/docs/1.1.3/architecture/computation_governance_services/entrance/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/entrance/index.html @@ -7,7 +7,7 @@ Entrance Architecture Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Entrance Architecture Design

The Linkis task submission portal is used to receive, schedule, and forward execution requests and to manage the life cycle of computing tasks, and it can return calculation results, logs, and progress to the caller. It is split out from the native capabilities of Entrance in Linkis 0.X.

    1. Entrance architecture diagram

    Introduction to the second-level module:

    EntranceServer#

    EntranceServer computing task submission portal service is the core service of Entrance, responsible for the reception, scheduling, execution status tracking, and job life cycle management of Linkis execution tasks. It mainly realizes the conversion of task execution requests into schedulable Jobs, scheduling, applying for Executor execution, job status management, result set management, log management, etc.

Core Class | Core Function
EntranceInterceptor | The Entrance interceptor is used to supplement the information of the incoming task, making the task content more complete. The supplementary information includes: database information supplement, custom variable replacement, code inspection, limit restrictions, etc.
EntranceParser | The Entrance parser is used to parse the request parameter Map into a Task; it can also convert a Task into a schedulable Job, or a Job into a storable Task.
EntranceExecutorManager | Entrance executor management creates an Executor for the execution of an EntranceJob, maintains the relationship between Job and Executor, and supports the labeling capabilities requested by the Job
PersistenceManager | Persistence management is responsible for job-related persistence operations, such as storing the result set path, job status changes, progress, etc. in the database
ResultSetEngine | The result set engine is responsible for storing the result set after the job runs, saved as a file to HDFS or a local storage directory
LogManager | Log management is responsible for storing job logs and managing log error codes
Scheduler | The job scheduler is responsible for the scheduling and execution of all jobs, mainly through scheduling the job queues
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html b/docs/1.1.3/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html index 21bf2512736..c2ab02f29ae 100644 --- a/docs/1.1.3/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html @@ -7,7 +7,7 @@ Job Submission | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Job submission, preparation and execution process

The submission and execution of computing tasks (Jobs) is the core capability provided by Linkis. It connects almost all modules in the Linkis computing governance architecture and occupies a core position in Linkis.

The whole process, starting with the user's computing task being submitted from the client and ending with the final results being returned, is divided into three stages: submission -> preparation -> execution. The details are shown in the following figure.

    The overall flow chart of computing tasks

    Among them:

    • Entrance, as the entrance to the submission stage, provides task reception, scheduling and job information forwarding capabilities. It is the unified entrance for all computing tasks. It will forward computing tasks to Orchestrator for scheduling and execution.

    • Orchestrator, as the entrance to the preparation phase, mainly provides job analysis, orchestration and execution capabilities.

    • Linkis Manager: The management center of computing governance capabilities. Its main responsibilities are as follows:

  1. ResourceManager: not only has the resource management capabilities of Yarn and Linkis EngineConnManager, but also provides tag-based multi-level resource allocation and recovery capabilities, allowing ResourceManager to have full resource management capabilities across clusters and across computing resource types;
  2. AppManager: coordinates and manages all EngineConnManager and EngineConn instances, handing the life cycle of EngineConn (application, reuse, creation, switching, destruction) over to AppManager for management;
  3. LabelManager: based on multi-level combined labels, provides label support for the routing and management capabilities of EngineConn and EngineConnManager across IDCs and clusters;
  4. EngineConnPluginServer: externally provides the resource generation capabilities required to start an EngineConn and the EngineConn startup command generation capabilities.
    • EngineConnManager: It is the manager of EngineConn, which provides engine life-cycle management, and at the same time reports load information and its own health status to RM.

    • EngineConn: It is the actual connector between Linkis and the underlying computing storage engines. All user computing and storage tasks will eventually be submitted to the underlying computing storage engine by EngineConn. According to different user scenarios, EngineConn provides full-stack computing capability framework support for interactive computing, streaming computing, off-line computing, and data storage tasks.

    1. Submission Stage#

    The submission phase is mainly the interaction of Client -> Linkis Gateway -> Entrance, and the process is as follows:

    Flow chart of submission phase

    1. First, the Client (such as the front end or the client) initiates a Job request, and the job request information is simplified as follows (for the specific usage of Linkis, please refer to How to use Linkis):
    POST /api/rest_j/v1/entrance/submit
    {     "executionContent": {"code": "show tables", "runType": "sql"},     "params": {"variable": {}, "configuration": {}}, //not required     "source": {"scriptPath": "file:///1.hql"}, //not required, only used to record code source     "labels": {         "engineType": "spark-2.4.3", //Specify engine         "userCreator": "username-IDE" // Specify the submission user and submission system     }}
    1. After Linkis-Gateway receives the request, according to the serviceName in the URI /api/rest_j/v1/${serviceName}/.+, it will confirm the microservice name for routing and forwarding. Here Linkis-Gateway will parse out the name as entrance and Job is forwarded to the Entrance microservice. It should be noted that if the user specifies a routing label, the Entrance microservice instance with the corresponding label will be selected for forwarding according to the routing label instead of random forwarding.
3. After Entrance receives the Job request, it first performs a simple validity check on the request, then uses RPC to call JobHistory to persist the job information, encapsulates the Job request as a computing task, puts it into the scheduling queue, and waits for it to be consumed by a consumer thread.
4. The scheduling queue opens up a consumption queue and a consumer thread for each group. The consumption queue stores the preliminarily encapsulated user computing tasks, and the consumer thread continuously takes computing tasks from the consumption queue in a FIFO manner. The current default grouping key is Creator + User (that is, submission system + user). Therefore, even for the same user, tasks submitted from different systems use completely different consumption queues and consumer threads and are fully isolated from each other. (Reminder: users can modify the grouping algorithm as needed.)
5. After the consumer thread takes out a computing task, it submits it to Orchestrator, which officially starts the preparation phase.
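To make the routing rule in step 2 concrete, here is a minimal sketch of extracting the serviceName from a request URI. The class and method names are illustrative, not the actual Gateway code:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ServiceNameParser {
    // Matches URIs of the form /api/rest_j/v1/${serviceName}/.+ described above
    private static final Pattern SERVICE_URI = Pattern.compile("^/api/rest_j/v1/([^/]+)/.+$");

    public static String parseServiceName(String uri) {
        Matcher m = SERVICE_URI.matcher(uri);
        return m.matches() ? m.group(1) : null;
    }

    public static void main(String[] args) {
        // For the submit request above this prints "entrance",
        // so the Job is forwarded to the Entrance microservice.
        System.out.println(parseServiceName("/api/rest_j/v1/entrance/submit"));
    }
}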

    2. Preparation Stage#

There are two main processes in the preparation phase. One is to apply to LinkisManager for an available EngineConn for the subsequent submission and execution of the computing task. The other is for Orchestrator to orchestrate the computing task submitted by Entrance, converting the user's computing request into a physical execution tree that is handed over to the execution phase, where the computing task is actually executed.

    2.1 Apply to LinkisManager for available EngineConn#

    If the user has a reusable EngineConn in LinkisManager, the EngineConn is directly locked and returned to Orchestrator, and the entire application process ends.

What counts as a reusable EngineConn? One that matches all the label requirements of the computing task and whose own health status is Healthy (the load is low and the actual status is Idle). All EngineConns that meet these conditions are then sorted and selected according to rules, and the best one is locked. A sketch of this selection is shown below.
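A minimal sketch of this reuse check, assuming simplified data holders (EngineConnInfo and the selection rule here are illustrative; LinkisManager's actual classes and rules differ):

import java.util.Comparator;
import java.util.List;
import java.util.Optional;
import java.util.Set;

class EngineConnInfo {
    Set<String> labels;  // labels carried by this EngineConn
    boolean healthy;     // health status is Healthy
    boolean idle;        // actual status is Idle
    double load;         // current load
    EngineConnInfo(Set<String> labels, boolean healthy, boolean idle, double load) {
        this.labels = labels; this.healthy = healthy; this.idle = idle; this.load = load;
    }
}

public class EngineConnSelector {
    // Reusable = matches all required labels and is Healthy/Idle; then pick the best by rule (lowest load here)
    public static Optional<EngineConnInfo> selectReusable(List<EngineConnInfo> all, Set<String> required) {
        return all.stream()
                .filter(e -> e.labels.containsAll(required))
                .filter(e -> e.healthy && e.idle)
                .min(Comparator.comparingDouble(e -> e.load));
    }
}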

    If the user does not have a reusable EngineConn, a process to request a new EngineConn will be triggered at this time. Regarding the process, please refer to: How to add an EngineConn.

    2.2 Orchestrate a computing task#

Orchestrator is mainly responsible for orchestrating a computing task (JobReq) into a physical execution tree (PhysicalTree) that can actually be executed, and for providing the execution capabilities of the Physical tree.

    Here we first focus on Orchestrator's computing task scheduling capabilities. A flow chart is shown below:

    Orchestration flow chart

    The main process is as follows:

• Converter: converts the JobReq (job request) submitted by the user into Orchestrator's ASTJob. This step performs parameter checking and information supplementation on the submitted task, such as variable substitution.
• Parser: parses the ASTJob, splitting it into an AST tree composed of ASTJob and ASTStage nodes.
• Validator: inspects and supplements the ASTJob and ASTStage, such as code inspection and supplementation of necessary label information.
• Planner: converts the AST tree into a Logical tree. At this point the tree is composed of LogicalTasks, which contain all the execution logic of the entire computing task.
• Optimizer: converts the Logical tree into a Physical tree and optimizes it.
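The five stages above form a straight pipeline. A minimal sketch, using hypothetical simplified types (the real Orchestrator interfaces differ):

// Marker types for the intermediate representations used in this sketch
class JobReq {}
class AstJob {}
class AstTree {}
class LogicalTree {}
class PhysicalTree {}

interface Converter { AstJob convert(JobReq req); }            // JobReq -> AST job
interface Parser { AstTree parse(AstJob job); }                // AST job -> AST tree (Job + Stages)
interface Validator { AstTree validate(AstTree tree); }        // checks + label supplementation
interface Planner { LogicalTree plan(AstTree tree); }          // AST tree -> Logical tree
interface Optimizer { PhysicalTree optimize(LogicalTree t); }  // Logical tree -> Physical tree

class OrchestratorPipeline {
    private final Converter c; private final Parser p; private final Validator v;
    private final Planner pl; private final Optimizer o;
    OrchestratorPipeline(Converter c, Parser p, Validator v, Planner pl, Optimizer o) {
        this.c = c; this.p = p; this.v = v; this.pl = pl; this.o = o;
    }
    PhysicalTree orchestrate(JobReq req) {
        // Converter -> Parser -> Validator -> Planner -> Optimizer, as described above
        return o.optimize(pl.plan(v.validate(p.parse(c.convert(req)))));
    }
}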

In a Physical tree, the majority of nodes carry computing-strategy logic; only the ExecTask nodes in the middle truly encapsulate the execution logic that is further submitted to and executed on an EngineConn. As shown below:

    Physical Tree

The execution logic encapsulated by JobExecTask and StageExecTask in the Physical tree depends on the specific computing strategy.

For example, under the multi-active computing strategy, for a single computing task submitted by a user, the execution logic submitted to the EngineConns of different clusters is encapsulated in two ExecTasks, and the related strategy logic is reflected in their parent node, StageExecTask(End).

    Here, we take the multi-reading scenario under the multi-active computing strategy as an example.

In the multi-reading scenario, only one ExecTask needs to return a result; once that result is returned, the Physical tree can be marked as successful. However, the Physical tree by itself can only execute sequentially according to dependencies and cannot terminate the execution of individual nodes: once a node is canceled or fails, the entire tree would be marked as failed. StageExecTask(End) is therefore needed to ensure that the Physical tree can cancel the ExecTask that failed while still uploading the result set generated by the successful ExecTask and letting the tree continue to execute. This is the computing-strategy execution logic represented by StageExecTask.

The orchestration process of Linkis Orchestrator is similar to that of many SQL parsing engines (such as the SQL parsers of Spark and Hive). But in fact, the orchestration capability of Linkis Orchestrator is built for the computing governance field, to serve users' different computing governance needs, whereas a SQL parsing engine is a parsing and orchestration engine oriented to the SQL language. Here is a simple distinction:

1. What Linkis Orchestrator mainly solves are the orchestration requirements that different computing strategies impose on computing tasks. For example, to be multi-active, Orchestrator compiles, for a user-submitted computing task and based on the requirements of the "multi-active" computing strategy, a Physical tree that submits the task to multiple clusters for execution. In constructing the entire Physical tree, all kinds of possible abnormal scenarios have been fully considered and are reflected in the tree.
2. The orchestration capability of Linkis Orchestrator is independent of the programming language. In theory, as long as an engine is adapted to Linkis, all the programming languages it supports can be orchestrated. A SQL parsing engine, by contrast, only cares about the analysis and execution of SQL: it is only responsible for parsing a piece of SQL into an executable Physical tree and finally computing the result.
3. Linkis Orchestrator also has the ability to parse SQL, but SQL parsing is just one of Orchestrator Parser's analytic implementations for the SQL programming language. The Parser of Linkis Orchestrator is also considering introducing Apache Calcite to parse SQL: a user SQL that spans multiple computing engines (which must be engines Linkis has integrated) would be split into multiple sub-SQLs, each submitted to its corresponding engine during the execution phase, with a suitable engine finally selected for the summary computation.

After the analysis and orchestration by Linkis Orchestrator, the computing task has been transformed into an executable Physical tree. Orchestrator submits the Physical tree to its Execution module, entering the final execution stage.

    3. Execution Stage#

The execution stage is mainly divided into the following two steps, which are the last two capabilities provided by Linkis Orchestrator:

    Flow chart of the execution stage

    The main process is as follows:

• Execution: analyzes the dependencies of the Physical tree and executes the nodes sequentially from the leaves according to those dependencies.
• Reheater: once a node of the Physical tree finishes executing, it triggers a reheat. Reheating allows the Physical tree to be adjusted dynamically according to the real-time execution. For example, if a leaf node is detected to have failed and it supports retry (the failure was caused by a thrown ReTryExecption), the Physical tree is automatically adjusted and a retry parent node with exactly the same content is added above the leaf node. A sketch follows.
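A minimal sketch of the reheat adjustment described above (node and class names are hypothetical, not the actual Orchestrator code):

import java.util.ArrayList;
import java.util.List;

class ExecNode {
    final String name;
    final List<ExecNode> children = new ArrayList<>();
    ExecNode(String name) { this.name = name; }
}

public class Reheater {
    // On a retryable failure, wrap the failed leaf in a retry parent node with identical content
    public static void reheat(ExecNode parent, ExecNode failedLeaf) {
        int i = parent.children.indexOf(failedLeaf);
        if (i < 0) return;
        ExecNode retry = new ExecNode("Retry(" + failedLeaf.name + ")");
        retry.children.add(new ExecNode(failedLeaf.name)); // same content as the failed node
        parent.children.set(i, retry);                     // dynamic adjustment of the Physical tree
    }
}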

Let us return to the Execution stage and focus on the execution logic of the ExecTask node, which encapsulates the user computing task submitted to an EngineConn.

1. As mentioned earlier, the first step of the preparation phase is to obtain a usable EngineConn from LinkisManager. After ExecTask gets this EngineConn, it submits the user's computing task to the EngineConn through an RPC request.
2. After the EngineConn receives the computing task, it asynchronously submits the task to the underlying computing/storage engine through a thread pool and immediately returns an execution ID.
3. After ExecTask gets this execution ID, it can use the ID to asynchronously pull the execution information of the computing task (such as status, progress, logs and result sets).
4. At the same time, the EngineConn monitors the execution of the underlying computing/storage engine in real time through multiple registered Listeners. If the engine does not support registering Listeners, the EngineConn starts a daemon thread for the computing task and periodically pulls the execution status from the engine.
5. The EngineConn pushes the execution status back in real time, through RPC requests, to the microservice where Orchestrator is located.
6. After the Receiver of that microservice receives the execution status, it broadcasts it through the ListenerBus; Orchestrator's Execution consumes the event and dynamically updates the execution status of the Physical tree.
7. The result sets generated by the computing task are written on the EngineConn side to storage media such as HDFS; the EngineConn returns only the result set paths through RPC. Execution consumes the event and broadcasts the obtained result set paths through the ListenerBus, so that the Listener registered by Entrance with Orchestrator can consume the paths and persist them to JobHistory.
8. After the computing task on the EngineConn side finishes, the same logic triggers Execution to update the state of the corresponding ExecTask node, so that the Physical tree continues to execute until the entire tree has finished. At that point, Execution broadcasts the completion status of the computing task through the ListenerBus.
9. After the Listener registered by Entrance with Orchestrator consumes the state event, it updates the job state in JobHistory, and the entire task execution is complete.

    Finally, let's take a look at how the client side knows the state of the calculation task and obtains the calculation result in time, as shown in the following figure:

    Results acquisition process

    The specific process is as follows:

1. The client periodically polls Entrance for the status of the computing task.
2. Once the status flips to success, it requests the job information from JobHistory and gets all the result set paths.
3. It then initiates file-content queries to PublicService with the result set paths and obtains the contents of the result sets.
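A hedged sketch of steps 1-3 on the client side; the endpoint paths, host and IDs below are illustrative placeholders, not the exact Linkis REST routes:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class JobStatusPoller {
    private static final HttpClient CLIENT = HttpClient.newHttpClient();

    private static String get(String url) throws Exception {
        HttpRequest req = HttpRequest.newBuilder(URI.create(url)).GET().build();
        return CLIENT.send(req, HttpResponse.BodyHandlers.ofString()).body();
    }

    public static void main(String[] args) throws Exception {
        String gateway = "http://linkis-gateway:9001"; // assumed gateway address
        String jobId = "12345";                        // assumed job id
        // 1. Poll Entrance for the task status (illustrative path)
        while (!get(gateway + "/api/rest_j/v1/entrance/" + jobId + "/status").contains("Succeed")) {
            Thread.sleep(1000);
        }
        // 2. Fetch job info from JobHistory, which carries the result set paths (illustrative path)
        String jobInfo = get(gateway + "/api/rest_j/v1/jobhistory/" + jobId + "/get");
        // 3. Use the paths to request the result set contents from PublicService
        System.out.println(jobInfo);
    }
}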

At this point, the entire process of job submission -> preparation -> execution has been completed.

    Version: Next(1.1.3)

    Linkis-Client Architecture Design

    Provide users with a lightweight client that submits tasks to Linkis for execution.

    Linkis-Client architecture diagram#

    img

    Second-level module introduction#

    Linkis-Computation-Client#

    Provides an interface for users to submit execution tasks to Linkis in the form of SDK.

Core Class | Core Function
--- | ---
Action | Defines the attributes of the request and the methods and parameters it includes
Result | Defines the properties of the returned result and the methods and parameters it includes
UJESClient | Responsible for request submission and execution, and for acquiring status, results and related parameters
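The division of labor among these three classes can be sketched as follows. The shapes are hypothetical simplifications for illustration; the real linkis-computation-client builders and signatures differ:

// Hypothetical shapes only, mirroring the table above
interface Action {                 // defines the request: attributes, methods and parameters
    String uri();
    String payload();
}

interface Result {                 // defines the returned result: properties, methods and parameters
    String status();
    String data();
}

interface UJESClient {             // request submission, execution, status and result acquisition
    Result submit(Action action);
    Result status(String execId);
    Result results(String execId);
}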
    Linkis-Cli#

    Provides a way for users to submit tasks to Linkis in the form of a shell command terminal.

Core Class | Core Function
--- | ---
Common | Defines the parent classes and interfaces of the instruction template, the instruction analysis entity class, and the task submission and execution links
Core | Responsible for parsing input, executing tasks and defining output methods
Application | Calls linkis-computation-client to perform tasks, and pulls logs and final results in real time
App Manager

Engine manager: responsible for managing the basic information and metadata information of all engines.

Label Manager

We set that the higher the proportion of candidate nodes associated with irrelevant labels in the total associated nodes, the more significant the impact on the score, which further accumulates onto the initial score of the node obtained in the first step.
  • Normalize the standard deviation of the scores of the candidate nodes and sort them.
Overview (LinkisManager)

ResourceManager

    4. Monitoring module linkis-manager-monitor#

            Monitor provides the function of node status monitoring.

Resource Manager

... (URL, Hadoop version and other information) are maintained in the linkis_external_resource_provider table.

• For each resource type, there is an implementation of the ExternalResourceProviderParser interface, which parses the attributes of external resources, converts the information that can be matched to Labels into the corresponding Labels, and converts the information that can be used as parameters for requesting the resource interface into params. Finally, an ExternalResourceProvider instance is constructed that can serve as the basis for querying external resource information.

• According to the resource type and label information in the parameters of the ExternalResourceService method, find the matching ExternalResourceProvider, generate an ExternalResourceRequest based on its information, and formally call the API provided by the external resource to initiate a resource information request.


Proxy User Mode
• The relevant Linkis interfaces need to be able to identify the proxy user information based on the original UserName obtained, use the proxy user to perform operations, and record audit logs, including the user's task execution and download operations.
• When a task is submitted for execution, the Entrance service needs to change the executing user to the proxy user.

    5 Things to Consider & Note#

• Users are divided into proxy users and non-proxy users; a proxy-type user cannot proxy to another user again.
• It is necessary to control the list of login users and system users who can be proxied, prohibit arbitrary proxying, and avoid uncontrollable permissions. It is best to support configuration via database tables so that changes take effect directly without restarting the service.
• Log files containing proxy user operations, such as proxy execution and function updates, should be recorded separately. All proxy user operations in PublicService are recorded in the log for convenient auditing.
Difference Between 1.0 And 0.x

Linkis EngineConn Architecture diagram

Gateway Design

Gateway WebSocket Forwarding

    Version: Next(1.1.3)

    Overview

    Linkis 1.0 divides all microservices into three categories: public enhancement services, computing governance services, and microservice governance services. The following figure shows the architecture of Linkis 1.0.

    Linkis1.0 Architecture Figure

    The specific responsibilities of each category are as follows:

    1. Public enhancement services are the material library services, context services, data source services and public services that Linkis 0.X has provided.
2. The microservice governance services are the Spring Cloud Gateway, Eureka and OpenFeign already provided by Linkis 0.X; Linkis 1.0 will also provide support for Nacos.
3. Computing governance services are the core focus of Linkis 1.0: across the three stages of submission, preparation and execution, they comprehensively upgrade Linkis's ability to govern and control user tasks.

    The following is a directory listing of Linkis1.0 architecture documents:

1. For the characteristics of the Linkis 1.0 architecture, please read The difference between Linkis1.0 and Linkis0.x.
    2. Linkis 1.0 public enhancement service related documents, please read Public Enhancement Service.
    3. Linkis 1.0 microservice governance related documents, please read Microservice Governance.
    4. Linkis 1.0 computing governance service related documents, please read Computation Governance Service.
Analysis of Engine BML

taskDao.updateState(resourceTask.getId(), TaskState.RUNNING.getValue(), new Date());

3) The actual writing of material files into the material library is completed by the upload method of the ResourceServiceImpl class. Inside the upload method, the byte streams corresponding to List<MultipartFile> files are persisted to the material library's file storage system, and the properties data of the material file is stored in the resource record table (linkis_ps_bml_resources) and the resource version record table (linkis_ps_bml_resources_version).

MultipartFile p = files[0];
String resourceId = (String) properties.get("resourceId");
String fileName = new String(p.getOriginalFilename().getBytes(Constant.ISO_ENCODE), Constant.UTF8_ENCODE);
fileName = resourceId;
// generatePath currently supports Local and HDFS paths; the path composition rules are determined by LocalResourceHelper or HdfsResourceHelper
String path = resourceHelper.generatePath(user, fileName, properties);
StringBuilder sb = new StringBuilder();
// The file size calculation and the writing of the file byte stream are implemented by the upload method in LocalResourceHelper or HdfsResourceHelper
long size = resourceHelper.upload(path, user, inputStream, sb, true);
Resource resource = Resource.createNewResource(resourceId, user, fileName, properties);
// Insert a record into the resource table linkis_ps_bml_resources
long id = resourceDao.uploadResource(resource);
// Add a new record to the resource version table linkis_ps_bml_resources_version; the version number at this time is Constant.FIRST_VERSION.
// In addition to the metadata of this version, the most important thing is to record the storage location of this version's file, including the file path, start position and end position.
String clientIp = (String) properties.get("clientIp");
ResourceVersion resourceVersion = ResourceVersion.createNewResourceVersion(
        resourceId, path, md5String, clientIp, size, Constant.FIRST_VERSION, 1);
versionDao.insertNewVersion(resourceVersion);

After the above process executes successfully, the material data is truly complete; the UploadResult is then returned to the client and the status of this ResourceTask is marked as completed. If the upload fails, the ResourceTask is marked as failed and the exception information is recorded.

    resource-task

    4.2.2 Engine material update process#

    Engine material update process sequence diagram

    Engine material update process sequence diagram

If the table linkis_cg_engine_conn_plugin_bml_resources matches the local material data, an EngineConnBmlResource object needs to be constructed from the data in EngineConnLocalizeResource, and the metadata such as the version number, file size and modification time of the original material file in the linkis_cg_engine_conn_plugin_bml_resources table is updated. Before this update, the update-and-upload operation of the material file itself must be completed, that is, the uploadToBml(localizeResource, engineConnBmlResource.getBmlResourceId) method is executed.

Inside the uploadToBml(localizeResource, resourceId) method, a bmlClient is constructed to request the material resource update interface, namely:

private val bmlClient = BmlClientFactory.createBmlClient()
bmlClient.updateResource(Utils.getJvmUser, resourceId, localizeResource.fileName, localizeResource.getFileInputStream)

    In BML Server, the interface for material update is located in the updateVersion interface method in the BmlRestfulApi class. The main process is as follows:

Complete the validity detection of resourceId, that is, check whether the incoming resourceId exists in the linkis_ps_bml_resources table. If the resourceId does not exist, an exception is thrown to the client and the material update operation fails at the interface level.

Therefore, the correspondence of the resource data between the tables linkis_cg_engine_conn_plugin_bml_resources and linkis_ps_bml_resources must be kept complete; otherwise the material file cannot be updated and an error occurs.

    resourceService.checkResourceId(resourceId)

    If resourceId exists in the linkis_ps_bml_resources table, it will continue to execute:

    StringUtils.isEmpty(versionService.getNewestVersion(resourceId))

    The getNewestVersion method is to obtain the maximum version number of the resourceId in the table linkis_ps_bml_resources_version. If the maximum version corresponding to the resourceId is empty, the material will also fail to update, so the integrity of the corresponding relationship of the data here also needs to be strictly guaranteed.

    After the above two checks are passed, a ResourceUpdateTask will be created to complete the final file writing and record update saving.

ResourceTask resourceTask = null;
synchronized (resourceId.intern()) {
    resourceTask = taskService.createUpdateTask(resourceId, user, file, properties);
}

    Inside the createUpdateTask method, the main functions implemented are:

// Generate a new version for the material resource
String lastVersion = getResourceLastVersion(resourceId);
String newVersion = generateNewVersion(lastVersion);
// Construct the ResourceTask and maintain its state
ResourceTask resourceTask = ResourceTask.createUpdateTask(resourceId, newVersion, user, system, properties);
// The upload logic of the material update is completed by the versionService.updateVersion method
versionService.updateVersion(resourceTask.getResourceId(), user, file, properties);

    Inside the versionService.updateVersion method, the main functions implemented are:

ResourceHelper resourceHelper = ResourceHelperFactory.getResourceHelper();
InputStream inputStream = file.getInputStream();
// Get the path of the resource
String newVersion = params.get("newVersion").toString();
String path = versionDao.getResourcePath(resourceId) + "_" + newVersion;
// getResourcePath takes a single record from the original path and then appends newVersion with "_":
// select resource from linkis_ps_bml_resources_version WHERE resource_id = #{resourceId} limit 1
// Upload the resource to HDFS or the local file system
StringBuilder stringBuilder = new StringBuilder();
long size = resourceHelper.upload(path, user, inputStream, stringBuilder, OVER_WRITE);
// Finally, insert a new resource version record into the linkis_ps_bml_resources_version table
ResourceVersion resourceVersion = ResourceVersion.createNewResourceVersion(resourceId, path, md5String, clientIp, size, newVersion, 1);
versionDao.insertNewVersion(resourceVersion);
Overview (BML)

... the number of bytes. After the reading is successful, the stream information is returned to the user.

  • Insert a successful download record in resource_download_history

Database Design#

1. Resource information table (resource)

Field name | Function | Remarks
--- | --- | ---
resource_id | A string that uniquely identifies a resource globally | UUID can be used for identification
resource_location | The location where the resource is stored | For example, hdfs:///tmp/bdp/${USERNAME}/
owner | The owner of the resource | e.g. zhangsan
create_time | Record creation time |
is_share | Whether the resource is shared | 0 means not shared, 1 means shared
update_time | Last update time of the resource |
is_expire | Whether the resource has expired |
expire_time | Record resource expiration time |

2. Resource version information table (resource_version)

Field name | Function | Remarks
--- | --- | ---
resource_id | Uniquely identifies the resource | Joint primary key
version | The version of the resource file |
start_byte | Start byte of the resource file |
end_byte | End byte of the resource file |
size | Resource file size |
resource_location | Resource file placement location |
start_time | Record upload start time |
end_time | Record upload end time |
updater | Record update user |

3. Resource download history table (resource_download_history)

Field | Function | Remarks
--- | --- | ---
resource_id | Record the resource_id of the downloaded resource |
version | Record the version of the downloaded resource |
downloader | Record the user who downloaded |
start_time | Record the download start time |
end_time | Record the download end time |
status | Whether the record is successful | 0 means success, 1 means failure
err_msg | Log the failure reason | null means success, otherwise logs the failure reason
CS Cleanup Interface Features

    6. Non-functional design#

    6.1 Security#

The restful interface requires login authentication and must be operated by an administrator. The administrator user is configured in the properties file.

    6.2 Performance#

• The query interface searchContextIDByTime is paginated; there is no performance impact.
• The interface clearAllContextByID, which clears specified IDs, limits the amount of data operated on; there is no performance impact.
• The interface clearAllContextByTime clears by time range. If the queried time range is too large, the query may time out, but the task will not fail; the cleanup operation is a single operation and does not affect other queries.

    6.3 Capacity#

    This requirement provides a time range query and batch cleaning interface, which requires the upper-layer application that uses ContextService to actively clean up data.

    6.4 High Availability#

    The interface reuses the high availability of the ContextService microservice itself.


CS Cache Architecture

    Note: The ContextIDValueGenerator will go to the persistence layer to pull the Array[ContextKeyValue] of the ContextID, and parse the ContextKeyValue key storage index and content through ContextKeyValueParser.

    The other interface processes provided by ContextCacheService are similar, so I won't repeat them here.

    KeyWord parsing logic#

The specific entity bean of a ContextValue needs to use the annotation @keywordMethod on the get methods that can serve as keywords. For example, the getTableName method of Table must be annotated with @keywordMethod.

When ContextKeyValueParser parses a ContextKeyValue, it scans all the methods of the passed-in object annotated with @keywordMethod, calls each get method, and takes the toString of the returned object. The result is parsed by user-selectable rules (separator-based and regular-expression-based) and stored in the keyword collection. A sketch of this mechanism follows the precautions below.

    Precautions:

1. The annotation will be defined in the core module of cs

2. The annotated get method cannot take parameters

3. The toString method of the object returned by the get method must return the keyword
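A minimal sketch of this mechanism, following the rules above (the annotation and scanner here are illustrative re-creations, not the actual cs-core implementation):

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import java.lang.reflect.Method;
import java.util.HashSet;
import java.util.Set;

@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
@interface KeywordMethod {}

class Table {
    private final String tableName;
    Table(String tableName) { this.tableName = tableName; }
    @KeywordMethod
    public String getTableName() { return tableName; } // no-arg get method; toString of the return value is the keyword
}

public class KeywordScanner {
    public static Set<String> keywords(Object bean) throws Exception {
        Set<String> kws = new HashSet<>();
        for (Method m : bean.getClass().getMethods()) {
            // scan annotated no-arg get methods, per precautions 2 and 3 above
            if (m.isAnnotationPresent(KeywordMethod.class) && m.getParameterCount() == 0) {
                kws.add(String.valueOf(m.invoke(bean))); // could be further split by separator/regex rules
            }
        }
        return kws;
    }
}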

CS Client Design

The second case is that the content of the ContextID is carried. The csid needs to be parsed: the information of each instance is obtained by string splitting, and Eureka is then used to determine, from the instance information, whether the microservice instance still exists. If it exists, the request is sent to that microservice instance.

CS HA Design

The client sends a request, and the Gateway forwards it to any server. The HA module generates the HAID, including the main instance, the backup instance and the CSID, and completes the binding of the workflow and the HAID.

    When the client sends a change request, Gateway determines that the main Instance is invalid, and then forwards the request to the standby Instance for processing. After the instance on the standby Instance verifies that the HAID is valid, it loads the instance and processes the request.

    Version: Next(1.1.3)

    CS Listener Architecture

    Listener Architecture#

In DSS, when a node changes its metadata information, the context information of the entire workflow changes. We expect all nodes to perceive the change and automatically update their metadata. We adopt the listener pattern and use a heartbeat mechanism to poll, maintaining the metadata consistency of the context information.

Client self-registration, CSKey registration and CSKey update process#

    The main process is as follows:

1. Registration operation: the clients client1, client2, client3 and client4 register themselves and the CSKeys they want to monitor with the csserver through HTTP requests. The Service obtains the callback engine instance through the external interface and registers the clients and their corresponding CSKeys.

    2. Update operation: If the ClientX node updates the CSKey content, the Service service updates the CSKey cached by the ContextCache, and the ContextCache delivers the update operation to the ListenerBus. The ListenerBus notifies the specific listener to consume (that is, the ContextKeyCallbackEngine updates the CSKeys corresponding to the Client). The consumed event will be automatically removed.

    3. Heartbeat mechanism:

    All clients use heartbeat information to detect whether the value of CSKeys in ContextKeyCallbackEngine has changed.

ContextKeyCallbackEngine returns the updated CSKey values to all registered clients through the heartbeat mechanism. If a client's heartbeat times out, the client is removed.

Listener UML class diagram#

    Interface: ListenerManager

Externally: provides a ListenerBus for event delivery.

Internally: provides a callback engine for specific event registration, access, update and heartbeat processing logic.

Listener callback engine timing diagram#

    Version: Next(1.1.3)

    CS Search Architecture

    CSSearch Architecture#

    Overall architecture#

    As shown below:

    1. ContextSearch: The query entry, accepts the query conditions defined in the Map form, and returns the corresponding results according to the conditions.

2. Building module: each condition type corresponds to a Parser, which is responsible for converting the condition in Map form into a Condition object; this is implemented by calling the logic of ConditionBuilder. Conditions with complex logical relationships use ConditionOptimizer to optimize the query plan with cost-based algorithms.

3. Execution module: filters out the results that match the conditions from the Cache. According to the query target, there are three execution modes: Ruler, Fetcher and Matcher. The specific logic is described later.

    4. Evaluation module: Responsible for calculation of conditional execution cost and statistics of historical execution status.

    Query Condition Definition (ContextSearchCondition)#

A query condition specifies how to filter out, from a ContextKeyValue collection, the part that meets the condition. Query conditions can be combined through logical operations into more complex query conditions.

    1. Support ContextType, ContextScope, KeyWord matching

      1. Corresponding to a Condition type

      2. In Cache, these should have corresponding indexes

    2. Support contains/regex matching mode for key

      1. ContainsContextSearchCondition: contains a string

      2. RegexContextSearchCondition: match a regular expression

3. Support the logical operations or, and, and not

      1. Unary operation UnaryContextSearchCondition:

    Support logical operations of a single parameter, such as NotContextSearchCondition

2. Binary operation BinaryContextSearchCondition:

    Support the logical operation of two parameters, defined as LeftCondition and RightCondition, such as OrContextSearchCondition and AndContextSearchCondition

3. Each logical operation corresponds to an implementation class of the above subclasses

4. The UML class diagram of this part is as follows:

    Construction of query conditions#

    1. Support construction through ContextSearchConditionBuilder: When constructing, if multiple ContextType, ContextScope, KeyWord, contains/regex matches are declared at the same time, they will be automatically connected by And logical operation

    2. Support logical operations between Conditions and return new Conditions: And, Or and Not (considering the form of condition1.or(condition2), the top-level interface of Condition is required to define logical operation methods)

    3. Support to build from Map through ContextSearchParser corresponding to each underlying implementation class
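To illustrate the condition1.or(condition2) form from point 2, here is a minimal sketch of composable conditions (a hypothetical simplification; the real Condition interfaces carry more information than a predicate):

interface Condition<T> {
    boolean matches(T value);
    // Logical operations return new Conditions, so conditions compose fluently
    default Condition<T> and(Condition<T> other) { return v -> matches(v) && other.matches(v); }
    default Condition<T> or(Condition<T> other) { return v -> matches(v) || other.matches(v); }
    default Condition<T> not() { return v -> !matches(v); }
}

public class ConditionDemo {
    public static void main(String[] args) {
        Condition<String> containsTable = s -> s.contains("table");       // contains-style condition
        Condition<String> startsWithShow = s -> s.matches("show\\s+.*");  // regex-style condition
        Condition<String> combined = containsTable.and(startsWithShow).or(startsWithShow.not());
        System.out.println(combined.matches("show tables")); // true
    }
}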

    Execution of query conditions#

    1. Three function modes of query conditions:

      1. Ruler: Filter out eligible ContextKeyValue sub-Arrays from an Array

      2. Matcher: Determine whether a single ContextKeyValue meets the conditions

      3. Fetcher: Filter out an Array of eligible ContextKeyValue from ContextCache

    2. Each bottom-level Condition has a corresponding Execution, responsible for maintaining the corresponding Ruler, Matcher, and Fetcher.

    Query entry ContextSearch#

    Provide a search interface, receive Map as a parameter, and filter out the corresponding data from the Cache.

    1. Use Parser to convert the condition in the form of Map into a Condition object

    2. Obtain cost information through Optimizer, and determine the order of query according to the cost information

    3. After executing the corresponding Ruler/Fetcher/Matcher logic through the corresponding Execution, the search result is obtained

    Query Optimization#

    1. OptimizedContextSearchCondition maintains the Cost and Statistics information of the condition:

      1. Cost information: CostCalculator is responsible for judging whether a certain Condition can calculate Cost, and if it can be calculated, it returns the corresponding Cost object

      2. Statistics information: start/end/execution time, number of input lines, number of output lines

    2. Implement a CostContextSearchOptimizer, whose optimize method is based on the cost of the Condition to optimize the Condition and convert it into an OptimizedContextSearchCondition object. The specific logic is described as follows:

      1. Disassemble a complex Condition into a tree structure based on the combination of logical operations. Each leaf node is a basic simple Condition; each non-leaf node is a logical operation.

    Tree A as shown in the figure below is a complex condition composed of five simple conditions of ABCDE through various logical operations.

    (Tree A)
2. The execution of these Conditions is actually depth-first, traversing the tree from left to right. Moreover, according to the commutation rules of logical operations, the left-right order of a node's children in the Condition tree can be exchanged, so all possible trees in all possible execution orders can be enumerated.

    Tree B as shown in the figure below is another possible sequence of tree A above, which is exactly the same as the execution result of tree A, except that the execution order of each part has been adjusted.

    (Tree B)
3. For each tree, the cost is calculated from the leaf nodes and aggregated up to the root node, which gives the final cost of the tree; finally, the tree with the smallest cost is selected as the optimal execution order.

    The rules for calculating node cost are as follows:

1. For leaf nodes, each node has two attributes: Cost and Weight. Cost is the cost calculated by CostCalculator. Weight is assigned according to the execution order of the nodes; the current default is 1 for the left and 0.5 for the right, and how to adjust this will be considered later. (The reason for assigning weights is that in some cases the condition on the left can already determine whether the entire combined logic matches, so the condition on the right does not have to be executed in all cases, and its actual cost needs to be discounted by a certain percentage.)

    2. For non-leaf nodes, Cost = the sum of Cost×Weight of all child nodes; the weight assignment logic is consistent with that of leaf nodes.

Taking tree A and tree B as examples, the costs of the two trees are calculated respectively, as shown in the figure below; the numbers in the nodes are Cost|Weight, assuming that the costs of the five simple conditions A, B, C, D and E are 10, 100, 50, 10 and 100. It can be concluded that the cost of tree B is less than that of tree A, making it the better plan.
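To make the cost rule concrete, here is a minimal sketch that computes a tree's cost bottom-up with the default weights (left 1, right 0.5), using the example costs A=10, B=100, C=50, D=10, E=100 from the text. The two tree shapes below are made up for illustration, since the actual shapes of trees A and B are in the figures:

public class TreeCostDemo {
    static class Node {
        final double cost;      // own cost, used for leaves
        final Node left, right; // null for leaves
        Node(double cost) { this.cost = cost; this.left = null; this.right = null; }
        Node(Node left, Node right) { this.cost = 0; this.left = left; this.right = right; }

        // Non-leaf: Cost = sum over children of cost(child) * weight(child), left weight 1, right weight 0.5
        double treeCost() {
            if (left == null) return cost;
            return left.treeCost() * 1.0 + right.treeCost() * 0.5;
        }
    }

    public static void main(String[] args) {
        Node a = new Node(10), b = new Node(100), c = new Node(50), d = new Node(10), e = new Node(100);
        Node tree1 = new Node(new Node(b, c), new Node(a, new Node(d, e))); // expensive subtree on the left
        Node tree2 = new Node(new Node(a, new Node(d, e)), new Node(b, c)); // expensive subtree discounted on the right
        System.out.println(tree1.treeCost()); // 145.0
        System.out.println(tree2.treeCost()); // 102.5 -> the cheaper ordering
    }
}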

    1. Use CostCalculator to measure the cost of simple conditions:

      1. The condition acting on the index: the cost is determined according to the distribution of the index value. For example, when the length of the Array obtained by condition A from the Cache is 100 and condition B is 200, then the cost of condition A is less than B.

      2. Conditions that need to be traversed:

1. According to the matching mode of the condition itself, an initial Cost is given: for example, 100 for Regex, 10 for Contains, etc. (the specific values will be adjusted according to the situation during implementation)

2. Based on the efficiency of historical queries (throughput per unit time), the real-time Cost is obtained by continuously adjusting the initial Cost.

    Version: Next(1.1.3)

    Data Source Management Service Architecture

    Background#

Earlier versions of Exchangis 0.x and Linkis 0.x both had integrated data source modules. In order to reuse the data source management capability, Linkis has reconstructed the data source module based on linkis-datasource (refer to the related documents), splitting data source management into a data source management service and a metadata management service.

    This article mainly involves the DataSource Manager Server data source management service, which provides the following functions:

1) Unified Linkis service startup and deployment: does not increase operation and maintenance costs and reuses Linkis service capabilities;

2) Management services with a graphical interface through Linkis Web: the interface provides management functions such as creating a data source, querying data sources, updating data sources, connectivity testing and so on;

3) The service is stateless and can be deployed with multiple instances for high availability: when the system is deployed, multiple instances serve externally independently without interfering with each other, and all information is stored in the database for sharing;

4) Full life cycle management of data sources, including creation, query, update, testing and expiration management;

5) Multi-version data source management: historical data sources are saved in the database, and data source expiration management is provided;

6) Functions provided by the Restful interface, in detail: data source type query, data source detail query, data source information query by version, data source version query, data source parameter list retrieval, multi-dimensional data source search, data source environment query and update, data source creation, data source parameter configuration, data source expiration setting, and data source connectivity testing.

    Architecture Diagram#

    datasource Architecture diagram

    Architecture Description#

1. The service is registered with the Linkis-Eureka-Service and managed in a unified manner with the other Linkis microservices. Clients can obtain the data source management service by connecting to the Linkis-GateWay-Service with the service name data-source-manager.

2. The interface layer serves other applications through Restful interfaces, providing creation, deletion and modification of data sources and data source environments, data source link and dual-link tests, data source version management and expiration operations.

3. The Service layer mainly manages the database and the material library, permanently retaining the relevant information of data sources.

4. The link test of a data source is done through the linkis metastore server service, which currently supports the mysql/es/kafka/hive services.

    Core Process#

1. Creating a data source: first, the user is obtained from the request to determine whether the user is valid. Next, the relevant fields of the data source are verified: the data source name and data source type cannot be empty. The data source name is used to confirm whether the data source already exists; if it does not exist, it is inserted into the database and the data source ID is returned (see the sketch after this list).

2. Updating a data source: first, the user is obtained from the request to determine whether the user is valid. Next, the relevant fields of the data source are verified: the data source name and data source type cannot be empty. Whether the data source exists is confirmed by the data source ID; if it does not exist, an exception is returned. If it exists, it is further judged whether the user has update permission on the data source: only the administrator or the owner of the data source may update it. If the user has permission, the data source is updated and the data source ID is returned.

3. Updating data source parameters: first, the user is obtained from the request to determine whether the user is valid. The detailed data source information is obtained by the data source ID passed in, and it is then determined whether the user is the owner of the data source or an administrator. If so, the modified parameters are further verified; after the verification passes, the parameters are updated and the versionId is returned.
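A hedged sketch of the checks in step 1 (class and method names are illustrative stubs, not the service's actual code):

public class DataSourceCreateSketch {
    public Long create(String user, String name, String type) {
        // 1. The requesting user must be valid
        if (user == null || user.isEmpty()) throw new IllegalArgumentException("invalid user");
        // 2. Data source name and type cannot be empty
        if (name == null || name.isEmpty() || type == null || type.isEmpty())
            throw new IllegalArgumentException("datasource name and type cannot be empty");
        // 3. The name decides whether the data source already exists
        if (existsByName(name)) throw new IllegalStateException("datasource already exists: " + name);
        // 4. Insert into the database and return the generated data source ID
        return insert(name, type);
    }
    private boolean existsByName(String name) { return false; }  // stub: query linkis_ps_dm_datasource by name
    private Long insert(String name, String type) { return 1L; } // stub: insert a record and return its ID
}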

    Entity Object#

Class Name | Description
--- | ---
DataSourceType | Indicates the type of a data source
DataSourceParamKeyDefinition | Declares data source property configuration definitions
DataSource | Data source object entity class, including permission tags and attribute configuration definitions
DataSourceEnv | Data source environment object entity class, which also contains attribute configuration definitions
DataSourceParameter | Data source specific parameter configuration
DatasourceVersion | Data source version details

    Database Design#

    Database Diagram:#

    Data Table Definition:#

Table: linkis_ps_dm_datasource <--> Object: DataSource

Serial Number | Column | Description
--- | --- | ---
1 | id | Data source ID
2 | datasource_name | Data source name
3 | datasource_desc | Detailed description of the data source
4 | datasource_type_id | Data source type ID
5 | create_identify | Create identify
6 | create_system | System that created the data source
7 | parameter | Data source parameters
8 | create_time | Data source creation time
9 | modify_time | Data source modification time
10 | create_user | Data source creator
11 | modify_user | Data source modifier
12 | labels | Data source labels
13 | version_id | Data source version ID
14 | expire | Whether the data source has expired
15 | published_version_id | Data source published version number

Table: linkis_ps_dm_datasource_type <--> Object: DataSourceType

Serial Number | Column | Description
--- | --- | ---
1 | id | Data source type ID
2 | name | Data source type name
3 | description | Data source type description
4 | option | Type of data source
5 | classifier | Data source type classifier
6 | icon | Data source image display path
7 | layers | Data source type hierarchy

Table: linkis_ps_dm_datasource_env <--> Object: DataSourceEnv

Serial Number | Column | Description
--- | --- | ---
1 | id | Data source environment ID
2 | env_name | Data source environment name
3 | env_desc | Data source environment description
4 | datasource_type_id | Data source type ID
5 | parameter | Data source environment parameters
6 | create_time | Data source environment creation time
7 | create_user | Data source environment creator
8 | modify_time | Data source environment modification time
9 | modify_user | Data source environment modifier

    Table: linkis_ps_dm_datasource_type_key <--> Object: DataSourceParamKeyDefinition

    Serial Number | Column | Description
    1 | id | Key-value type ID
    2 | data_source_type_id | Data source type ID
    3 | key | Data source parameter key
    4 | name | Data source parameter name
    5 | default_value | Data source parameter default value
    6 | value_type | Data source parameter type
    7 | scope | Data source parameter scope
    8 | require | Whether the data source parameter is required
    9 | description | Data source parameter description
    10 | value_regex | Regex for the data source parameter
    11 | ref_id | Data source parameter association ID
    12 | ref_value | Data source parameter associated value
    13 | data_source | Data source
    14 | update_time | Update time
    15 | create_time | Create time

    Table: linkis_ps_dm_datasource_version <--> Object: DatasourceVersion

    Serial Number | Column | Description
    1 | version_id | Data source version ID
    2 | datasource_id | Data source ID
    3 | parameter | The version parameter of the data source
    4 | comment | Comment
    5 | create_time | Create time
    6 | create_user | Create user
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/public_enhancement_services/metadata_manager/index.html b/docs/1.1.3/architecture/public_enhancement_services/metadata_manager/index.html index e5e81445c94..eaf4211dcd0 100644 --- a/docs/1.1.3/architecture/public_enhancement_services/metadata_manager/index.html +++ b/docs/1.1.3/architecture/public_enhancement_services/metadata_manager/index.html @@ -7,7 +7,7 @@ Data Source Management Service Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Data Source Management Service Architecture

    Background#

    Exchangis 0.x and Linkis 0.x both had integrated data source modules in earlier versions. In order to reuse the data source capability, Linkis reconstructs the data source module based on linkis-datasource (refer to the related documents) and splits data source management into a data source management service and a metadata management service.

    This article mainly covers the metadata management service (MetaData Manager Server), which provides the following functions:

    1)、The service is started and deployed under Linkis unified service management, which adds no extra operation and maintenance cost and reuses the Linkis service capabilities;

    2)、The service is stateless and can be deployed with multiple instances to achieve high availability. Each instance provides services independently to the outside world without interfering with the others, and all information is stored in the database for sharing.

    3)、It provides full life cycle management of data sources, including creation, query, update, test, and expiration management.

    4)、Multi-version data source management: historical data sources are saved in the database, and data source expiration management is provided.

    5)、The Restful interface provides the following functions: database information query, database table information query, database table parameter information query, and data partition information query.

    Architecture Diagram#

    Data Source Architecture Diagram

    Architecture Description#

    1、The service is registered with the Linkis-Eureka-Service and managed together with the other Linkis microservices. The client can reach the data source management service by connecting to the Linkis-GateWay-Service with the service name metamanager.

    2、The interface layer provides database\table\partition information query to other applications through the Restful interface;

    3、In the Service layer, the data source type is obtained from the data source management service by the data source ID, and the concrete supporting service is obtained by the type. The services supported in the first phase are mysql\es\kafka\hive;

    Core Process#

    1、 The client passes in the specified data source ID and obtains information through the restful interface. For example, to query the database list of the data source with ID 1, the url is http://<meta-server-url>/metadatamanager/dbs/1

    2、 According to the data source ID, access the data source service <data-source-manager> through RPC to obtain the data source type;

    3、 According to the data source type, load the corresponding Service service [hive\es\kafka\mysql], perform the corresponding operation, and then return;
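    The whole flow can be exercised from the shell with the documented endpoint (a minimal sketch; <meta-server-url> stays a placeholder for your gateway address, and authentication cookies may be required depending on your gateway setup):

    # query the database list of the data source with ID 1
    curl "http://<meta-server-url>/metadatamanager/dbs/1"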

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/public_enhancement_services/overview/index.html b/docs/1.1.3/architecture/public_enhancement_services/overview/index.html index 321641773de..96882d3ad77 100644 --- a/docs/1.1.3/architecture/public_enhancement_services/overview/index.html +++ b/docs/1.1.3/architecture/public_enhancement_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    PublicEnhancementService (PS) architecture design

    PublicEnhancementService (PS): Public enhancement service, a module that provides functions such as unified configuration management, context service, material library, data source management, microservice management, and historical task query for the other microservice modules.

    Introduction to the second-level module:

    BML material library#

    It is the Linkis material management system, mainly used to store various user files, including user scripts, resource files, third-party Jar packages, etc. It can also store the class libraries that an engine needs at runtime.

    Core Class | Core Function
    UploadService | Provide resource upload service
    DownloadService | Provide resource download service
    ResourceManager | Provides a unified management entry for uploading and downloading resources
    VersionManager | Provides resource version marking and version management functions
    ProjectManager | Provides project-level resource management and control capabilities

    Unified configuration management#

    Configuration provides a "user-engine-application" three-level configuration management solution, which provides users with the function of configuring custom engine parameters under various access applications.

    Core Class | Core Function
    CategoryService | Provides management services for application and engine catalogs
    ConfigurationService | Provides a unified management service for user configuration

    ContextService context service#

    ContextService is used to solve the problem of data and information sharing across multiple systems in a data application development process.

    Core Class | Core Function
    ContextCacheService | Provides a cache service for context information
    ContextClient | Provides the ability for other microservices to interact with the CSServer group
    ContextHAManager | Provide high-availability capabilities for ContextService
    ListenerManager | The ability to provide a message bus
    ContextSearch | Provides query entry
    ContextService | Implements the overall execution logic of the context service

    Datasource data source management#

    Datasource provides the ability to connect to different data sources for other microservices.

    Core Class | Core Function
    datasource-server | Provide the ability to connect to different data sources

    InstanceLabel microservice management#

    InstanceLabel provides registration and labeling functions for other microservices connected to linkis.

    Core Class | Core Function
    InsLabelService | Provides microservice registration and label management functions

    Jobhistory historical task management#

    Jobhistory provides users with linkis historical task query, progress, log display related functions, and provides a unified historical task view for administrators.

    Core Class | Core Function
    JobHistoryQueryService | Provide historical task query service

    Variable user-defined variable management#

    Variable provides users with functions related to the storage and use of custom variables.

    Core Class | Core Function
    VariableService | Provides functions related to the storage and use of custom variables

    UDF user-defined function management#

    UDF provides users with the function of custom functions, which can be introduced by users when writing code.

    Core Class | Core Function
    UDFService | Provide user-defined function service
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/public_enhancement_services/public_service/index.html b/docs/1.1.3/architecture/public_enhancement_services/public_service/index.html index 1640b3cb7dd..8c4ca5ee4b8 100644 --- a/docs/1.1.3/architecture/public_enhancement_services/public_service/index.html +++ b/docs/1.1.3/architecture/public_enhancement_services/public_service/index.html @@ -7,7 +7,7 @@ Public Service | Apache Linkis - + @@ -20,7 +20,7 @@ The main functions are as follows:

    • Provides resource management capabilities for some specific labels to assist RM in more refined resource management.

    • Provides labeling capabilities for users. The user label will be automatically added for judgment when applying for the engine.

    • Provides the label analysis module, which can parse the users' request into a set of labels.

    • Provides node label management, mainly used for the CRUD of node labels and for label resource management, i.e. managing the resources of certain labels by recording the maximum resource, minimum resource, and used resource of a label.

    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/cluster_deployment/index.html b/docs/1.1.3/deployment/cluster_deployment/index.html index 7f550796410..56c709757c5 100644 --- a/docs/1.1.3/deployment/cluster_deployment/index.html +++ b/docs/1.1.3/deployment/cluster_deployment/index.html @@ -7,7 +7,7 @@ Cluster Deployment | Apache Linkis - + @@ -26,7 +26,7 @@ Linux clear process sudo kill - 9 process number

    4. Matters needing attention#

    4.1 It is best to start all services at the beginning, because there are dependencies between services. If some services are missing and no corresponding instance can be found through Eureka, a dependent service will fail to start, and a failed service will not restart automatically. Wait until the missing service has been added before shutting down the related services#

    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/deploy_linkis_without_hdfs/index.html b/docs/1.1.3/deployment/deploy_linkis_without_hdfs/index.html index 819b06a6ccf..d675f914f5f 100644 --- a/docs/1.1.3/deployment/deploy_linkis_without_hdfs/index.html +++ b/docs/1.1.3/deployment/deploy_linkis_without_hdfs/index.html @@ -7,7 +7,7 @@ Deploy Linkis without HDFS | Apache Linkis - + @@ -20,7 +20,7 @@ [INFO] Retrieving result-set, may take time if result-set is large, please do not exit program.============ RESULT SET 1 ============hello ############Execute Success!!!########
    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/engine_conn_plugin_installation/index.html b/docs/1.1.3/deployment/engine_conn_plugin_installation/index.html index 548d7ca640e..73546c34ff0 100644 --- a/docs/1.1.3/deployment/engine_conn_plugin_installation/index.html +++ b/docs/1.1.3/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ EngineConnPlugin Installation | Apache Linkis - + @@ -17,7 +17,7 @@ wds.linkis.engineconn.plugin.loader.store.path, which is used by EngineConnPluginServer to read the actual implementation Jar of the engine.

    It is highly recommended to specify wds.linkis.engineconn.home and wds.linkis.engineconn.plugin.loader.store.path as the same directory, so that you can directly unzip the engine ZIP package exported by maven into this directory, for example by placing it in the ${LINKIS_HOME}/lib/linkis-engineconn-plugins directory.

    ${LINKIS_HOME}/lib/linkis-engineconn-plugins:
    └── hive
        └── dist
        └── plugin
    └── spark
        └── dist
        └── plugin
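    With the two paths pointing to the same directory, deploying a newly compiled engine then amounts to a single unzip. A sketch, assuming an engine ZIP package exported by maven (the file name here is illustrative):

    cd ${LINKIS_HOME}/lib/linkis-engineconn-plugins
    # the ZIP name depends on the engine you compiled
    unzip -o flink-engineconn-plugin.zip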

    If the two parameters do not point to the same directory, you need to place the dist and plugin directories separately, as shown in the following example:

    ## dist directory
    ${LINKIS_HOME}/lib/linkis-engineconn-plugins/dist:
    └── hive
        └── dist
    └── spark
        └── dist

    ## plugin directory
    ${LINKIS_HOME}/lib/linkis-engineconn-plugins/plugin:
    └── hive
        └── plugin
    └── spark
        └── plugin

    2.2 Configuration modification of management console (optional)#

    The configuration of the Linkis 1.0 management console is managed according to engine labels. If the new engine has configuration parameters, you need to insert the corresponding configuration parameters into the Configuration service, inserting them into the following tables:

    linkis_configuration_config_key: Insert the key and default values of the configuration parameters of the engine
    linkis_manager_label: Insert the engine label, such as hive-1.2.1
    linkis_configuration_category: Insert the catalog relationship of the engine
    linkis_configuration_config_value: Insert the configuration that the engine needs to display

    If it is an existing engine and a new version is added, you can modify the version of the corresponding engine in the linkis_configuration_dml.sql file and execute it

    2.3 Engine refresh#

    1. The engine supports real-time refresh. After the engine is placed in the corresponding directory, Linkis 1.0 provides a way to load the engine without shutting down the server: just send a request to the linkis-engineconn-plugin-server service through the restful interface, i.e. the actual IP and port of the deployed service. The request interface is http://ip:port/api/rest_j/v1/rpc/receiveAndReply, the request method is POST, and the request body is {"method":"/enginePlugin/engineConn/refreshAll"}.
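    Put together as a single command, the refresh request looks as follows (replace ip:port with the actual address of the linkis-engineconn-plugin-server instance):

    curl -X POST -H "Content-Type: application/json" \
         -d '{"method":"/enginePlugin/engineConn/refreshAll"}' \
         http://ip:port/api/rest_j/v1/rpc/receiveAndReply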

    2. Restart refresh: the engine catalog can be forced to refresh by restarting

    ### cd to the sbin directory, restart linkis-engineconn-plugin-server
    cd /Linkis1.0.0/sbin
    ## Execute linkis-daemon script
    sh linkis-daemon.sh restart linkis-engine-plugin-server

    3.Check whether the engine refresh is successful: if you encounter problems during the refresh process and need to confirm whether the refresh succeeded, you can check whether the last_update_time of the linkis_engine_conn_plugin_bml_resources table in the database is the time when the refresh was triggered.

    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/installation_hierarchical_structure/index.html b/docs/1.1.3/deployment/installation_hierarchical_structure/index.html index 43ffc81ce24..caf1a5a7dfe 100644 --- a/docs/1.1.3/deployment/installation_hierarchical_structure/index.html +++ b/docs/1.1.3/deployment/installation_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Installation Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Installation directory structure

    The directory structure of Linkis 1.0 is very different from that of the 0.x version. In 0.x, each microservice has its own independent root directory. The main advantage of this structure is that it is easy to distinguish microservices and manage them individually, but there are some obvious problems:

    1. There are too many microservice directories, and switching between them for management is inconvenient
    2. There is no unified startup script, which makes it more troublesome to start and stop microservices
    3. There are a large number of duplicate service configurations, and the same configuration often needs to be modified in many places
    4. There are a large number of repeated Lib dependencies, which increases the size of the installation package and the risk of dependency conflicts

    Therefore, in Linkis 1.0, we have greatly optimized and adjusted the installation directory structure: the number of microservice directories is reduced, repeated jar dependencies are removed, and configuration files and microservice management scripts are reused as much as possible. This is mainly reflected in the following aspects:

    1.The bin folder is no longer provided per microservice; it is shared by all microservices.

    The bin folder becomes the installation directory, mainly used to install Linkis 1.0 and check the environment status. The new sbin directory provides one-click start and stop for Linkis, as well as independent start and stop of each microservice by changing parameters.

    2.A separate conf directory is no longer provided per microservice; it is shared by all microservices.

    The conf folder contains two kinds of content: on the one hand, configuration information shared by all microservices, which users can customize according to their own environment; on the other hand, configuration specific to each microservice, which users normally do not need to change.

    3.The lib folder is no longer provided per microservice; it is shared by all microservices

    The lib folder also contains two kinds of content: the common dependencies required by all microservices, and the special dependencies required by each microservice.

    4.The log directory is no longer provided per microservice; it is shared by all microservices

    The Log directory contains log files of all microservices.

    The simplified directory structure of Linkis 1.0 is as follows.

    ├── bin ──installation directory
    │ ├── checkEnv.sh ── Environmental variable detection
    │ ├── checkServices.sh ── Microservice status check
    │ ├── common.sh ── Some public shell functions
    │ ├── install-io.sh ── Used for dependency replacement during installation
    │ └── install.sh ── Main script of Linkis installation
    ├── conf ──configuration directory
    │ ├── application-eureka.yml
    │ ├── application-linkis.yml ──Microservice general yml
    │ ├── linkis-cg-engineconnmanager-io.properties
    │ ├── linkis-cg-engineconnmanager.properties
    │ ├── linkis-cg-engineplugin.properties
    │ ├── linkis-cg-entrance.properties
    │ ├── linkis-cg-linkismanager.properties
    │ ├── linkis-computation-governance
    │ │   └── linkis-client
    │ │       └── linkis-cli
    │ │           ├── linkis-cli.properties
    │ │           └── log4j2.xml
    │ ├── linkis-env.sh ──linkis environment properties
    │ ├── linkis-et-validator.properties
    │ ├── linkis-mg-gateway.properties
    │ ├── linkis.properties ──linkis global properties
    │ ├── linkis-ps-bml.properties
    │ ├── linkis-ps-cs.properties
    │ ├── linkis-ps-datasource.properties
    │ ├── linkis-ps-publicservice.properties
    │ ├── log4j2.xml
    │ ├── proxy.properties(Optional)
    │ └── token.properties(Optional)
    ├── db ──database DML and DDL file directory
    │ ├── linkis_ddl.sql ──Database table definition SQL
    │ ├── linkis_dml.sql ──Database table initialization SQL
    │ └── module ──Contains DML and DDL files of each microservice
    ├── lib ──lib directory
    │ ├── linkis-commons ──Common dependency package
    │ ├── linkis-computation-governance ──The lib directory of the computing governance module
    │ ├── linkis-engineconn-plugins ──lib directory of all EngineConnPlugins
    │ ├── linkis-public-enhancements ──lib directory of public enhancement services
    │ └── linkis-spring-cloud-services ──SpringCloud lib directory
    ├── logs ──log directory
    │ ├── linkis-cg-engineconnmanager-gc.log
    │ ├── linkis-cg-engineconnmanager.log
    │ ├── linkis-cg-engineconnmanager.out
    │ ├── linkis-cg-engineplugin-gc.log
    │ ├── linkis-cg-engineplugin.log
    │ ├── linkis-cg-engineplugin.out
    │ ├── linkis-cg-entrance-gc.log
    │ ├── linkis-cg-entrance.log
    │ ├── linkis-cg-entrance.out
    │ ├── linkis-cg-linkismanager-gc.log
    │ ├── linkis-cg-linkismanager.log
    │ ├── linkis-cg-linkismanager.out
    │ ├── linkis-et-validator-gc.log
    │ ├── linkis-et-validator.log
    │ ├── linkis-et-validator.out
    │ ├── linkis-mg-eureka-gc.log
    │ ├── linkis-mg-eureka.log
    │ ├── linkis-mg-eureka.out
    │ ├── linkis-mg-gateway-gc.log
    │ ├── linkis-mg-gateway.log
    │ ├── linkis-mg-gateway.out
    │ ├── linkis-ps-bml-gc.log
    │ ├── linkis-ps-bml.log
    │ ├── linkis-ps-bml.out
    │ ├── linkis-ps-cs-gc.log
    │ ├── linkis-ps-cs.log
    │ ├── linkis-ps-cs.out
    │ ├── linkis-ps-datasource-gc.log
    │ ├── linkis-ps-datasource.log
    │ ├── linkis-ps-datasource.out
    │ ├── linkis-ps-publicservice-gc.log
    │ ├── linkis-ps-publicservice.log
    │ └── linkis-ps-publicservice.out
    ├── pid ──Process ID of all microservices
    │ ├── linkis_cg-engineconnmanager.pid ──EngineConnManager microservice
    │ ├── linkis_cg-engineconnplugin.pid ──EngineConnPlugin microservice
    │ ├── linkis_cg-entrance.pid ──Engine entrance microservice
    │ ├── linkis_cg-linkismanager.pid ──linkis manager microservice
    │ ├── linkis_mg-eureka.pid ──eureka microservice
    │ ├── linkis_mg-gateway.pid ──gateway microservice
    │ ├── linkis_ps-bml.pid ──material library microservice
    │ ├── linkis_ps-cs.pid ──Context microservice
    │ ├── linkis_ps-datasource.pid ──Data source microservice
    │ └── linkis_ps-publicservice.pid ──public microservice
    └── sbin ──microservice start and stop script directory
        ├── ext ──Start and stop script directory of each microservice
        ├── linkis-daemon.sh ── Quick start and stop, restart a single microservice script
        ├── linkis-start-all.sh ── Start all microservice scripts with one click
        └── linkis-stop-all.sh ── Stop all microservice scripts with one click

    Configuration item modification

    After executing install.sh in the bin directory to complete the Linkis installation, you need to modify the configuration items. All configuration items are located in the conf directory. Normally you need to modify the three configuration files db.sh, linkis.properties, and linkis-env.sh. For project installation and configuration, please refer to the article "Linkis1.0 Installation"

    Microservice start and stop

    After modifying the configuration items, you can start the microservice in the sbin directory. The names of all microservices are as follows:

    ├── linkis-cg-engineconnmanager  ──engine management service
    ├── linkis-cg-engineplugin  ──EngineConnPlugin management service
    ├── linkis-cg-entrance  ──computing governance entrance service
    ├── linkis-cg-linkismanager  ──computing governance management service
    ├── linkis-mg-eureka  ──microservice registry service
    ├── linkis-mg-gateway  ──Linkis gateway service
    ├── linkis-ps-bml  ──material library service
    ├── linkis-ps-cs  ──context service
    ├── linkis-ps-datasource  ──data source service
    └── linkis-ps-publicservice  ──public service

    Microservice abbreviation:

    Abbreviation | Full English Name | Full Chinese Name
    cg | Computation Governance | Computing Governance
    mg | Microservice Governance | Microservice Governance
    ps | Public Enhancement Service | Public Enhancement Service

    In the past, to start and stop a single microservice you needed to enter the bin directory of each microservice and execute its start/stop script; with many microservices this was troublesome and added a lot of directory switching. Linkis 1.0 places all microservice start and stop scripts in the sbin directory, so only a single entry script needs to be executed.

    Under the Linkis/sbin directory:

    1.Start all microservices at once:

    sh linkis-start-all.sh

    2.Shut down all microservices at once

    sh linkis-stop-all.sh

    3.Start a single microservice (the service name must have the linkis prefix removed, such as mg-eureka)

    sh linkis-daemon.sh start service-name

    For example:

    sh linkis-daemon.sh start mg-eureka

    4.Shut down a single microservice

    sh linkis-daemon.sh stop service-name

    For example:

    sh linkis-daemon.sh stop mg-eureka

    5.Restart a single microservice

    sh linkis-daemon.sh restart service-name

    For example:

    sh linkis-daemon.sh restart mg-eureka

    6.View the status of a single microservice

    sh linkis-daemon.sh status service-name

    For example:

    sh linkis-daemon.sh status mg-eureka
    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/involve_knife4j_into_linkis/index.html b/docs/1.1.3/deployment/involve_knife4j_into_linkis/index.html index e9da0246d3a..0255a71632d 100644 --- a/docs/1.1.3/deployment/involve_knife4j_into_linkis/index.html +++ b/docs/1.1.3/deployment/involve_knife4j_into_linkis/index.html @@ -7,7 +7,7 @@ Involve Knife4j into Linkis | Apache Linkis - + @@ -21,7 +21,7 @@

    For detailed usage guidelines, please visit the knife4j official website: https://doc.xiaominfo.com/knife4j/

    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/involve_prometheus_into_linkis/index.html b/docs/1.1.3/deployment/involve_prometheus_into_linkis/index.html index 57db0bc5c08..6370a220542 100644 --- a/docs/1.1.3/deployment/involve_prometheus_into_linkis/index.html +++ b/docs/1.1.3/deployment/involve_prometheus_into_linkis/index.html @@ -7,7 +7,7 @@ Involve Prometheus into Linkis | Apache Linkis - + @@ -31,7 +31,7 @@ Then you can view one living dashboard of Linkis there.

    You can also try to integrate the Prometheus Alertmanager with your own webhook, where you can see whether the alert message is fired.

    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/involve_skywalking_into_linkis/index.html b/docs/1.1.3/deployment/involve_skywalking_into_linkis/index.html index 58970bc3bb1..fdd1ddb5a29 100644 --- a/docs/1.1.3/deployment/involve_skywalking_into_linkis/index.html +++ b/docs/1.1.3/deployment/involve_skywalking_into_linkis/index.html @@ -7,7 +7,7 @@ Involve SkyWaling into Linkis | Apache Linkis - + @@ -20,7 +20,7 @@

    Modify the configuration item SKYWALKING_AGENT_PATH in linkis-env.sh of Linkis. Set it to the path to skywalking-agent.jar.

    SKYWALKING_AGENT_PATH=/path/to/skywalking-agent.jar

    Then start Linkis.

    $ bash linkis-start-all.sh

    4. Result display#

    The UI port of Linkis starts at port 8080 by default. After Linkis is started with SkyWalking enabled, open the SkyWalking UI; if you can see the following picture, the integration succeeded.

    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/linkis_scriptis_install/index.html b/docs/1.1.3/deployment/linkis_scriptis_install/index.html index dfa37f08796..de294eb81aa 100644 --- a/docs/1.1.3/deployment/linkis_scriptis_install/index.html +++ b/docs/1.1.3/deployment/linkis_scriptis_install/index.html @@ -7,7 +7,7 @@ Installation and deployment of tool scripts | Apache Linkis - + @@ -26,7 +26,7 @@

    After modifying the configuration, reload the nginx configuration

    sudo nginx -s reload

    Note the difference between root and alias used in the nginx location configuration block

    • The processing result of root is: root path + location path.
    • The result of alias processing is: replace the location path with the alias path.
    • alias is the definition of a directory alias, root is the definition of the top-level directory

    5 scriptis usage steps#

    5.1 Log in to the linkis console normally#

    # http://10.10.10.10:8080/#/
    http://nginxIp:port/#/

    Because scriptis requires login verification, you need to log in first and get the cookie.

    5.2 Access the scriptis page after successful login#

    # http://10.10.10.10:8080/scriptis/#/home
    http://nginxIp:port/scriptis/#/home

    nginxIp: the nginx server ip; port: the port configured in nginx for the linkis management console; scriptis: the nginx location configured for requesting the static files of the scriptis project (can be customized)

    5.3 Using scriptis#

    Take creating a new SQL query task as an example.

    step1 Create a new script and select the script type as sql type

    Rendering

    step2 Enter the statement to be queried

    Rendering

    step3 run

    Rendering

    step4 View Results

    Rendering

    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/quick_deploy/index.html b/docs/1.1.3/deployment/quick_deploy/index.html index 8048afafde9..5c0e3cc80de 100644 --- a/docs/1.1.3/deployment/quick_deploy/index.html +++ b/docs/1.1.3/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ Quick Deployment | Apache Linkis - + @@ -21,7 +21,7 @@ ##:If your hive version is not 1.2.1, you need to modify the following parameter: #HIVE_VERSION=2.3.3

    f. Modify the database configuration#

    vi deploy-config/db.sh 
    # set the connection information of the database
    # including ip address, database's name, username and port
    # Mainly used to store user's customized variables, configuration parameters, UDFs, and small functions, and to provide underlying storage of the JobHistory.
    MYSQL_HOST=
    MYSQL_PORT=
    MYSQL_DB=
    MYSQL_USER=
    MYSQL_PASSWORD=
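    For instance, a single-node test deployment might fill these in as follows (a sketch; all values are placeholders for your own environment):

    MYSQL_HOST=127.0.0.1
    MYSQL_PORT=3306
    MYSQL_DB=linkis
    MYSQL_USER=linkis
    MYSQL_PASSWORD=<your-password>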

    3. Installation and Startup#

    1. Execute the installation script:#

    sh bin/install.sh

    2. Installation steps#

    • The install.sh script will ask you whether to initialize the database and import the metadata.

    A user might run the install.sh script repeatedly and accidentally clear all data in the databases. Therefore, each time install.sh is executed, the user is asked whether they need to initialize the database and import the metadata.

    Please select yes on the first installation.

    Please note: If you are upgrading the existing environment of Linkis from 0.X to 1.0, please do not choose yes directly, refer to Linkis1.0 Upgrade Guide first.

    3. Whether install successfully#

    You can check whether the installation is successful or not by viewing the logs printed on the console.

    If there is an error message, check the specific reason for that error or refer to FAQ for help.
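    If the console output is not enough, the service logs under the installation directory are the next place to look (a sketch; pick the log of whichever service failed):

    tail -n 200 ${LINKIS_HOME}/logs/linkis-mg-eureka.out
    tail -n 200 ${LINKIS_HOME}/logs/linkis-mg-gateway.log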

    4. Add mysql driver package#

    Note

    Because the mysql-connector-java driver is licensed under GPL 2.0, which does not comply with the license policy of the Apache open source agreement, starting from version 1.0.3 the official Apache release package does not include the mysql-connector-java-x.x.x.jar dependency by default. You need to add the dependency to the corresponding lib directories during installation and deployment

    To download the mysql driver, take version 5.1.49 as an example: download link https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.49/mysql-connector-java-5.1.49.jar

    Copy the mysql driver package to the lib package path

    cp mysql-connector-java-5.1.49.jar {LINKIS_HOME}/lib/linkis-spring-cloud-services/linkis-mg-gateway/
    cp mysql-connector-java-5.1.49.jar {LINKIS_HOME}/lib/linkis-commons/public-module/

    5. Linkis quick startup#

    Notice that if you use DSS or other projects that rely on Linkis version < 1.1.1, you also need to modify the ${LINKIS_HOME}/conf/linkis.properties file:

    echo "wds.linkis.session.ticket.key=bdp-user-ticket-id" >> linkis.properties

    (1). Start services

    Run the following commands on the installation directory to start all services.

    sh sbin/linkis-start-all.sh

    (2). Check if start successfully

    You can check the startup status of the services on the Eureka, here is the way to check:

    Open http://${EUREKA_INSTALL_IP}:${EUREKA_PORT} on the browser and check if services have registered successfully.

    If you have not specified EUREKA_INSTALL_IP and EUREKA_PORT in config.sh, then the HTTP address is http://127.0.0.1:20303

    As shown in the figure below, if all of the following micro-services are registered in Eureka, it means that they have started successfully and are able to work.

    Linkis1.0_Eureka
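    If no browser is at hand, the registry can also be queried from the shell via Eureka's standard REST endpoint (a sketch; /eureka/apps is the stock Netflix Eureka registry API, not a Linkis-specific interface):

    curl -s http://127.0.0.1:20303/eureka/apps | grep '<name>'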

    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/sourcecode_hierarchical_structure/index.html b/docs/1.1.3/deployment/sourcecode_hierarchical_structure/index.html index 411ef835e05..751e6f3a1ce 100644 --- a/docs/1.1.3/deployment/sourcecode_hierarchical_structure/index.html +++ b/docs/1.1.3/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Source Code Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Source Code Directory Structure

    Description of the hierarchical directory structure of the Linkis source code. If you want to learn more about the Linkis modules, please check the Linkis related architecture design

    |-- assembly-combined-package //Compile the module of the entire project
    |        |-- assembly-combined
    |        |-- bin
    |        |-- deploy-config
    |        |-- src
    |-- linkis-commons //Core abstraction, which contains all common modules
    |        |-- linkis-common //Common module, built-in many common tools
    |        |-- linkis-hadoop-common
    |        |-- linkis-httpclient //Java SDK top-level interface
    |        |-- linkis-message-scheduler
    |        |-- linkis-module
    |        |-- linkis-mybatis //SpringCloud's Mybatis module
    |        |-- linkis-protocol
    |        |-- linkis-rpc //RPC module, complex two-way communication based on Feign
    |        |-- linkis-scheduler //General scheduling module
    |        |-- linkis-storage
    |
    |-- linkis-computation-governance //computing governance service
    |        |-- linkis-client //Java SDK, users can directly access Linkis through Client
    |        |-- linkis-computation-governance-common
    |        |-- linkis-engineconn
    |        |-- linkis-engineconn-manager
    |        |-- linkis-entrance //General low-level entrance module
    |        |-- linkis-entrance-client
    |        |-- linkis-jdbc-driver
    |        |-- linkis-manager
    |
    |-- linkis-engineconn-plugins
    |        |-- engineconn-plugins
    |        |-- linkis-engineconn-plugin-framework
    |
    |-- linkis-extensions
    |        |-- linkis-io-file-client
    |-- linkis-orchestrator
    |        |-- linkis-code-orchestrator
    |        |-- linkis-computation-orchestrator
    |        |-- linkis-orchestrator-core
    |        |-- plugin
    |-- linkis-public-enhancements //Public enhancement services
    |        |-- linkis-bml //Material library
    |        |-- linkis-context-service //Unified context
    |        |-- linkis-datasource //Data source service
    |        |-- linkis-publicservice //Public Service
    |-- linkis-spring-cloud-services //Microservice governance
    |        |-- linkis-service-discovery
    |        |-- linkis-service-gateway //Gateway
    |-- db //Database information
    |-- license-doc //license details
    |        |-- license //The license of the background project
    |        |-- ui-license //License of linkis management desk
    |-- tool //Tool script
    |        |-- check.sh
    |        |-- dependencies
    |
    |-- web //Management desk code of linkis
    |
    |-- scalastyle-config.xml //Scala code format check configuration file
    |-- CONTRIBUTING.md
    |-- CONTRIBUTING_CN.md
    |-- DISCLAIMER-WIP
    |-- LICENSE //LICENSE of the project source code
    |-- LICENSE-binary //LICENSE of binary package
    |-- LICENSE-binary-ui //LICENSE of the front-end compiled package
    |-- NOTICE //NOTICE of project source code
    |-- NOTICE-binary //NOTICE of binary package
    |-- NOTICE-binary-ui //NOTICE of front-end binary package
    |-- licenses-binary //The detailed dependent license file of the binary package
    |-- licenses-binary-ui //The license file that the front-end compilation package depends on in detail
    |-- README.md
    |-- README_CN.md
    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/start_metadatasource/index.html b/docs/1.1.3/deployment/start_metadatasource/index.html index ad075a40f61..3372084cb76 100644 --- a/docs/1.1.3/deployment/start_metadatasource/index.html +++ b/docs/1.1.3/deployment/start_metadatasource/index.html @@ -7,7 +7,7 @@ DataSource | Apache Linkis - + @@ -71,7 +71,7 @@ }}
    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/unpack_hierarchical_structure/index.html b/docs/1.1.3/deployment/unpack_hierarchical_structure/index.html index 074222c3c2f..eee0a511e65 100644 --- a/docs/1.1.3/deployment/unpack_hierarchical_structure/index.html +++ b/docs/1.1.3/deployment/unpack_hierarchical_structure/index.html @@ -7,7 +7,7 @@ installation package directory structure | Apache Linkis - + @@ -17,7 +17,7 @@
    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/web_install/index.html b/docs/1.1.3/deployment/web_install/index.html index 8585c807afd..b085d215465 100644 --- a/docs/1.1.3/deployment/web_install/index.html +++ b/docs/1.1.3/deployment/web_install/index.html @@ -7,7 +7,7 @@ Linkis Console Deployment | Apache Linkis - + @@ -21,7 +21,7 @@
    1. Copy the front-end package to the corresponding directory: /appcom/Install/linkis/dist; # The directory where the front-end package is decompressed

    2. Start the service sudo systemctl restart nginx

    3. After execution, you can directly access it in the Chrome browser: http://nginx_ip:nginx_port

    3. Common problems#

    (1) Upload file size limit

    sudo vi /etc/nginx/nginx.conf

    Change upload size

    client_max_body_size 200m

    (2) Interface timeout

    sudo vi /etc/nginx/conf.d/linkis.conf

    Change interface timeout

    proxy_read_timeout 600s
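    After changing either setting, reload nginx for the change to take effect (a standard nginx command):

    sudo nginx -s reload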
    - + \ No newline at end of file diff --git a/docs/1.1.3/development/linkis_compile_and_package/index.html b/docs/1.1.3/development/linkis_compile_and_package/index.html index 3591d14d3c3..7adc37b33bc 100644 --- a/docs/1.1.3/development/linkis_compile_and_package/index.html +++ b/docs/1.1.3/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Compile And Package | Apache Linkis - + @@ -20,7 +20,7 @@ Modify the dependency hadoop-hdfs to hadoop-hdfs-client:

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId> <!-- Just replace this line with <artifactId>hadoop-hdfs-client</artifactId> -->
        <version>${hadoop.version}</version>
    </dependency>

    Modify hadoop-hdfs to:

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs-client</artifactId>
        <version>${hadoop.version}</version>
    </dependency>

    5.2 How to modify the Spark and Hive versions that Linkis depends on#

    Here's an example of changing the version of Spark. Go to the directory where the Spark engine is located and manually modify the Spark version information of the pom.xml file as follows:

    cd incubator-linkis-x.x.x/linkis-engineconn-plugins/engineconn-plugins/spark
    vim pom.xml

    <properties>
        <spark.version>2.4.3</spark.version> <!-- Modify the Spark version number here -->
    </properties>

    Modifying the version of other engines is similar to modifying the Spark version. First, enter the directory where the relevant engine is located, and manually modify the engine version information in the pom.xml file.

    Then please refer to 4. Compile an engine

    - + \ No newline at end of file diff --git a/docs/1.1.3/development/linkis_config/index.html b/docs/1.1.3/development/linkis_config/index.html index 1c8f65c55fe..0c0b5f110d6 100644 --- a/docs/1.1.3/development/linkis_config/index.html +++ b/docs/1.1.3/development/linkis_config/index.html @@ -7,7 +7,7 @@ Introduction to Linkis Configuration Parameters | Apache Linkis - + @@ -27,7 +27,7 @@ It mainly specifies the startup parameters and runtime parameters of the engine. These parameters can be set on the client side. It is recommended to use the client side for personalized submission settings. Only the default values ​​are set on the page.

    - + \ No newline at end of file diff --git a/docs/1.1.3/development/linkis_debug/index.html b/docs/1.1.3/development/linkis_debug/index.html index 0b3a71d6d64..4d73e3f3953 100644 --- a/docs/1.1.3/development/linkis_debug/index.html +++ b/docs/1.1.3/development/linkis_debug/index.html @@ -7,7 +7,7 @@ Linkis Debug | Apache Linkis - + @@ -49,7 +49,7 @@ y

    - + \ No newline at end of file diff --git a/docs/1.1.3/development/linkis_debug_in_mac/index.html b/docs/1.1.3/development/linkis_debug_in_mac/index.html index 0dc1751510e..cc8cba5c896 100644 --- a/docs/1.1.3/development/linkis_debug_in_mac/index.html +++ b/docs/1.1.3/development/linkis_debug_in_mac/index.html @@ -7,7 +7,7 @@ Linkis Debug In Mac | Apache Linkis - + @@ -51,7 +51,7 @@ wds.linkis.engineconn.plugin.loader.store.path=/Users/leojie/other_project/apache/linkis/incubator-linkis/linkis-engineconn-plugins/shell/target/out

    The two configurations here are mainly to specify the root directory of the engine storage, and the main purpose of specifying it as target/out is that after the engine-related code or configuration changes, the engineplugin service can be restarted directly to take effect.

    3.12 Set sudo password-free for the current user#

    When the engine is started, sudo needs to be used to execute the shell command to start the engine process. The current user on the mac generally needs to enter a password when using sudo. Therefore, it is necessary to set sudo password-free for the current user. The setting method is as follows:

    sudo chmod u-w /etc/sudoers
    sudo visudo
    # Replace "#%admin ALL=(ALL) ALL" with "%admin ALL=(ALL) NOPASSWD: ALL"
    # Save the file and exit

    3.13 Service Testing#

    Make sure that the above services are all successfully started, and then test and submit the shell script job in postman.

    First visit the login interface to generate a cookie:

    login

    Then submit the shell code for execution

    POST: http://127.0.0.1:9001/api/rest_j/v1/entrance/submit

    body parameter:

    {  "executionContent": {    "code": "echo 'hello'",    "runType": "shell"  },  "params": {    "variable": {      "testvar": "hello"    },    "configuration": {      "runtime": {},      "startup": {}    }  },  "source": {    "scriptPath": "file:///tmp/hadoop/test.sql"  },  "labels": {    "engineType": "shell-1",    "userCreator": "leojie-IDE"  }}

    The execution result:

    {    "method": "/api/entrance/submit",    "status": 0,    "message": "OK",    "data": {        "taskID": 1,        "execID": "exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0"    }}

    Finally, check the running status of the task and get the running result set:

    GET http://127.0.0.1:9001/api/rest_j/v1/entrance/exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0/progress

    {    "method": "/api/entrance/exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0/progress",    "status": 0,    "message": "OK",    "data": {        "progress": 1,        "progressInfo": [],        "execID": "exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0"    }}

    GET http://127.0.0.1:9001/api/rest_j/v1/jobhistory/1/get

    GET http://127.0.0.1:9001/api/rest_j/v1/filesystem/openFile?path=file:///Users/leojie/software/linkis/data/resultSetDir/leojie/linkis/2022-07-16/214859/IDE/1/1_0.dolphin

    {    "method": "/api/filesystem/openFile",    "status": 0,    "message": "OK",    "data": {        "metadata": "NULL",        "totalPage": 0,        "totalLine": 1,        "page": 1,        "type": "1",        "fileContent": [            [                "hello"            ]        ]    }}
    - + \ No newline at end of file diff --git a/docs/1.1.3/development/new_engine_conn/index.html b/docs/1.1.3/development/new_engine_conn/index.html index 2fd4be803aa..de629c842de 100644 --- a/docs/1.1.3/development/new_engine_conn/index.html +++ b/docs/1.1.3/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ How To Quickly Implement A New Engine | Apache Linkis - + @@ -52,7 +52,7 @@ const NODEICON = { [NODETYPE.JDBC]: { icon: jdbc, class: {'jdbc': true} },}

    Add the icon of the new engine in the web/src/apps/workflows/module/process/images/newIcon/ directory

    web/src/apps/workflows/module/process/images/newIcon/jdbc

    Also when contributing to the community, please consider the lincese or copyright of the svg file.

    3. Chapter Summary#

    The above content records the implementation process of the new engine, as well as some additional engine configurations that need to be done. At present, the expansion process of a new engine is still relatively cumbersome, and it is hoped that the expansion and installation of the new engine can be optimized in subsequent versions.

    - + \ No newline at end of file diff --git a/docs/1.1.3/development/web_build/index.html b/docs/1.1.3/development/web_build/index.html index 067bdfef15a..a4a045f49b0 100644 --- a/docs/1.1.3/development/web_build/index.html +++ b/docs/1.1.3/development/web_build/index.html @@ -7,7 +7,7 @@ Linkis Console Compile | Apache Linkis - + @@ -17,7 +17,7 @@ When you run the project in this way, the effect of your code changes will be dynamically reflected in the browser.

    Note: Because the project is developed separately from the front and back ends, when running on a local browser, the browser needs to be set to cross domains to access the back-end interface. For specific setting, please refer to solve the chrome cross domain problem.

    6. Common problem#

    6.1 npm install cannot succeed#

    If you encounter this situation, you can use the domestic Taobao npm mirror:

    npm install -g cnpm --registry=https://registry.npm.taobao.org

    Then, replace the npm install command by executing the following command

    cnpm install

    Note that when the project is started and packaged, you can still use the npm run build and npm run serve commands

    - + \ No newline at end of file diff --git a/docs/1.1.3/engine_usage/flink/index.html b/docs/1.1.3/engine_usage/flink/index.html index 9543a1b69fe..7303aaa49ce 100644 --- a/docs/1.1.3/engine_usage/flink/index.html +++ b/docs/1.1.3/engine_usage/flink/index.html @@ -7,7 +7,7 @@ Flink Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ EngineConnPlugin Installation

    2.3 Flink engine tags#

    Linkis 1.0 manages engines through tags, so we need to insert the corresponding tag data into our database; the way of inserting is shown below.

    EngineConnPlugin Installation > 2.2 Configuration modification of management console (optional)

    3. The use of Flink engine#

    Preparation: queue setting#

    The Flink engine of Linkis 1.0 is started by flink on yarn, so you need to specify the queue used by the user. The way to specify the queue is shown in Figure 3-1.

    Figure 3-1 Queue settings

    Background knowledge: two ways to use the Flink engine#

    Linkis' Flink engine has two execution methods. One is the ComputationEngineConn method, which is mainly used in DSS-Scriptis or Streamis-Datasource for debugging, sampling, and verifying the correctness of the Flink code; the other is the OnceEngineConn method, which is mainly used to start a streaming application in the Streamis production center.

    Background knowledge: the Connector plug-ins of FlinkSQL#

    FlinkSQL can support a variety of data sources, such as binlog, kafka, hive, etc. If you want to use these data sources in Flink code, you need to put the plug-in jar packages of these connectors into the lib of the flink engine and restart the Linkis EnginePlugin service. For example, if you want to use binlog as a data source in your FlinkSQL, you need to put flink-connector-mysql-cdc-1.1.1.jar into the lib of the flink engine.
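    Placing the connector jar can be sketched as follows, assuming the engine directory layout described in the EngineConnPlugin installation document (the exact path under the flink engine directory may differ in your deployment):

    # path is an assumption based on the default engine plugin layout
    cp flink-connector-mysql-cdc-1.1.1.jar ${LINKIS_HOME}/lib/linkis-engineconn-plugins/flink/dist/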

    cd ${LINKIS_HOME}/sbin
    sh linkis-daemon.sh restart cg-engineplugin

    3.1 ComputationEngineConn method#

    In order to facilitate sampling and debugging, we have added a script type of fql to Scriptis, which is specifically used to execute FlinkSQL. But you need to ensure that your DSS has been upgraded to DSS1.0.0. After upgrading to DSS1.0.0, you can directly enter Scriptis and create a new fql script for editing and execution.

    FlinkSQL writing example, taking binlog as an example

    CREATE TABLE mysql_binlog (
      id INT NOT NULL,
      name STRING,
      age INT
    ) WITH (
      'connector' = 'mysql-cdc',
      'hostname' = 'ip',
      'port' = 'port',
      'username' = 'username',
      'password' = 'password',
      'database-name' = 'dbname',
      'table-name' = 'tablename',
      'debezium.snapshot.locking.mode' = 'none' -- It is recommended to add this, otherwise the table will be locked
    );
    select * from mysql_binlog where id > 10;

    When debugging with select syntax in Scriptis, the Flink engine has an automatic cancel mechanism: when the specified time is reached or the number of sampled rows reaches the specified threshold, the Flink engine actively cancels the task and persists the result set obtained so far; the front end then calls the open-result-set interface to display the result set.

    3.2 Task submission via Linkis-cli#

    After Linkis 1.0, a cli method is provided to submit tasks. We only need to specify the corresponding EngineConn and CodeType tag types. The use of Flink is as follows:

    sh ./bin/linkis-cli -engineType flink-1.12.2 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to: Linkis CLI Manual.

    3.3 OnceEngineConn method#

    The OnceEngineConn method is used to formally start Flink streaming applications. Specifically, it calls LinkisManager's createEngineConn interface through LinkisManagerClient and sends the code to the created Flink engine, which then starts executing. This method can be called by other systems, such as Streamis. The use of the Client is also very simple: first create a new maven project, or introduce the following dependency into your project

    <dependency>
        <groupId>com.webank.wedatasphere.linkis</groupId>
        <artifactId>linkis-computation-client</artifactId>
        <version>${linkis.version}</version>
    </dependency>

    Then create a new scala test file and click Execute. The example below reads binlog data from one table and inserts it into another mysql database table. Note that you must create a resources directory in the maven project, place a linkis.properties file in it, and specify the gateway address and api version of linkis, such as

    wds.linkis.server.version=v1
    wds.linkis.gateway.url=http://ip:9001/
    object OnceJobTest {
      def main(args: Array[String]): Unit = {
        val sql = """CREATE TABLE mysql_binlog (
                    | id INT NOT NULL,
                    | name STRING,
                    | age INT
                    |) WITH (
                    | 'connector' = 'mysql-cdc',
                    | 'hostname' = 'ip',
                    | 'port' = 'port',
                    | 'username' = '${username}',
                    | 'password' = '${password}',
                    | 'database-name' = '${database}',
                    | 'table-name' = '${tablename}',
                    | 'debezium.snapshot.locking.mode' = 'none'
                    |);
                    |CREATE TABLE sink_table (
                    | id INT NOT NULL,
                    | name STRING,
                    | age INT,
                    | primary key(id) not enforced
                    |) WITH (
                    |  'connector' = 'jdbc',
                    |  'url' = 'jdbc:mysql://${ip}:port/${database}',
                    |  'table-name' = '${tablename}',
                    |  'driver' = 'com.mysql.jdbc.Driver',
                    |  'username' = '${username}',
                    |  'password' = '${password}'
                    |);
                    |INSERT INTO sink_table SELECT id, name, age FROM mysql_binlog;
                    |""".stripMargin
        val onceJob = SimpleOnceJob.builder().setCreateService("Flink-Test")
          .addLabel(LabelKeyUtils.ENGINE_TYPE_LABEL_KEY, "flink-1.12.2")
          .addLabel(LabelKeyUtils.USER_CREATOR_LABEL_KEY, "hadoop-Streamis")
          .addLabel(LabelKeyUtils.ENGINE_CONN_MODE_LABEL_KEY, "once")
          .addStartupParam(Configuration.IS_TEST_MODE.key, true)
          // .addStartupParam("label." + LabelKeyConstant.CODE_TYPE_KEY, "sql")
          .setMaxSubmitTime(300000)
          .addExecuteUser("hadoop")
          .addJobContent("runType", "sql")
          .addJobContent("code", sql)
          .addSource("jobName", "OnceJobTest")
          .build()
        onceJob.submit()
        println(onceJob.getId)
        onceJob.waitForCompleted()
        System.exit(0)
      }
    }
    - + \ No newline at end of file diff --git a/docs/1.1.3/engine_usage/hive/index.html b/docs/1.1.3/engine_usage/hive/index.html index 5cc70ecc631..5e898f7dc08 100644 --- a/docs/1.1.3/engine_usage/hive/index.html +++ b/docs/1.1.3/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive Engine Usage | Apache Linkis - + @@ -26,7 +26,7 @@ </loggers></configuration>
    - + \ No newline at end of file diff --git a/docs/1.1.3/engine_usage/jdbc/index.html b/docs/1.1.3/engine_usage/jdbc/index.html index 8210404dd29..01e19474fed 100644 --- a/docs/1.1.3/engine_usage/jdbc/index.html +++ b/docs/1.1.3/engine_usage/jdbc/index.html @@ -7,7 +7,7 @@ JDBC Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

        Map<String, Object> labels = new HashMap<String, Object>();
        labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "jdbc-4"); // required engineType Label
        labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
        labels.put(LabelKeyConstant.CODE_TYPE_KEY, "jdbc"); // required codeType

    3.2 How to use Linkis-cli#

    After Linkis 1.0, you can submit tasks through cli. We only need to specify the corresponding EngineConn and CodeType tag types. The use of JDBC is as follows:

    sh ./bin/linkis-cli -engineType jdbc-4 -codeType jdbc -code "show tables"  -submitUser hadoop -proxyUser hadoop

    The specific usage can refer to Linkis CLI Manual.

    3.3 How to use Scriptis#

    The way to use Scriptis is the simplest. You can go directly to Scriptis, right-click the directory and create a new JDBC script, write JDBC code and click Execute.

    The execution principle of JDBC is to load the JDBC Driver, submit the sql to the SQL server for execution, obtain the result set, and return it.

    Figure 3-2 Screenshot of the execution effect of JDBC

    4. JDBC EngineConn user settings#

    JDBC user settings are mainly JDBC connection information, but it is recommended that users encrypt and manage this password and other information.

    - + \ No newline at end of file diff --git a/docs/1.1.3/engine_usage/openlookeng/index.html b/docs/1.1.3/engine_usage/openlookeng/index.html index 97315f10468..6e9dec23fd2 100644 --- a/docs/1.1.3/engine_usage/openlookeng/index.html +++ b/docs/1.1.3/engine_usage/openlookeng/index.html @@ -7,7 +7,7 @@ OpenLookEng Engine | Apache Linkis - + @@ -19,7 +19,7 @@ For the openlookeng task, you only need to modify the EngineConnType and CodeType parameters in the Demo:

        Map<String, Object> labels = new HashMap<String, Object>();
        labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "openlookeng-1.5.0"); // required engineType Label
        labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
        labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType

    3.2 Task submission via Linkis-cli#

    After Linkis 1.0, the cli method is provided to submit tasks. We only need to specify the corresponding EngineConn and CodeType tag types. The use of openlookeng is as follows:

    sh ./bin/linkis-cli -engineType openlookeng-1.5.0 -codeType sql -code 'show databases;' -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to: Linkis CLI Manual.

    - + \ No newline at end of file diff --git a/docs/1.1.3/engine_usage/overview/index.html b/docs/1.1.3/engine_usage/overview/index.html index 6d5943dad57..7fd4e381ef0 100644 --- a/docs/1.1.3/engine_usage/overview/index.html +++ b/docs/1.1.3/engine_usage/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -16,7 +16,7 @@         The engine is a component that provides users with data processing and analysis capabilities. Currently, it has been connected to Linkis's engine, including mainstream big data computing engines Spark, Hive, Presto, etc. , There are also engines with the ability to process data in scripts such as python and Shell. DataSphereStudio is a one-stop data operation platform docked with Linkis. Users can conveniently use the engine supported by Linkis in DataSphereStudio to complete interactive data analysis tasks and workflow tasks.

    Engine | Whether to support Scriptis | Whether to support workflow
    Spark | Support | Support
    Hive | Support | Support
    Presto | Support | Support
    ElasticSearch | Support | Support
    Python | Support | Support
    Shell | Support | Support
    JDBC | Support | Support
    MySQL | Support | Support
    Flink | Support | Support

    2. Document structure#

    You can refer to the following documents for the related documents of the engines that have been accessed.

    - + \ No newline at end of file diff --git a/docs/1.1.3/engine_usage/pipeline/index.html b/docs/1.1.3/engine_usage/pipeline/index.html index 8be89c8703e..39c34710f93 100644 --- a/docs/1.1.3/engine_usage/pipeline/index.html +++ b/docs/1.1.3/engine_usage/pipeline/index.html @@ -7,7 +7,7 @@ pipeline engine | Apache Linkis - + @@ -20,7 +20,7 @@

    - + \ No newline at end of file diff --git a/docs/1.1.3/engine_usage/python/index.html b/docs/1.1.3/engine_usage/python/index.html index a62ae564cbb..99d75679cd7 100644 --- a/docs/1.1.3/engine_usage/python/index.html +++ b/docs/1.1.3/engine_usage/python/index.html @@ -7,7 +7,7 @@ Python Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Gateway, and then the Python EngineConn submits the code to the python executor for execution.

    Figure 3-1 Screenshot of the execution effect of python

    4. Python EngineConn user settings#

In addition to the above EngineConn configuration, users can also make custom settings, such as the Python version and the modules that Python needs to load.

    Figure 4-1 User-defined configuration management console of python

- + \ No newline at end of file diff --git a/docs/1.1.3/engine_usage/shell/index.html b/docs/1.1.3/engine_usage/shell/index.html index 06c26a33c56..85879d754b8 100644 --- a/docs/1.1.3/engine_usage/shell/index.html +++ b/docs/1.1.3/engine_usage/shell/index.html @@ -7,7 +7,7 @@ Shell Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use the shell engine, you only need to make the following changes:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "shell-1"); // required engineType Label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "shell"); // required codeType

    3.2 How to use Linkis-cli#

Since Linkis 1.0, tasks can be submitted via the cli. You only need to specify the corresponding EngineConn and CodeType label types. The use of shell is as follows:

    sh ./bin/linkis-cli -engineType shell-1 -codeType shell -code "echo \"hello\" "  -submitUser hadoop -proxyUser hadoop

For specific usage, please refer to the Linkis CLI Manual.

    3.3 How to use Scriptis#

Using Scriptis is the simplest way: enter Scriptis directly, right-click a directory to create a new shell script, write the shell code, and click Execute.

The shell engine executes as follows: the shell EngineConn starts a system process via Java's built-in ProcessBuilder, redirects the output of the process back to the EngineConn, and writes it to the log.
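For illustration, a minimal Java sketch of the ProcessBuilder pattern described above is shown below. It is not the actual Linkis shell EngineConn code; it only demonstrates starting a system process and redirecting its output the way the paragraph describes.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;

    // Minimal sketch of the ProcessBuilder mechanism described above;
    // not the actual Linkis shell EngineConn implementation.
    public class ShellProcessExample {
        public static void main(String[] args) throws Exception {
            ProcessBuilder builder = new ProcessBuilder("sh", "-c", "echo hello");
            builder.redirectErrorStream(true); // merge stderr into stdout
            Process process = builder.start();
            try (BufferedReader reader = new BufferedReader(
                    new InputStreamReader(process.getInputStream()))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    // A real engine would write this to its log instead.
                    System.out.println(line);
                }
            }
            System.out.println("exit code: " + process.waitFor());
        }
    }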

    Figure 3-1 Screenshot of shell execution effect

    4. Shell EngineConn user settings#

For the shell EngineConn, you can generally set the maximum memory of the EngineConn JVM.

    - + \ No newline at end of file diff --git a/docs/1.1.3/engine_usage/spark/index.html b/docs/1.1.3/engine_usage/spark/index.html index 27690d59b1c..218ae52f10a 100644 --- a/docs/1.1.3/engine_usage/spark/index.html +++ b/docs/1.1.3/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Figure 3-4 pyspark execution mode

    4. Spark EngineConn user settings#

In addition to the above EngineConn configuration, users can also make custom settings, such as the number of Spark session executors and the executor memory. These parameters allow users to set their own Spark parameters more freely; other Spark parameters, such as the Python version used by pyspark, can also be modified.
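As a hedged sketch, the Spark resource settings mentioned above can also be expressed programmatically in the same map style as the label demos in this document. The parameter keys below come from the Spark configuration example in the tuning section; the map name and how it is attached to a job request depend on your Linkis SDK version and should be checked against it.

    import java.util.HashMap;
    import java.util.Map;

    // Hedged sketch: building Spark resource settings as a parameter map,
    // mirroring the label-map style used by the submission demos above.
    // How this map is attached to the job request depends on the SDK version.
    public class SparkStartupParams {
        public static Map<String, Object> build() {
            Map<String, Object> startupMap = new HashMap<>();
            startupMap.put("spark.executor.instances", 3);        // number of executors
            startupMap.put("spark.executor.memory", "4g");        // memory per executor
            startupMap.put("spark.executor.cores", 2);            // cores per executor
            startupMap.put("wds.linkis.rm.yarnqueue", "default"); // target Yarn queue
            return startupMap;
        }
    }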

    Figure 4-1 Spark user-defined configuration management console

- + \ No newline at end of file diff --git a/docs/1.1.3/engine_usage/sqoop/index.html b/docs/1.1.3/engine_usage/sqoop/index.html index ea5ecc5212d..f072410c666 100644 --- a/docs/1.1.3/engine_usage/sqoop/index.html +++ b/docs/1.1.3/engine_usage/sqoop/index.html @@ -7,7 +7,7 @@ Sqoop Engine | Apache Linkis - + @@ -25,7 +25,7 @@
    def exportJob(jobBuilder: SimpleOnceJobBuilder): SubmittableSimpleOnceJob = {
      jobBuilder
        .addJobContent("sqoop.env.mapreduce.job.queuename", "queue1")
        .addJobContent("sqoop.mode", "import")
        .addJobContent("sqoop.args.connect", "jdbc:mysql://127.0.0.1:3306/exchangis")
        .addJobContent("sqoop.args.query", "select id as order, sno as great_time from" +
          " exchangis_table where sno =1 and $CONDITIONS")
        .addJobContent("sqoop.args.hcatalog.database", "hadoop")
        .addJobContent("sqoop.args.hcatalog.table", "partition_33")
        .addJobContent("sqoop.args.hcatalog.partition.keys", "month")
        .addJobContent("sqoop.args.hcatalog.partition.values", "4")
        .addJobContent("sqoop.args.num.mappers", "1")
        .build()
    }

Parameter comparison table (against the native Sqoop parameters):

Linkis parameter | Native parameter
sqoop.env.mapreduce.job.queuename <=> -Dmapreduce.job.queuename
sqoop.args.connection.manager <=> --connection-manager
sqoop.args.connection.param.file <=> --connection-param-file
sqoop.args.driver <=> --driver
sqoop.args.hadoop.home <=> --hadoop-home
sqoop.args.hadoop.mapred.home <=> --hadoop-mapred-home
sqoop.args.help <=> help
sqoop.args.password <=> --password
sqoop.args.password.alias <=> --password-alias
sqoop.args.password.file <=> --password-file
sqoop.args.relaxed.isolation <=> --relaxed-isolation
sqoop.args.skip.dist.cache <=> --skip-dist-cache
sqoop.args.username <=> --username
sqoop.args.verbose <=> --verbose
sqoop.args.append <=> --append
sqoop.args.as.avrodatafile <=> --as-avrodatafile
sqoop.args.as.parquetfile <=> --as-parquetfile
sqoop.args.as.sequencefile <=> --as-sequencefile
sqoop.args.as.textfile <=> --as-textfile
sqoop.args.autoreset.to.one.mapper <=> --autoreset-to-one-mapper
sqoop.args.boundary.query <=> --boundary-query
sqoop.args.case.insensitive <=> --case-insensitive
sqoop.args.columns <=> --columns
sqoop.args.compression.codec <=> --compression-codec
sqoop.args.delete.target.dir <=> --delete-target-dir
sqoop.args.direct <=> --direct
sqoop.args.direct.split.size <=> --direct-split-size
sqoop.args.query <=> --query
sqoop.args.fetch.size <=> --fetch-size
sqoop.args.inline.lob.limit <=> --inline-lob-limit
sqoop.args.num.mappers <=> --num-mappers
sqoop.args.mapreduce.job.name <=> --mapreduce-job-name
sqoop.args.merge.key <=> --merge-key
sqoop.args.split.by <=> --split-by
sqoop.args.table <=> --table
sqoop.args.target.dir <=> --target-dir
sqoop.args.validate <=> --validate
sqoop.args.validation.failurehandler <=> --validation-failurehandler
sqoop.args.validation.threshold <=> --validation-threshold
sqoop.args.validator <=> --validator
sqoop.args.warehouse.dir <=> --warehouse-dir
sqoop.args.where <=> --where
sqoop.args.compress <=> --compress
sqoop.args.check.column <=> --check-column
sqoop.args.incremental <=> --incremental
sqoop.args.last.value <=> --last-value
sqoop.args.enclosed.by <=> --enclosed-by
sqoop.args.escaped.by <=> --escaped-by
sqoop.args.fields.terminated.by <=> --fields-terminated-by
sqoop.args.lines.terminated.by <=> --lines-terminated-by
sqoop.args.mysql.delimiters <=> --mysql-delimiters
sqoop.args.optionally.enclosed.by <=> --optionally-enclosed-by
sqoop.args.input.enclosed.by <=> --input-enclosed-by
sqoop.args.input.escaped.by <=> --input-escaped-by
sqoop.args.input.fields.terminated.by <=> --input-fields-terminated-by
sqoop.args.input.lines.terminated.by <=> --input-lines-terminated-by
sqoop.args.input.optionally.enclosed.by <=> --input-optionally-enclosed-by
sqoop.args.create.hive.table <=> --create-hive-table
sqoop.args.hive.delims.replacement <=> --hive-delims-replacement
sqoop.args.hive.database <=> --hive-database
sqoop.args.hive.drop.import.delims <=> --hive-drop-import-delims
sqoop.args.hive.home <=> --hive-home
sqoop.args.hive.import <=> --hive-import
sqoop.args.hive.overwrite <=> --hive-overwrite
sqoop.args.hive.partition.value <=> --hive-partition-value
sqoop.args.hive.table <=> --hive-table
sqoop.args.column.family <=> --column-family
sqoop.args.hbase.bulkload <=> --hbase-bulkload
sqoop.args.hbase.create.table <=> --hbase-create-table
sqoop.args.hbase.row.key <=> --hbase-row-key
sqoop.args.hbase.table <=> --hbase-table
sqoop.args.hcatalog.database <=> --hcatalog-database
sqoop.args.hcatalog.home <=> --hcatalog-home
sqoop.args.hcatalog.partition.keys <=> --hcatalog-partition-keys
sqoop.args.hcatalog.partition.values <=> --hcatalog-partition-values
sqoop.args.hcatalog.table <=> --hcatalog-table
sqoop.args.hive.partition.key <=> --hive-partition-key
sqoop.args.map.column.hive <=> --map-column-hive
sqoop.args.create.hcatalog.table <=> --create-hcatalog-table
sqoop.args.hcatalog.storage.stanza <=> --hcatalog-storage-stanza
sqoop.args.accumulo.batch.size <=> --accumulo-batch-size
sqoop.args.accumulo.column.family <=> --accumulo-column-family
sqoop.args.accumulo.create.table <=> --accumulo-create-table
sqoop.args.accumulo.instance <=> --accumulo-instance
sqoop.args.accumulo.max.latency <=> --accumulo-max-latency
sqoop.args.accumulo.password <=> --accumulo-password
sqoop.args.accumulo.row.key <=> --accumulo-row-key
sqoop.args.accumulo.table <=> --accumulo-table
sqoop.args.accumulo.user <=> --accumulo-user
sqoop.args.accumulo.visibility <=> --accumulo-visibility
sqoop.args.accumulo.zookeepers <=> --accumulo-zookeepers
sqoop.args.bindir <=> --bindir
sqoop.args.class.name <=> --class-name
sqoop.args.input.null.non.string <=> --input-null-non-string
sqoop.args.input.null.string <=> --input-null-string
sqoop.args.jar.file <=> --jar-file
sqoop.args.map.column.java <=> --map-column-java
sqoop.args.null.non.string <=> --null-non-string
sqoop.args.null.string <=> --null-string
sqoop.args.outdir <=> --outdir
sqoop.args.package.name <=> --package-name
sqoop.args.conf <=> -conf
sqoop.args.D <=> -D
sqoop.args.fs <=> -fs
sqoop.args.jt <=> -jt
sqoop.args.files <=> -files
sqoop.args.libjars <=> -libjars
sqoop.args.archives <=> -archives
sqoop.args.update.key <=> --update-key
sqoop.args.update.mode <=> --update-mode
sqoop.args.export.dir <=> --export-dir
- + \ No newline at end of file diff --git a/docs/1.1.3/introduction/index.html b/docs/1.1.3/introduction/index.html index 45f6415845c..96ddc69b6f5 100644 --- a/docs/1.1.3/introduction/index.html +++ b/docs/1.1.3/introduction/index.html @@ -7,7 +7,7 @@ Introduction | Apache Linkis - + @@ -20,7 +20,7 @@ Since its first release in 2019, Linkis has accumulated more than 700 trial companies and 1000+ sandbox trial users, covering diverse industries from finance, banking, and telecommunications to manufacturing and internet companies.

    - + \ No newline at end of file diff --git a/docs/1.1.3/release-notes-1.1.3/index.html b/docs/1.1.3/release-notes-1.1.3/index.html index e1df2a89572..955fa47907f 100644 --- a/docs/1.1.3/release-notes-1.1.3/index.html +++ b/docs/1.1.3/release-notes-1.1.3/index.html @@ -7,15 +7,15 @@ Release Notes 1.1.3-RC1 | Apache Linkis - +
    -
    Version: Next(1.1.3)

    Release Notes 1.1.3-RC1

    Apache Linkis(incubating) 1.1.3 includes all of Project Linkis-1.1.3.

This release mainly integrates Prometheus to provide basic monitoring of Linkis microservices, adds a task-retry-count parameter for task submission, records the relationship between tasks and the ECs that execute them, enables the Flink engine to download Yarn logs to the EC log directory, adds watermark support to the front-end pages, upgrades several components with security vulnerabilities, and fixes known bugs reported by the community.

    The main functions are as follows:

• Integrate Prometheus to provide basic monitoring of Linkis microservices
• Task submission supports a parameter for the number of task retries
• The Flink engine supports downloading Yarn logs to the EC log directory
• Some dependency package upgrades and fixes for community-known bugs

Abbreviations:

    • COMMON: Linkis Common
    • EC: Engineconn
    • ECM: EngineConnManager
    • ECP: EngineConnPlugin
    • DMS: Data Source Manager Service
    • MDS: MetaData Manager Service
    • LM: Linkis Manager
    • PS: Linkis Public Service
    • PE: Linkis Public Enhancement
    • RPC: Linkis Common RPC
    • CG: Linkis Computation Governance

    New Feature#

• [Common][Linkis-1656] Integrate Prometheus to provide basic monitoring capability for Linkis microservices
• [EC-Flink][Linkis-2241] Add a Yarn Log Operator to support downloading Yarn logs to the EC log directory
• [Web][Linkis-2235] The front-end pages support watermarks
• [Entrance][Linkis-2164] Entrance supports a task-retry-count parameter
• [EC][Linkis-2163] Add task-to-execution-EC records; EC information is recorded in the task's Metrics field

    Enhancement#

• [ECM][Linkis-2243] Optimize the handling of newly registered ECM services and the service load selection logic, reducing the possible impact of availability issues on new services
• [PS-Jobhistory][Linkis-2198] Optimize the task code cache file name by increasing the time-format length, to avoid conflicts during long task execution
• [EC-Python][Linkis-2175] Add a py4j watchdog thread to monitor the java process, preventing the case where the java process quits abnormally while the python process does not quit
• [Common][Linkis-2150] Both the common and entrance modules have custom variable substitution logic, so they are merged into the common module as an optimization
• [EC-JDBC][Linkis-2142] Fix the problem that the JDBC engine console configuration cannot take effect immediately after modification (the cache time is now a configurable item)
    • [Entrance][Linkis-2160] The consumption queue for task submission supports configuring specific high-volume users
    • [PE][Linkis-2200] Tag code optimization, remove the persistence of tag key-value
• [EC][Linkis-1749] When an EC starts, allow limiting the port range of the specified service through parameters
    • [Common-Storage][Linkis-2168] File type in FileSource supports variable configuration
    • [Common-Storage][Linkis-2161] Added support for formatting parameters automatically when exporting the result set to an excel file
    • [Gateway][Linkis-2249] Optimize the gateway's Parser logic code
    • [Web][Linkis-2248] User resource display page is sorted by user and creator
    • [Web][Linkis-2108] Optimize the front-end page layout, unify the basic style, and optimize the secondary menu display
    • [Install][Linkis-2319] Adjust the datasource service deployment mode, and it is enabled by default; when installing, configure the initial login password
    • [Install][Linkis-2421] When installing and deploying, configure kerberos-related authentication information
• [EC][Linkis-2159] EC logs support rolling by size and time
    • [Common-Scheduler][Linkis-2272] Optimized code format and added LoopArray unit test
• [PS-ContextService][Linkis-2234] Added a method for batch cleaning context values in contextservice

    Bugs Fix#

• [EC][Linkis-2275] Fix the problem that the EC engine heartbeat report log field is too long in abnormal scenarios, causing storage failure
• [Web][Linkis-2239] Fix the yarn queue resource idle/busy usage-ratio ring chart not being displayed correctly
• [PS-ContextService][Linkis-2226] Fix FileReader and BufferedReader resources not being released in the finally block
• [Install][Linkis-2159] Fix the shell script +x permission failure that occurs when compiling on different systems
    • [Entrance][Linkis-2237] Refactor JobQueueLabel and JobRunningLabel, fix task queue label and task running label bug
    • [Build][Linkis-2354] Fix the ERROR level warning problem when compiling and packaging projects under WIN system
    • [Gateway][Linkis-2329] Fix the configuration problem of LDAP integration
• [Entrance][Linkis-2238] Optimize the result set path to be separated by date, to solve the problem of too many subdirectories in a single folder. Previously all result set paths were in the same folder, such as "/tmp/linkis/hadoop/linkis/20220516_210525/IDE/40099", which could cause too many files in one folder
    • [Entrance][Linkis-2162] Optimize the result set path to be separated by date to solve the problem of too many subdirectories in a single folder
    • [Common][Linkis-2332] Close the SpringCloud default configuration center to reduce the interference of unnecessary log information
    • [Web][Linkis-2295] remove redundant code in web install script

    Security related#

    • [PS-Jobhistory][Linkis-2248] Added parameter verification to the task query list interface to prevent sql injection security issues
    • [PS-PublicService][Linkis-1949] /api/rest_j/v1/datasource/columns interface adds user permission check

    Dependency changes#

    • [Common][Linkis-2188] Bump poi 5.2.1 to poi 5.2.2, fix possible memory allocation problems
    • [Common][Linkis-2182] Bump gson:2.8.5 to gson:2.8.9

    Thanks#

The release of Apache Linkis(incubating) 1.1.3 is inseparable from the contributors of the Linkis community. Thanks to all the community contributors, including but not limited to the following Contributors (in no particular order): Alexkun, CCweixiao, Davidhua1996, QuintinTao, caseone, det101, doroxinrui, huiyuanjjjjuice, husofskyzy, hzdhgf, jackxu2011, legendtkl, liuyou2, peacewong, pjfanning, ruY9527, saLeox, seayi, wForget, wallezhang, yyuser5201314

    2 linkis_ps_udf_manager#

The administrator user table for udf functions. Administrators have sharing permissions; the sharing entry is only shown on the front end for udf administrators.

number | name | description | type | key | empty | extra | default value
1 | id | | bigint(20) | PRI | NO | auto_increment |
2 | user_name | | varchar(20) | | YES | |

3 linkis_ps_udf_shared_info#

    udf shared record table

number | name | description | type | key | empty | extra | default value
1 | id | | bigint(20) | PRI | NO | auto_increment |
2 | udf_id | id of linkis_ps_udf_baseinfo | bigint(20) | | NO | |
3 | user_name | username used by the share | varchar(50) | | NO | |

4 linkis_ps_udf_tree#

    Tree-level record table for udf classification

number | name | description | type | key | empty | extra | default value
1 | id | | bigint(20) | PRI | NO | auto_increment |
2 | parent | parent category | bigint(20) | | NO | |
3 | name | Class name of the function | varchar(100) | | YES | |
4 | user_name | username | varchar(50) | | NO | |
5 | description | description information | varchar(255) | | YES | |
6 | create_time | | timestamp | | NO | on update CURRENT_TIMESTAMP | CURRENT_TIMESTAMP
7 | update_time | | timestamp | | NO | | CURRENT_TIMESTAMP
8 | category | category distinction: udf / function | varchar(50) | | YES | |

5 linkis_ps_udf_user_load#

Records whether a udf is loaded by default.

number | name | description | type | key | empty | extra | default value
1 | id | | bigint(20) | PRI | NO | auto_increment |
2 | udf_id | id of linkis_ps_udf_baseinfo | int(11) | | NO | |
3 | user_name | user owned | varchar(50) | | NO | |

6 linkis_ps_udf_version#

    udf version information table

number | name | description | type | key | empty | extra | default value
1 | id | | bigint(20) | PRI | NO | auto_increment |
2 | udf_id | id of linkis_ps_udf_baseinfo | bigint(20) | | NO | |
3 | path | The local path of the uploaded script/jar package | varchar(255) | | NO | |
4 | bml_resource_id | Material resource id in bml | varchar(50) | | NO | |
5 | bml_resource_version | bml material version | varchar(20) | | NO | |
6 | is_published | whether to publish | bit(1) | | YES | |
7 | register_format | registration format | varchar(255) | | YES | |
8 | use_format | use format | varchar(255) | | YES | |
9 | description | Version description | varchar(255) | | NO | |
10 | create_time | | timestamp | | NO | on update CURRENT_TIMESTAMP | CURRENT_TIMESTAMP
11 | md5 | | varchar(100) | | YES | |

ER diagram#

    image

    - + \ No newline at end of file diff --git a/docs/1.1.3/tags/feature/index.html b/docs/1.1.3/tags/feature/index.html index 185e1d09c73..93327147fce 100644 --- a/docs/1.1.3/tags/feature/index.html +++ b/docs/1.1.3/tags/feature/index.html @@ -7,7 +7,7 @@ 3 docs tagged with "Feature" | Apache Linkis - + @@ -15,7 +15,7 @@

    3 docs tagged with "Feature"

    View All Tags
    - + \ No newline at end of file diff --git a/docs/1.1.3/tags/index.html b/docs/1.1.3/tags/index.html index 59eb832ccb3..96f19199e94 100644 --- a/docs/1.1.3/tags/index.html +++ b/docs/1.1.3/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.1.3/tuning_and_troubleshooting/configuration/index.html b/docs/1.1.3/tuning_and_troubleshooting/configuration/index.html index 53ac61072a8..75448aeaa3b 100644 --- a/docs/1.1.3/tuning_and_troubleshooting/configuration/index.html +++ b/docs/1.1.3/tuning_and_troubleshooting/configuration/index.html @@ -7,7 +7,7 @@ Configurations | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Linkis1.0 Configurations

The configuration of Linkis1.0 is simplified on the basis of Linkis0.x. A public configuration file, linkis.properties, is provided in the conf directory to avoid having to configure common parameters in multiple microservices at the same time. This document lists the Linkis1.0 parameters module by module.

        Please note: this article only lists the Linkis configuration parameters that have an impact on operating performance or depend on the environment. Many configuration parameters that users do not need to care about have been omitted; if interested, users can browse the source code.

    1 General configuration#

        The general configuration can be set in the global linkis.properties; set once, it takes effect for every microservice.

    1.1 Global configurations#

Parameter name | Default value | Description
wds.linkis.encoding | utf-8 | Linkis default encoding format
wds.linkis.date.pattern | yyyy-MM-dd'T'HH:mm:ssZ | Default date format
wds.linkis.test.mode | false | Whether to enable debugging mode; if set to true, all microservices support password-free login and all EngineConns open remote debugging ports
wds.linkis.test.user | None | When wds.linkis.test.mode=true, the default login user for password-free login
wds.linkis.home | /appcom/Install/LinkisInstall | Linkis installation directory; if it does not exist, the value of LINKIS_HOME is used automatically
wds.linkis.httpclient.default.connect.timeOut | 50000 | Linkis HttpClient default connection timeout

    1.2 LDAP configurations#

Parameter name | Default value | Description
wds.linkis.ldap.proxy.url | None | LDAP URL address
wds.linkis.ldap.proxy.baseDN | None | LDAP baseDN address
wds.linkis.ldap.proxy.userNameFormat | None |

    1.3 Hadoop configuration parameters#

Parameter name | Default value | Description
wds.linkis.hadoop.root.user | hadoop | HDFS super user
wds.linkis.filesystem.hdfs.root.path | None | User's HDFS default root path
wds.linkis.keytab.enable | false | Whether to enable kerberos
wds.linkis.keytab.file | /appcom/keytab | Kerberos keytab path, effective only when wds.linkis.keytab.enable=true
wds.linkis.keytab.host.enabled | false |
wds.linkis.keytab.host | 127.0.0.1 |
hadoop.config.dir | None | If not configured, it will be read from the environment variable HADOOP_CONF_DIR
wds.linkis.hadoop.external.conf.dir.prefix | /appcom/config/external-conf/hadoop | hadoop additional configuration

    1.4 Linkis RPC configuration parameters#

Parameter name | Default value | Description
wds.linkis.rpc.broadcast.thread.num | 10 | Linkis RPC broadcast thread number (recommended default value)
wds.linkis.ms.rpc.sync.timeout | 60000 | Linkis RPC Receiver's default processing timeout
wds.linkis.rpc.eureka.client.refresh.interval | 1s | Refresh interval of the Eureka client's microservice list (recommended default value)
wds.linkis.rpc.eureka.client.refresh.wait.time.max | 1m | Maximum refresh waiting time (recommended default value)
wds.linkis.rpc.receiver.asyn.consumer.thread.max | 10 | Maximum number of Receiver Consumer threads (if there are many online users, it is recommended to increase this parameter appropriately)
wds.linkis.rpc.receiver.asyn.consumer.freeTime.max | 2m | Receiver Consumer maximum idle time
wds.linkis.rpc.receiver.asyn.queue.size.max | 1000 | Maximum number of buffers in the receiver consumption queue (if there are many online users, it is recommended to increase this parameter appropriately)
wds.linkis.rpc.sender.asyn.consumer.thread.max | 5 | Sender Consumer maximum number of threads
wds.linkis.rpc.sender.asyn.consumer.freeTime.max | 2m | Sender Consumer maximum idle time
wds.linkis.rpc.sender.asyn.queue.size.max | 300 | Sender consumption queue maximum buffer number

    2. Calculate governance configuration parameters#

    2.1 Entrance configuration parameters#

Parameter name | Default value | Description
wds.linkis.spark.engine.version | 2.4.3 | Default Spark version used when the user submits a script without specifying a version
wds.linkis.hive.engine.version | 1.2.1 | Default Hive version used when the user submits a script without specifying a version
wds.linkis.python.engine.version | python2 | Default Python version used when the user submits a script without specifying a version
wds.linkis.jdbc.engine.version | 4 | Default JDBC version used when the user submits a script without specifying a version
wds.linkis.shell.engine.version | 1 | Default shell version used when the user submits a script without specifying a version
wds.linkis.appconn.engine.version | v1 | Default AppConn version used when the user submits a script without specifying a version
wds.linkis.entrance.scheduler.maxParallelismUsers | 1000 | Maximum number of concurrent users supported by Entrance
wds.linkis.entrance.job.persist.wait.max | 5m | Maximum time for Entrance to wait for JobHistory to persist a Job
wds.linkis.entrance.config.log.path | None | If not configured, the value of wds.linkis.filesystem.hdfs.root.path is used by default
wds.linkis.default.requestApplication.name | IDE | Default submission system when the submission system is not specified
wds.linkis.default.runType | sql | Default script type when the script type is not specified
wds.linkis.warn.log.exclude | org.apache,hive.ql,hive.metastore,com.netflix,com.webank.wedatasphere | Real-time WARN-level logs that are not output to the client by default
wds.linkis.log.exclude | org.apache, hive.ql, hive.metastore, com.netflix, com.webank.wedatasphere, com.webank | Real-time INFO-level logs that are not output to the client by default
wds.linkis.instance | 3 | User's default number of concurrent jobs per engine
wds.linkis.max.ask.executor.time | 5m | Maximum time to apply to LinkisManager for an available EngineConn
wds.linkis.hive.special.log.include | org.apache.hadoop.hive.ql.exec.Task | Logs that are not filtered by default when pushing Hive logs to the client
wds.linkis.spark.special.log.include | org.apache.linkis.engine.spark.utils.JobProgressUtil | Logs that are not filtered by default when pushing Spark logs to the client
wds.linkis.entrance.shell.danger.check.enabled | false | Whether to check and block dangerous shell syntax
wds.linkis.shell.danger.usage | rm,sh,find,kill,python,for,source,hdfs,hadoop,spark-sql,spark-submit,pyspark,spark-shell,hive,yarn | Shell syntax considered dangerous by default
wds.linkis.shell.white.usage | cd,ls | Shell whitelist syntax
wds.linkis.sql.default.limit | 5000 | Default maximum number of returned result set rows for SQL

    2.2 EngineConn configuration parameters#

Parameter name | Default value | Description
wds.linkis.engineconn.resultSet.default.store.path | hdfs:///tmp | Default storage path of job result sets
wds.linkis.engine.resultSet.cache.max | 0k | Result sets below this size are returned to Entrance directly by the EngineConn, without being written to disk
wds.linkis.engine.default.limit | 5000 |
wds.linkis.engine.lock.expire.time | 120000 | Maximum idle time of the engine lock, i.e. how long after Entrance acquires the lock without submitting code to the EngineConn before the lock is released
wds.linkis.engineconn.ignore.words | org.apache.spark.deploy.yarn.Client | Logs ignored by default when the engine pushes logs to the Entrance side
wds.linkis.engineconn.pass.words | org.apache.hadoop.hive.ql.exec.Task | Logs that must be pushed by default when the engine pushes logs to the Entrance side
wds.linkis.engineconn.heartbeat.time | 3m | Default heartbeat interval from EngineConn to LinkisManager
wds.linkis.engineconn.max.free.time | 1h | EngineConn's maximum idle time

    2.3 EngineConnManager configuration parameters#

Parameter name | Default value | Description
wds.linkis.ecm.memory.max | 80g | Maximum memory the ECM can use to start EngineConns
wds.linkis.ecm.cores.max | 50 | Maximum number of CPU cores the ECM can use to start EngineConns
wds.linkis.ecm.engineconn.instances.max | 50 | Maximum number of EngineConns that can be started; generally recommended to be set the same as wds.linkis.ecm.cores.max
wds.linkis.ecm.protected.memory | 4g | ECM protected memory; the memory used by the ECM to start EngineConns cannot exceed wds.linkis.ecm.memory.max minus wds.linkis.ecm.protected.memory
wds.linkis.ecm.protected.cores.max | 2 | Number of protected ECM CPU cores; the meaning is the same as wds.linkis.ecm.protected.memory
wds.linkis.ecm.protected.engine.instances | 2 | Number of protected ECM instances
wds.linkis.engineconn.wait.callback.pid | 3s | Waiting time for the EngineConn to return its pid

    2.4 LinkisManager configuration parameters#

Parameter name | Default value | Description
wds.linkis.manager.am.engine.start.max.time | 10m | Maximum time for LinkisManager to start a new EngineConn
wds.linkis.manager.am.engine.reuse.max.time | 5m | Maximum selection time when LinkisManager reuses an existing EngineConn
wds.linkis.manager.am.engine.reuse.count.limit | 10 | Maximum polling count when LinkisManager reuses an existing EngineConn
wds.linkis.multi.user.engine.types | jdbc,es,presto | Engine types whose users are not used as reuse rules when LinkisManager reuses an existing EngineConn
wds.linkis.rm.instance | 10 | Default maximum number of instances per user per engine
wds.linkis.rm.yarnqueue.cores.max | 150 | Maximum number of cores per user in each engine's usage queue
wds.linkis.rm.yarnqueue.memory.max | 450g | Maximum amount of memory per user in each engine's usage queue
wds.linkis.rm.yarnqueue.instance.max | 30 | Maximum number of applications launched by each user in each engine's queue

    3. Each engine configuration parameter#

    3.1 JDBC engine configuration parameters#

Parameter name | Default value | Description
wds.linkis.jdbc.default.limit | 5000 | Default maximum number of returned result set rows
wds.linkis.jdbc.support.dbs | mysql=>com.mysql.jdbc.Driver,postgresql=>org.postgresql.Driver,oracle=>oracle.jdbc.driver.OracleDriver,hive2=>org.apache.hive.jdbc.HiveDriver,presto=>com.facebook.presto.jdbc.PrestoDriver | Drivers supported by the JDBC engine
wds.linkis.engineconn.jdbc.concurrent.limit | 100 | Maximum number of concurrent SQL executions

    3.2 Python engine configuration parameters#

Parameter name | Default value | Description
pythonVersion | /appcom/Install/anaconda3/bin/python | Python command path
python.path | None | Specifies an additional path for Python, which only accepts shared storage paths

    3.3 Spark engine configuration parameters#

Parameter name | Default value | Description
wds.linkis.engine.spark.language-repl.init.time | 30s | Maximum initialization time for the Scala and Python command interpreters
PYSPARK_DRIVER_PYTHON | python | Python command path
wds.linkis.server.spark-submit | spark-submit | spark-submit command path

    4. PublicEnhancements configuration parameters#

    4.1 BML configuration parameters#

Parameter name | Default value | Description
wds.linkis.bml.dws.version | v1 | Version number requested by Linkis Restful
wds.linkis.bml.auth.token.key | Validation-Code | Password-free token-key for BML requests
wds.linkis.bml.auth.token.value | BML-AUTH | Password-free token-value for BML requests
wds.linkis.bml.hdfs.prefix | /tmp/linkis | Prefix file path of BML files stored on hdfs

    4.2 Metadata configuration parameters#

Parameter name | Default value | Description
hadoop.config.dir | /appcom/config/hadoop-config | If it does not exist, the value of the environment variable HADOOP_CONF_DIR is used by default
hive.config.dir | /appcom/config/hive-config | If it does not exist, the value of the environment variable HIVE_CONF_DIR is used by default
hive.meta.url | None | URL of the HiveMetaStore database; if hive.config.dir is not configured, this value must be configured
hive.meta.user | None | User of the HiveMetaStore database
hive.meta.password | None | Password of the HiveMetaStore database

    4.3 JobHistory configuration parameters#

Parameter name | Default value | Description
wds.linkis.jobhistory.admin | None | Default Admin account, used to specify which users can view everyone's execution history

    4.4 FileSystem configuration parameters#

Parameter name | Default value | Description
wds.linkis.filesystem.root.path | file:///tmp/linkis/ | User's Linux local root directory
wds.linkis.filesystem.hdfs.root.path | hdfs:///tmp/ | User's HDFS root directory
wds.linkis.workspace.filesystem.hdfsuserrootpath.suffix | /linkis/ | First-level suffix after the user's HDFS root directory; the user's actual root directory is ${hdfs.root.path}\${user}\${hdfsuserrootpath.suffix}
wds.linkis.workspace.resultset.download.is.limit | true | Whether to limit the number of rows when a client downloads a result set
wds.linkis.workspace.resultset.download.maxsize.csv | 5000 | Row limit when the result set is downloaded as a CSV file
wds.linkis.workspace.resultset.download.maxsize.excel | 5000 | Row limit when the result set is downloaded as an Excel file
wds.linkis.workspace.filesystem.get.timeout | 2000L | Maximum timeout for requests to the underlying filesystem (if the performance of your HDFS or Linux machine is low, it is recommended to increase this value appropriately)

    4.5 UDF configuration parameters#

Parameter name | Default value | Description
wds.linkis.udf.share.path | /mnt/bdap/udf | Storage path of shared UDFs; it is recommended to set it to an HDFS path

    5. MicroService configuration parameters#

    5.1 Gateway configuration parameters#

Parameter name | Default value | Description
wds.linkis.gateway.conf.enable.proxy.user | false | Whether to enable proxy-user mode; if enabled, the login user's requests will be proxied to the proxy user for execution
wds.linkis.gateway.conf.proxy.user.config | proxy.properties | Storage file of proxy rules
wds.linkis.gateway.conf.proxy.user.scan.interval | 600000 | Proxy file refresh interval
wds.linkis.gateway.conf.enable.token.auth | false | Whether to enable token login mode; if enabled, access to Linkis in the form of tokens is allowed
wds.linkis.gateway.conf.token.auth.config | token.properties | Token rule storage file
wds.linkis.gateway.conf.token.auth.scan.interval | 600000 | Token file refresh interval
wds.linkis.gateway.conf.url.pass.auth | /dws/ | Requests released by default without login verification
wds.linkis.gateway.conf.enable.sso | false | Whether to enable SSO user login mode
wds.linkis.gateway.conf.sso.interceptor | None | If SSO login mode is enabled, the user needs to implement SSOInterceptor to jump to the SSO login page
wds.linkis.admin.user | hadoop | Administrator user list
wds.linkis.login_encrypt.enable | false | Whether the password is transmitted with RSA encryption when the user logs in
wds.linkis.enable.gateway.auth | false | Whether to enable the Gateway IP whitelist mechanism
wds.linkis.gateway.auth.file | auth.txt | IP whitelist storage file

    6. DataSource and Metadata Service configuration parameters#

    6.1 MetaData Service configuration parameters#

From Version | Parameter name | Default value | Description
v1.1.0 | wds.linkis.server.mdm.service.lib.dir | /lib/linkis-pulicxxxx-/linkis-metdata-manager/service | Relative path of the service to be loaded
v1.1.0 | wds.linkis.server.mdm.service.instance.expire-in-seconds | 60 | Service loading timeout; if exceeded, the service will not be loaded
v1.1.0 | wds.linkis.server.dsm.app.name | linkis-ps-data-source-manager | Service used to get the data source
v1.1.0 | wds.linkis.server.mdm.service.kerberos.principle | hadoop/HOST@EXAMPLE.COM | Kerberos principle for the linkis-metadata hive service
v1.1.0 | wds.linkis.server.mdm.service.user | hadoop | User for the linkis-metadata hive service
v1.1.0 | wds.linkis.server.mdm.service.kerberos.krb5.path | "" | Kerberos krb5 path for the linkis-metadata hive service
v1.1.0 | wds.linkis.server.mdm.service.temp.location | classpath:/tmp | Temp location for the linkis-metadata hive and kafka services
v1.1.0 | wds.linkis.server.mdm.service.sql.driver | com.mysql.jdbc.Driver | Driver for the hive-metadata mysql service
v1.1.0 | wds.linkis.server.mdm.service.sql.url | jdbc:mysql://%s:%s/%s | URL format for the hive-metadata mysql service
v1.1.0 | wds.linkis.server.mdm.service.sql.connect.timeout | 3000 | MySQL connection timeout for the hive-metadata mysql service
v1.1.0 | wds.linkis.server.mdm.service.sql.socket.timeout | 6000 | Socket open timeout for the hive-metadata mysql service
- + \ No newline at end of file diff --git a/docs/1.1.3/tuning_and_troubleshooting/overview/index.html b/docs/1.1.3/tuning_and_troubleshooting/overview/index.html index 5a0e891215a..6d777e27ffe 100644 --- a/docs/1.1.3/tuning_and_troubleshooting/overview/index.html +++ b/docs/1.1.3/tuning_and_troubleshooting/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -17,7 +17,7 @@ OS version compatibility varies, and some system versions may have incompatible commands. For example, the poor yum compatibility on Ubuntu may cause yum-related errors during installation and deployment. In addition, it is recommended not to use Windows to deploy Linkis; currently no script is fully compatible with the .bat command.

  • Missing configuration item: There are two configuration files that need to be modified in linkis1.0 version, linkis-env.sh and db.sh

The former contains the environment parameters that Linkis needs to load at runtime, and the latter holds the database information for the tables Linkis itself needs. Under normal circumstances, if a required configuration is missing, the error message will show an exception related to the missing key. For example, if db.sh is not filled in with the relevant database configuration, an "unknown mysql server host '-P'" exception will appear, which is caused by the missing host.

  • Report error when starting microservice

    Linkis puts the log files of all microservices into the logs directory. The log directory levels are as follows:

├── linkis-computation-governance
│   ├── linkis-cg-engineconnmanager
│   ├── linkis-cg-engineplugin
│   ├── linkis-cg-entrance
│   └── linkis-cg-linkismanager
├── linkis-public-enhancements
│   ├── linkis-ps-bml
│   ├── linkis-ps-cs
│   ├── linkis-ps-datasource
│   └── linkis-ps-publicservice
└── linkis-spring-cloud-services
    ├── linkis-mg-eureka
    └── linkis-mg-gateway

    It includes three microservice modules: computing governance, public enhancement, and microservice management. Each microservice contains three logs, linkis-gc.log, linkis.log, and linkis.out, corresponding to the service's GC log, service log, and service System.out log.

    Under normal circumstances, when an error occurs when starting a microservice, you can cd to the corresponding service in the log directory to view the related log to troubleshoot the problem. Generally, the most frequently occurring problems can also be divided into three categories:

1. Port occupation: Since the default ports of Linkis microservices are mostly concentrated around 9000, check whether each microservice's port is occupied by another process before starting. If a port is occupied, change the corresponding microservice port in the conf/linkis-env.sh file.

2. Missing required configuration parameters: Some microservices must load certain user-defined parameters before they can start normally. For example, the linkis-cg-engineplugin microservice loads the wds.linkis.engineconn.*-related configuration in conf/linkis.properties at startup; if the user changed the Linkis path after installation and the configuration was not updated accordingly, an error is reported when the linkis-cg-engineplugin microservice starts.

3. Incompatible system environment: It is recommended that users follow the recommended system and application versions in the official documents as much as possible when deploying, and install necessary system plug-ins such as expect and yum. Incompatible application versions may cause application-related errors. For example, SQL statement incompatibilities in MySQL 5.7 may cause errors in the linkis.ddl and linkis.dml files when initializing the db during installation; refer to the "Q&A Problem Summary" or the deployment documentation for the corresponding settings.

  • Report error during microservice execution period

Errors reported while microservices are running are more complicated, and the situations encountered differ by environment, but the troubleshooting method is basically the same. Starting from the corresponding microservice's error log directory, we can roughly divide the cases into three situations:

    1. Manually installed and deployed microservices report errors: The logs of this type of microservice are unified under the log/ directory. After locating the microservice, enter the corresponding directory to view it.

2. Engine start failure (insufficient resources, engine request failure): When this type of error occurs, it is not necessarily due to insufficient resources, because the front end can only fetch logs after the Spring project has started; errors that occur before the engine starts cannot be fetched well. Three kinds of high-frequency problems have been found in actual use by internal test users:

      a. The engine cannot be created because there is no engine directory permission: The log will be printed to the linkis.out file under the cg-engineconnmanager microservice. You need to enter the file to view the specific reason.

  b. Dependency conflicts in the engine lib package, or the server failing to start normally because of insufficient memory resources: since the engine directory has already been created, the log is printed to the stdout file under the engine directory, and the engine path can be found as described in item c below.

      c. Errors reported during engine execution: Each started engine is a microservice that is dynamically loaded and started during runtime. When the engine is started, if an error occurs, you need to find the corresponding log of the engine in the corresponding startup user directory. The corresponding root path is ENGINECONN_ROOT_PATH filled in linkis-env before installation. If you need to modify the path after installation, you need to modify wds.linkis.engineconn.root.dir in linkis.properties.

Ⅴ. Community user group consultation and communication#

For problems that cannot be resolved by following the process above during installation and deployment, you can send the error information to our community groups. To help community members and developers resolve issues efficiently, it is recommended that when asking a question you describe the symptom, attach the related log information, and mention what has already been checked. If you suspect an environment problem, also list the corresponding application versions. We provide two online groups: a WeChat group and a QQ group. The communication channels and specific contact information can be found at the bottom of the Linkis GitHub homepage.

Ⅵ. Locate the source code by remote debugging#

Under normal circumstances, remote debugging of the source code is the most effective way to locate problems, but compared with reviewing documentation, it requires some understanding of the source code structure. It is recommended to check the detailed source-code structure of Linkis in the Linkis WIKI before remote debugging. After gaining some familiarity with the project's source code structure, you can refer to How to Debug Linkis.

- + \ No newline at end of file diff --git a/docs/1.1.3/tuning_and_troubleshooting/tuning/index.html b/docs/1.1.3/tuning_and_troubleshooting/tuning/index.html index 757c9ea9c06..a3533909078 100644 --- a/docs/1.1.3/tuning_and_troubleshooting/tuning/index.html +++ b/docs/1.1.3/tuning_and_troubleshooting/tuning/index.html @@ -7,7 +7,7 @@ Tuning | Apache Linkis - + @@ -16,7 +16,7 @@
    override def getOrCreateGroup(groupName: String): Group = {
      if (!groupNameToGroups.containsKey(groupName)) synchronized {
        val initCapacity = 100
        val maxCapacity = 100
        // other codes...
      }
    }

    4. Resource settings related to task runtime#

When submitting a task to run on Yarn, Yarn provides a configurable interface. As a highly extensible framework, Linkis can also be configured to set resource parameters.

    The related configuration of Spark and Hive are as follows:

Part of the Spark configuration lives in linkis-engineconn-plugins/engineconn-plugins; you can adjust it to change the runtime environment of tasks submitted to Yarn. Due to limited space, for more details, such as the Hive and Yarn configuration, users should refer to the source code and the parameters documentation.

        "spark.driver.memory" = 2 //Unit is G    "wds.linkis.driver.cores" = 1    "spark.executor.memory" = 4 //Unit is G    "spark.executor.cores" = 2    "spark.executor.instances" = 3    "wds.linkis.rm.yarnqueue" = "default"
    - + \ No newline at end of file diff --git a/docs/1.1.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html b/docs/1.1.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html index daa823c1a12..12920345f9b 100644 --- a/docs/1.1.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html +++ b/docs/1.1.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html @@ -7,7 +7,7 @@ Upgrade From 0.X To 1.0 Guide | Apache Linkis - + @@ -16,7 +16,7 @@ Please input the choice: ## choice 1

    3. Database upgrade#

         After the service is installed, the database structure needs to be modified, including table structure changes and new tables and data:

    3.1 Table structure modification part:#

         linkis_task: The submit_user and label_json fields are added to the table. The update statement is:

ALTER TABLE linkis_task ADD submit_user varchar(50) DEFAULT NULL COMMENT 'submitUser name';
ALTER TABLE linkis_task ADD `label_json` varchar(200) DEFAULT NULL COMMENT 'label json';

    3.2 Need newly executed sql:#

cd db/module
## Add the tables that the enginePlugin service depends on:
source linkis_ecp.sql
## Add the table that the public service instanceLabel service depends on:
source linkis_instance_label.sql
## Add the tables that the linkis-manager service depends on:
source linkis_manager.sql

    3.3 Publicservice-Configuration table modification#

         In order to support the full labeling capability of Linkis 1.X, all the data tables related to the configuration module have been upgraded to labeling, which is completely different from the 0.X Configuration table. It is necessary to re-execute the table creation statement and the initialization statement.

         This means that Linkis0.X users' existing engine configuration parameters can no longer be migrated to Linkis1.0 (it is recommended that users reconfigure the engine parameters once).

         The execution of the table building statement is as follows:

    source linkis_configuration.sql

         Because Linkis 1.0 supports multiple versions of the engine, it is necessary to modify the version of the engine when executing the initialization statement, as shown below:

vim linkis_configuration_dml.sql
## Modify the default version of the corresponding engine
SET @SPARK_LABEL="spark-2.4.3";
SET @HIVE_LABEL="hive-1.2.1";
## Execute the initialization statement
source linkis_configuration_dml.sql

    4. Installation and startup Linkis1.0#

         Start Linkis 1.0 to verify whether the service has been started normally and provide external services. For details, please refer to: Quick Deployment Linkis1.0

    - + \ No newline at end of file diff --git a/docs/1.1.3/upgrade/upgrade_guide/index.html b/docs/1.1.3/upgrade/upgrade_guide/index.html index c9b2a59f923..ae4618de01c 100644 --- a/docs/1.1.3/upgrade/upgrade_guide/index.html +++ b/docs/1.1.3/upgrade/upgrade_guide/index.html @@ -7,7 +7,7 @@ Version upgrades above 1.0.3 | Apache Linkis - + @@ -34,7 +34,7 @@ Linkis' nginx configuration file is by default in /etc/nginx/conf.d/dss.conf

    #Example
    server {
        ......
        location dss/linkis {
            alias /appcom/Install/linkis-web-newversion/dist; # static file directory
            index index.html index.html;
        }
        ......
    }

    Reload nginx configuration

    sudo nginx -s reload

    5.3 Notes#

    • After the management console is upgraded, because the browser may have a cache, if you want to verify the effect, it is best to clear the browser cache
    - + \ No newline at end of file diff --git a/docs/1.1.3/user_guide/console_manual/index.html b/docs/1.1.3/user_guide/console_manual/index.html index 3a9741532e8..2a4de1f3e1e 100644 --- a/docs/1.1.3/user_guide/console_manual/index.html +++ b/docs/1.1.3/user_guide/console_manual/index.html @@ -7,7 +7,7 @@ Console User Manual | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Console User Manual

Linkis1.0 has added a new Computation Governance Console page, which provides users with an interactive UI for viewing the execution of Linkis tasks, custom parameter configuration, engine health status, resource surplus, and so on, thereby simplifying user development and management efforts.

1. Structure of Computation Governance Console#

The Computation Governance Console is mainly composed of the following functional pages:

    • Global History
    • Resource Management
    • Parameter Configuration
    • Global Variables
    • ECM Management (Only visible to linkis computing management console administrators)
    • Microservice Management (Only visible to linkis computing management console administrators)

    Global history, resource management, parameter configuration, and global variables are visible to all users, while ECM management and microservice management are only visible to linkis computing management console administrators.

Administrators of the Linkis Computation Governance Console can be configured with the following parameter in linkis.properties:

    wds.linkis.governance.station.admin=hadoop (multiple administrator usernames are separated by ‘,’)

    2. Global history#

The global history interface provides the user's own Linkis task submission records. The execution status of each task is displayed here, and the reason for a task's failure can be queried by clicking the view button on the left side of the task.

    ./media/image2.png

    ./media/image3.png

For administrators of the Linkis Computation Governance Console, the historical tasks of all users can be viewed by clicking the switch-to-administrator view on the page.

    ./media/image4.png

    3. Resource management#

    In the resource management interface, the user can see the status of the engine currently started and the status of resource occupation, and can also stop the engine through the page.

    ./media/image5.png

    4. Parameter configuration#

    The parameter configuration interface provides the function of user-defined parameter management. The user can manage the related configuration of the engine in this interface, and the administrator can add application types and engines here.

    ./media/image6.png

The user can expand all the configuration information in the directory by clicking the application type at the top, then select the engine type in the application, modify the configuration information, and click "Save" to take effect.

Editing catalogs and creating new application types are only visible to the administrator. Click the edit button to delete an existing application or engine configuration (note: deleting an application directly deletes all engine configurations under it and cannot be restored), add an engine, or click "New Application" to add a new application type.

    ./media/image7.png

    ./media/image8.png

    5. Global variable#

    In the global variable interface, users can customize variables for code writing, just click the edit button to add parameters.

    ./media/image9.png

    6. ECM management#

The ECM management interface is used by the administrator to manage the ECMs and all engines. It can view the status information of an ECM, modify its label and status information, and query all engine information under each ECM. Only the administrator can see this interface; the administrator configuration method is described in chapter 2 of this article.

    ./media/image10.png

    Click the edit button to edit the label information of the ECM (only part of the labels are allowed to be edited) and modify the status of the ECM.

    ./media/image11.png

    Click the instance name of the ECM to view all engine information under the ECM.

    Similarly, you can stop the engine on this interface, and edit the label information of the engine.

    7. Microservice management#

The microservice management interface shows all microservice information under Linkis and is only visible to the administrator. Linkis's own microservices can be viewed by clicking the Eureka registration center; the microservices associated with Linkis are listed directly on this interface.

    - + \ No newline at end of file diff --git a/docs/1.1.3/user_guide/how_to_use/index.html b/docs/1.1.3/user_guide/how_to_use/index.html index 82f384ea3a9..591034b5ab9 100644 --- a/docs/1.1.3/user_guide/how_to_use/index.html +++ b/docs/1.1.3/user_guide/how_to_use/index.html @@ -7,7 +7,7 @@ How to Use | Apache Linkis - + @@ -18,7 +18,7 @@ DSS Run Workflow

- + \ No newline at end of file diff --git a/docs/1.1.3/user_guide/linkis-datasource-client/index.html b/docs/1.1.3/user_guide/linkis-datasource-client/index.html index 04eeebb3df1..06273fdaa08 100644 --- a/docs/1.1.3/user_guide/linkis-datasource-client/index.html +++ b/docs/1.1.3/user_guide/linkis-datasource-client/index.html @@ -7,7 +7,7 @@ DataSource Client SDK | Apache Linkis - + @@ -31,7 +31,7 @@
    def testMetadataGetDatabases(client: LinkisMetaDataRemoteClient): Unit = {
      client.getDatabases(MetadataGetDatabasesAction.builder()
        .setUser("hadoop")
        .setDataSourceId(9l)
        .setSystem("client")
        .build()).getDbs
    }
}
    - + \ No newline at end of file diff --git a/docs/1.1.3/user_guide/linkiscli_manual/index.html b/docs/1.1.3/user_guide/linkiscli_manual/index.html index 891d7b0d57c..00f2f6016bb 100644 --- a/docs/1.1.3/user_guide/linkiscli_manual/index.html +++ b/docs/1.1.3/user_guide/linkiscli_manual/index.html @@ -7,7 +7,7 @@ Linkis-Cli Manual | Apache Linkis - + @@ -16,7 +16,7 @@

    Note:

    1. variableMap does not support configuration

    2. When there is a conflict between the configured key and the key entered in the command parameter, the priority is as follows:

  Instruction Parameters > Keys in Instruction Map-Type Parameters > User Configuration > Default Configuration
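The precedence rule can be pictured as a simple ordered merge, where higher-priority sources are applied last. The sketch below is only an illustration of the rule, not linkis-cli's own implementation; the key name used is just an example.

    import java.util.HashMap;
    import java.util.LinkedHashMap;
    import java.util.Map;

    // Illustration of the precedence rule above: sources are merged from the
    // lowest priority to the highest, so later sources override earlier ones.
    // This mimics the rule for clarity; it is not linkis-cli's own code.
    public class KeyPrecedenceExample {
        @SafeVarargs
        public static Map<String, String> resolve(Map<String, String>... lowestToHighest) {
            Map<String, String> merged = new LinkedHashMap<>();
            for (Map<String, String> source : lowestToHighest) {
                merged.putAll(source); // higher-priority sources are applied last
            }
            return merged;
        }

        public static void main(String[] args) {
            Map<String, String> defaults = new HashMap<>();
            defaults.put("wds.linkis.yarnqueue", "default");
            Map<String, String> userConf = new HashMap<>();
            userConf.put("wds.linkis.yarnqueue", "q02");
            Map<String, String> cliParams = new HashMap<>();
            cliParams.put("wds.linkis.yarnqueue", "q01");
            // cliParams has the highest priority, so q01 wins.
            System.out.println(resolve(defaults, userConf, cliParams));
        }
    }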

    Example:

    Configure engine startup parameters:

   wds.linkis.client.param.conf.spark.executor.instances=3
   wds.linkis.client.param.conf.wds.linkis.yarnqueue=q02

    Configure labelMap parameters:

       wds.linkis.client.label.myLabel=label123

6. Output the result set to a file#

Use the -outPath parameter to specify an output directory. linkis-cli will write the result sets to files in that directory, creating one file per result set. The output files are named as follows:

        task-[taskId]-result-[idx].txt    

For example:

    task-906-result-1.txt
    task-906-result-2.txt
    task-906-result-3.txt
    - + \ No newline at end of file diff --git a/docs/1.1.3/user_guide/overview/index.html b/docs/1.1.3/user_guide/overview/index.html index 4b6a07c4437..0949015256b 100644 --- a/docs/1.1.3/user_guide/overview/index.html +++ b/docs/1.1.3/user_guide/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.1.3/user_guide/sdk_manual/index.html b/docs/1.1.3/user_guide/sdk_manual/index.html index 56f99bc2258..d710df5ee4a 100644 --- a/docs/1.1.3/user_guide/sdk_manual/index.html +++ b/docs/1.1.3/user_guide/sdk_manual/index.html @@ -7,7 +7,7 @@ JAVA SDK Manual | Apache Linkis - + @@ -42,7 +42,7 @@ }
    - + \ No newline at end of file diff --git a/docs/1.1.3/user_guide/udf/index.html b/docs/1.1.3/user_guide/udf/index.html index ddebea8247d..067c23f1e16 100644 --- a/docs/1.1.3/user_guide/udf/index.html +++ b/docs/1.1.3/user_guide/udf/index.html @@ -7,7 +7,7 @@ Use of UDFs | Apache Linkis - + @@ -20,7 +20,7 @@ Prerequisite: The sharing function needs to be used by the user as an administrator, otherwise the front-end page will not provide an operation entry.

Click the share button of a udf: a dialog box will pop up; enter the list of users to share with (comma separated).

    Note: After sharing to others, others need to actively load the UDF before using it.

After sharing, the users it was shared with can find it under "Shared Function" and check it to load and use it.

    5 Introduction of other functions#

    5.1 UDF handover#

For example, when a user leaves the company, they may need to hand over their personal UDFs to someone else. Click the Handover button, select the person to hand over to, and click OK.

    5.2 UDF Expiration#

For a UDF shared with others, if it has already been loaded by a shared user, it cannot be deleted directly; it can only be marked as expired. For now the mark is informational only and does not affect use.

    5.3 UDF version list#

    Click the "version list" button of a udf to view all versions of the udf. The following features are provided for each version:

Create a new version: copies the corresponding version as the latest version.

Download: downloads the UDF file from BML to the local machine.

View the source code: for the python/scala script types the source can be viewed directly; the jar type is not supported.

Publish: for a shared UDF, a specific version can be published so that it takes effect for the shared users. Note: shared users use the latest published version of the UDF, while the owner always uses the latest version.

    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html b/docs/latest/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html index 65d5c8b071d..d17d1ecd92b 100644 --- a/docs/latest/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html +++ b/docs/latest/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html @@ -7,7 +7,7 @@ Engine Plugin Api | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Engine Plugin Api

    EnginePluginRestful class

    refresh#

    Interface address:/api/rest_j/v1/engineplugin/refresh

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Refresh a single resource

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|ecType|type|query|false|string| |
|version|version|query|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    refresh all#

    Interface address:/api/rest_j/v1/engineplugin/refreshAll

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Refresh all ec resources

    Request Parameters:

    No

    Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html b/docs/latest/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html index 64b435f3c05..3cd6ae7da1a 100644 --- a/docs/latest/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html +++ b/docs/latest/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html @@ -7,7 +7,7 @@ Engine Material Refresh Interface | Apache Linkis - + @@ -16,7 +16,7 @@ none

    Response parameters:

    parameter nameparameter descriptiontypeschema
    dataobject
    messagestring
    methodstring
    statusinteger(int32)integer(int32)

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "msg": "Refresh successfully"    }}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-cg-entrance-api/task-management-api/index.html b/docs/latest/api/http/linkis-cg-entrance-api/task-management-api/index.html index 169ed9b24bf..e0ffb250517 100644 --- a/docs/latest/api/http/linkis-cg-entrance-api/task-management-api/index.html +++ b/docs/latest/api/http/linkis-cg-entrance-api/task-management-api/index.html @@ -7,7 +7,7 @@ Task Management | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Task Management

    EntranceMetricRestfulApi class

    Task management

    start task#

    Interface address:/api/rest_j/v1/entrance/api/metrics/runningtask

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Start task

    Request Parameters:

    No

    Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    task info#

    Interface address:/api/rest_j/v1/entrance/api/metrics/taskinfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Task information

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|creator|Creator|query|false|string| |
|engineTypeLabel|Engine Type Label|query|false|string| |
|user|user|query|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-cg-entrance-api/task-operation-api/index.html b/docs/latest/api/http/linkis-cg-entrance-api/task-operation-api/index.html index e4c07ecdb1e..484373b5f88 100644 --- a/docs/latest/api/http/linkis-cg-entrance-api/task-operation-api/index.html +++ b/docs/latest/api/http/linkis-cg-entrance-api/task-operation-api/index.html @@ -7,7 +7,7 @@ Task Action | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Task Action

    EntranceRestfulApi class

    process task request#

    Interface address:/api/rest_j/v1/entrance/execute

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    The execute function handles the request submitted by the user to execute the task

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|json|json|body|true|object| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Submit the execute function#

    Interface address:/api/rest_j/v1/entrance/submit

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Submit execute function

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|Submit|json|body|true|Submit|Submit|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    end task#

    Interface address: /api/rest_j/v1/entrance/{id}/kill

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    kill task

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|id|ID|path|false|string| |
|taskID|taskID|query|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    End Jobs#

    Interface address: /api/rest_j/v1/entrance/{id}/killJobs

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    End Jobs

    Request example:

    {    "taskIDList": [],    "idList": []}

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|id|id, taken from the request path|true|String|String| |
|taskIDList|collection of task IDs|false|String|String| |
|idList|ID collection|false|String|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": "/api/entrance/#id/killJobs",    "status": 0,    "message": "success",    "data": {        "messages": [{            "method": "",            "status": 0,            "message": "",            "data": {                "execID": ""            }        }]    }}

    task log#

    Interface address: /api/rest_j/v1/entrance/{id}/log

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get task log

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|id|Task ID|path|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Pause task#

    Interface address:/api/rest_j/v1/entrance/{id}/pause

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Pause task

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|id|Task ID|path|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

Task progress#

    Interface address:/api/rest_j/v1/entrance/{id}/progress

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Task progress

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|id|Task ID|path|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Resource progress#

    Interface address:/api/rest_j/v1/entrance/{id}/progressWithResource

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Resource progress

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|id|ID|path|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    task status#

    Interface address:/api/rest_j/v1/entrance/{id}/status

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Task status

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|id|ID|path|false|string| |
|taskID|taskID|query|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html b/docs/latest/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html index feaf68493bc..bad4b10939b 100644 --- a/docs/latest/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html +++ b/docs/latest/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html @@ -7,7 +7,7 @@ EC Resource Information Management | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    EC Resource Information Management

    ECResourceInfoRestfulApi class

    delete EC info#

Interface address:/api/rest_j/v1/linkisManager/ecinfo/delete/{ticketid}

    Request method: DELETE

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Delete EC information

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|ticketid|ticketid|path|true|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|204|No Content| |
|401|Unauthorized| |
|403|Forbidden| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get EC information#

    Interface address: /api/rest_j/v1/linkisManager/ecinfo/get

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Get EC information

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|ticketid|ticketid|query|true|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html b/docs/latest/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html index da86d02c8e6..a206160640d 100644 --- a/docs/latest/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html +++ b/docs/latest/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html @@ -7,7 +7,7 @@ ECM Resource Information Management | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    ECM Resource Information Management

    ECResourceInfoRestfulApi class

    delete EC info#

Interface address:/api/rest_j/v1/linkisManager/ecinfo/delete/{ticketid}

    Request method: DELETE

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Delete EC information

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|ticketid|ticketid|path|true|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|204|No Content| |
|401|Unauthorized| |
|403|Forbidden| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get EC information#

    Interface address: /api/rest_j/v1/linkisManager/ecinfo/get

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Get EC information

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|ticketid|ticketid|query|true|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0    }

    ECM resource list#

    Interface address: /api/rest_j/v1/linkisManager/listAllEMs

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

Get a detailed list of all ECM resources; the list can be filtered by the conditions below, and by default everything is returned.

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|instance|instance name|query|false|string| |
|nodeHealthy|Status, with the enumeration values 'Healthy', 'UnHealthy', 'WARN', 'StockAvailable', 'StockUnavailable'|query|false|string| |
|owner|Creator|query|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": "/api/linkisManager/listAllEMs",    "status": 0,    "message": "OK",    "data": {        "EMs": [{            "labels": [{                "stringValue": "",                "labelKey": "",                "feature": "",                "instance": "",                "serviceInstance": {                    "instance": "",                    "applicationName": ""                },                "serviceName": "",                "featureKey": "",                "empty":            }],            "applicationName": "",            "instance": ":",            "resourceType": "",            "maxResource": {                "memory": ,                "cores": ,                "instance":            },            "minResource": {                "memory": ,                "cores": ,                "instance":            },            "usedResource": {                "memory": ,                "cores": ,                "instance":            },            "lockedResource": {                "memory": 0,                "cores": 0,                "instance": 0            },            "expectedResource": {                "memory": 0,                "cores": 0,                "instance": 0            },            "leftResource": {                "memory": ,                "cores": ,                "instance":            },            "owner": "",            "runningTasks": null,            "pendingTasks": null,            "succeedTasks": null,            "failedTasks": null,            "maxMemory": ,            "usedMemory": ,            "systemCPUUsed": null,            "systemLeftMemory": ,            "nodeHealthy": "",            "msg": "",            "startTime":        }]    }}

Edit ECM instance#

    Interface address: /api/rest_j/v1/linkisManager/modifyEMInfo

    Request method: PUT

    Request data type: application/json

    Response data type: application/json

    Interface description:

Edit or modify an instance under ECM management

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|applicationName|Engine Label|false|String|String| |
|emStatus|Instance status, with the enumeration values 'Healthy', 'UnHealthy', 'WARN', 'StockAvailable', 'StockUnavailable'|false|String|String| |
|instance|Engine instance name|false|String|String| |
|labelKey|The key of the label in the added content, i.e. the key of the map in the labels collection|false|String|String| |
|labels|The update content for the engine instance; a collection whose entries are maps|false|List|List| |
|stringValue|The value of the label in the added content, i.e. the value of the map in the labels collection|false|String|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": "/api/linkisManager/modifyEMInfo",    "status": 0,    "message": "success",    "data": {}}

    Open engine log#

    Interface address: /api/rest_j/v1/linkisManager/openEngineLog

    Request method: POST

    Request data type: application/json

    Response data type: application/json

    Interface description:

Open the engine log; the stdout-type engine log is opened by default.

    Request example:

    {    applicationName: ""    emInstance: ""    instance: ""    parameters: {        pageSize: ,        fromLine: ,        logType: ""    }}

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|applicationName|Engine Label|String|false|String| |
|emInstance|Instance name|String|false|String| |
|fromLine|From Line|String|false|String| |
|instance|Engine instance name|String|false|String| |
|logType|Log type, default stdout, belongs to parameters|String|false|String| |
|pageSize|Page Size|String|false|String| |
|parameters|Pagination information|Map|false|Map| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": "/api/linkisManager/openEngineLog",    "status": 0,    "message": "OK",    "data": {        "result": {            "logPath": "",            "logs": [""],            "endLine": ,            "rows":        },        "isError": false,        "errorMsg": ""    }}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html b/docs/latest/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html index d51eca9bf82..5bdaa721295 100644 --- a/docs/latest/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html +++ b/docs/latest/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html @@ -7,7 +7,7 @@ Engine Management | Apache Linkis - + @@ -16,7 +16,7 @@

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|applicationName|The application name; the outermost layer is an array, at the same level as engineInstance|false|String|String| |
|engineInstance|The engine instance name; the outermost layer is an array, at the same level as applicationName|false|String|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html b/docs/latest/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html index 91944dba4c3..2e33edeba1a 100644 --- a/docs/latest/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html +++ b/docs/latest/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html @@ -7,7 +7,7 @@ Resource Management | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Resource Management

    RMMonitorRest class

    All user resources#

    Interface address:/api/rest_j/v1/linkisManager/rm/allUserResource

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    All user resources

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|creator|creator|query|false|string| |
|engineType|engineType|query|false|string| |
|page|page|query|false|integer(int32)| |
|size|size|query|false|integer(int32)| |
|username|username|query|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "total": 34,        "resources": [{            "id": ,            "username": "",            "creator": "",            "engineTypeWithVersion": "",            "resourceType": "",            "maxResource": {                "memory": ,                "cores": ,                "instance":            },            "minResource": {                "memory": ,                "cores": "instance": 0            },            "usedResource": {                "memory": ,                "cores": ,                "instance":            },            "lockedResource": {                "memory": 0,                "cores": 0,                "instance": 0            },            "expectedResource": null,            "leftResource": {                "memory": ,                "cores": ,                "instance":            },            "createTime": ,            "updateTime": ,            "loadResourceStatus": "",            "queueResourceStatus":        }]    }}

    Application List#

    Interface address: /api/rest_j/v1/linkisManager/rm/applicationlist

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Get the list of application engines in resource management

    Request example:

    {    userCreator: ""}

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|userCreator|userCreator|query|true|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": ,    "status": ,    "message": "",    "data": {        "applications": [{            "creator": "",            "applicationList": {                "usedResource": {                    "memory": ,                    "cores": ,                    "instance":                },                "maxResource": {                    "memory": ,                    "cores": ,                    "instance":                },                "lockedResource": {                    "memory": ,                    "cores": ,                    "instance":                },                "minResource": {                    "memory": ,                    "cores": ,                    "instance":                },                "engineInstances": [{                    "resource": {                        "resourceType": "",                        "maxResource": {                            "memory": ,                            "cores": ,                            "instance":                        },                        "minResource": {                            "memory": ,                            "cores": ,                            "instance":                        },                        "usedResource": {                            "memory": ,                            "cores": ,                            "instance":                        },                        "lockedResource": {                            "memory": ,                            "cores": ,                            "instance":                        },                        "expectedResource": null,                        "leftResource": {                            "memory": ,                            "cores": ,                            "instance":                        }                    },                    "engineType": "",                    "owner": "",                    "instance": "",                    "creator": "",                    "startTime": "",                    "status": "",                    "label": ""                }]            }        }]    }}

    EngineType#

    Interface address: /api/rest_j/v1/linkisManager/rm/engineType

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface Description:

    Engine Type

    Request Parameters:

    No

    Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Engine manager#

    Interface address: /api/rest_j/v1/linkisManager/rm/engines

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface Description:

    Engine Manager

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|param|param|body|false|object| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    queue manager#

    Interface address: /api/rest_j/v1/linkisManager/rm/queueresources

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Queue Manager

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|param|param|body|true|object| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    queue#

    Interface address: /api/rest_j/v1/linkisManager/rm/queues

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Queue

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|param|param|body|false|object| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    reset resources#

    Interface address:/api/rest_j/v1/linkisManager/rm/resetResource

    Request method: DELETE

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Reset resources

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|resourceId|resourceId|query|false|integer(int32)| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|204|No Content| |
|401|Unauthorized| |
|403|Forbidden| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Resource information#

    Interface address: /api/rest_j/v1/linkisManager/rm/userresources

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Query resource list and detailed resource data such as usage percentage

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|param|param|body|false|object| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {            "userResources": [{            "userCreator": "",            "engineTypes": [{            "engineType": "",            "percent": ""            }],    "percent": ""        }]    }}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-ps-cs-api/context-history-service-api/index.html b/docs/latest/api/http/linkis-ps-cs-api/context-history-service-api/index.html index f22793aa6d0..9dc4ccab7f0 100644 --- a/docs/latest/api/http/linkis-ps-cs-api/context-history-service-api/index.html +++ b/docs/latest/api/http/linkis-ps-cs-api/context-history-service-api/index.html @@ -7,7 +7,7 @@ Context History Service | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Context History Service

    ContextHistoryRestfulApi class

    create history#

    Interface address:/api/rest_j/v1/contextservice/createHistory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface Description:

    Create History

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|contextHistory|History context|false|String|String| |
|contextID|context id|false|String|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get multiple histories#

    Interface address:/api/rest_j/v1/contextservice/getHistories

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Get multiple history records

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|contextID|context id|false|String|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get history#

    Interface address:/api/rest_j/v1/contextservice/getHistory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Get history records

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|contextID|ContextId|false|String|String| |
|source|Context Source|false|String|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete history#

    Interface address:/api/rest_j/v1/contextservice/removeHistory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete history records

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|contextHistory|History context|false|String|String| |
|contextID|context id|false|String|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    search history#

    Interface address:/api/rest_j/v1/contextservice/searchHistory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Search history

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|contextID|ContextId|false|String|String| |
|keywords|Keywords|false|String|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-ps-cs-api/context-listening-service-api/index.html b/docs/latest/api/http/linkis-ps-cs-api/context-listening-service-api/index.html index 8ac97af6669..9d2f0421205 100644 --- a/docs/latest/api/http/linkis-ps-cs-api/context-listening-service-api/index.html +++ b/docs/latest/api/http/linkis-ps-cs-api/context-listening-service-api/index.html @@ -7,7 +7,7 @@ Context Listening Service | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Context Listening Service

    ContextListenerRestfulApi class

    Context listener service

    heartbeat#

    Interface address:/api/rest_j/v1/contextservice/heartbeat

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|jsonNode|jsonNode|body|true|JsonNode|JsonNode|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    onBindIDListener#

    Interface address:/api/rest_j/v1/contextservice/onBindIDListener

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|jsonNode|jsonNode|body|true|JsonNode|JsonNode|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    onBindKeyListener#

    Interface address:/api/rest_j/v1/contextservice/onBindKeyListener

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|jsonNode|jsonNode|body|true|JsonNode|JsonNode|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-ps-cs-api/context-logging-service-api/index.html b/docs/latest/api/http/linkis-ps-cs-api/context-logging-service-api/index.html index 718b73014c1..a8770337a24 100644 --- a/docs/latest/api/http/linkis-ps-cs-api/context-logging-service-api/index.html +++ b/docs/latest/api/http/linkis-ps-cs-api/context-logging-service-api/index.html @@ -7,7 +7,7 @@ Context Logging Service | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Context Logging Service

    ContextIDRestfulApi class

create context ID#

    Interface address: /api/rest_j/v1/contextservice/createContextID

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Create context ID

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|jsonNode|jsonNode|true|body|JsonNode|JsonNode|
|contextID|ContextId|false|String|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

Get context ID#

    Interface address: /api/rest_j/v1/contextservice/getContextID

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Get context ID

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|contextId|ContextId|false|query|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

delete context ID#

    Interface address: /api/rest_j/v1/contextservice/removeContextID

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Delete context ID

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|jsonNode|jsonNode|true|body|JsonNode|JsonNode|
|contextId|ContextId|false|String|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

reset context ID#

    Interface address: /api/rest_j/v1/contextservice/resetContextID

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface Description:

Reset context ID

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|jsonNode|jsonNode|true|body|JsonNode|JsonNode|
|contextId|ContextId|false|String|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

Search context ID by time#

    Interface address:/api/rest_j/v1/contextservice/searchContextIDByTime

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Search context IDs by time range

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|accessTimeEnd|Access end time|false|query|string| |
|accessTimeStart|Access start time|false|query|string| |
|createTimeEnd|Create end time|false|query|string| |
|createTimeStart|Create start time|false|query|string| |
|pageNow|page number|false|query|string| |
|pageSize|page size|false|query|string| |
|updateTimeEnd|Update end time|false|query|string| |
|updateTimeStart|Update start time|false|query|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

Modify context ID#

    Interface address: /api/rest_j/v1/contextservice/updateContextID

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Modify context ID

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|jsonNode|jsonNode|true|body|JsonNode|JsonNode|
|contextId|ContextId|false|String|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-ps-cs-api/context-service-api/index.html b/docs/latest/api/http/linkis-ps-cs-api/context-service-api/index.html index f7e9b27f79b..b62e7957866 100644 --- a/docs/latest/api/http/linkis-ps-cs-api/context-service-api/index.html +++ b/docs/latest/api/http/linkis-ps-cs-api/context-service-api/index.html @@ -7,7 +7,7 @@ Context API | Apache Linkis - + @@ -33,7 +33,7 @@ |contextKey|contextKey|false|String|String|

    Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html b/docs/latest/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html index 3b1923b10aa..c747557ff1c 100644 --- a/docs/latest/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html +++ b/docs/latest/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html @@ -7,7 +7,7 @@ BM Project Operation Management | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    BM Project Operation Management

    BmlProjectRestful class

Attach resource to project#

    Interface address:/api/rest_j/v1/bml/attachResourceAndProject

    Request mode:POST

    Request data type:application/json

    Response data type:*/*

    Interface description:

Attach a resource to a project

    Request parameters:

|parameter name|parameter description|request type|must be|data type|schema|
|projectName|project name|string|false|string| |
|resourceid|resource name|string|false|string| |

Response status:

|Status code|description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|dataset|object| |
|message|description|string| |
|method|request url|string| |
|status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Create BML project#

    Interface address:/api/rest_j/v1/bml/createBmlProject

    Request mode:POST

    Request data type:application/json

    Response data type:*/*

    Interface description:

    Create BML project

    Request parameters:

|parameter name|parameter description|request type|must be|data type|schema|
|accessusers|access users|string|false|string| |
|editusers|edit user|string|false|string| |
|projectName|project name|string|false|string| |

Response status:

|Status code|description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|dataset|object| |
|message|description|string| |
|method|request url|string| |
|status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Download shared resources#

    Interface address:/api/rest_j/v1/bml/downloadShareResource

    Request mode:GET

    Request data type:application/x-www-form-urlencoded

    Response data type:*/*

    Interface description:

    Download shared resources

    Request parameters:

|parameter name|parameter description|request type|must be|data type|schema|
|resourceid|resource ID|query|false|string| |
|version|version|query|false|string| |

Response status:

|Status code|description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|dataset|object| |
|message|description|string| |
|method|request url|string| |
|status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Project information#

    Interface address:/api/rest_j/v1/bml/getProjectInfo

    Request mode:GET

    Request data type:application/x-www-form-urlencoded

    Response data type:*/*

    Interface description:

    Project information

    Request parameters:

|parameter name|parameter description|request type|must be|data type|schema|
|projectName|project name|query|false|string| |

Response status:

|Status code|description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|dataset|object| |
|message|description|string| |
|method|request url|string| |
|status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Update project user#

    Interface address:/api/rest_j/v1/bml/updateProjectUsers

    Request mode:POST

    Request data type:application/json

    Response data type:*/*

    Interface description:

    Update project users

    Request parameters:

|parameter name|parameter description|whether it is required|request type|data type|schema|
|accessusers|access users|false|string|string| |
|editusers|edit user|false|string|string| |
|projectName|project name|false|string|string| |

Response status:

|Status code|description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|dataset|object| |
|message|description|string| |
|method|request url|string| |
|status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Update shared resources#

    Interface address:/api/rest_j/v1/bml/updateShareResource

    Request mode:POST

    Request data type:multipart/form-data

    Response data type:*/*

    Interface description:

    Update shared resources

    Request parameters:

|parameter name|parameter description|request type|must be|data type|schema|
|file|file|formdata|false|ref| |
|resourceid|resource ID|query|false|string| |

Response status:

|Status code|description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|dataset|object| |
|message|description|string| |
|method|request url|string| |
|status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Upload shared resources#

    Interface address:/api/rest_j/v1/bml/uploadShareResource

    Request mode:POST

    Request data type:application/json

    Response data type:*/*

    Interface description:

    Upload shared resources

    Request parameters:

Parameter name | Parameter description | Request type | Required | Data type
expireTime | expiration time | query | false | string
expiretype | expiry type | query | false | string
file | file set | formdata | false | ref
isexpire | whether it expires | query | false | string
maxversion | max version | query | false | ref
projectName | project name | query | false | string
resourceheader | resource header | query | false | string
system | system | query | false | string

Response status:

Status code | Description | Schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
Data | dataset | object |
Message | description | string |
Method | request url | string |
Status | status | integer | integer

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    Version: 1.1.2

    BML Resource Management

    BmlRestfulApi class

    update owner#

    Interface address:/api/rest_j/v1/bml/changeOwner

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Update owner

    Request Parameters:

Parameter name | Parameter description | Required | Data type | Schema
newOwner | new owner | false | String | String
oldOwner | old owner | false | String | String
resourceId | resource ID | false | String | String

Response Status:

Status code | Description | Schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Copy resources to other users#

    Interface address:/api/rest_j/v1/bml/copyResourceToAnotherUser

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Copy resources to specified user

    Request Parameters:

Parameter name | Parameter description | Required | Data type | Schema
anotherUser | specified user | false | String | String
resourceId | ResourceId | false | String | String

Response Status:

Status code | Description | Schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete resource#

    Interface address:/api/rest_j/v1/bml/deleteResource

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Delete resource

    Request Parameters:

Parameter name | Parameter description | Required | Data type | Schema
resourceId | ResourceId | true | String | String

Response Status:

Status code | Description | Schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete multiple resources#

    Interface address:/api/rest_j/v1/bml/deleteResources

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete multiple resources

    Request Parameters:

Parameter name | Parameter description | Required | Data type | Schema
resourceIds | Collection of resource IDs, delete multiple resources | true | List | List

Response Status:

Status code | Description | Schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete version#

    Interface address:/api/rest_j/v1/bml/deleteVersion

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete version

    Request Parameters:

Parameter name | Parameter description | Required | Data type | Schema
resourceId | ResourceId | true | String | String
version | version | true | String | String

Response Status:

Status code | Description | Schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Download resources#

    Interface address:/api/rest_j/v1/bml/download

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Download the corresponding resource via the resourceId and version parameters

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
resourceId | ResourceId | query | false | string
version | Resource version; if not specified, defaults to latest | query | false | string

Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get Basic#

    Interface address:/api/rest_j/v1/bml/getBasic

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get Basic

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
resourceId | ResourceId | query | true | string

Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get resource information#

    Interface address:/api/rest_j/v1/bml/getResourceInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get resource information

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
resourceId | ResourceId | query | false | string

Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

Get resource list#

    Interface address:/api/rest_j/v1/bml/getResources

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Get the resource list (paged)

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
currentPage | page number | query | false | string
pageSize | page size | query | false | string
system | system | query | false | string

Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get version information#

    Interface address: /api/rest_j/v1/bml/getVersions

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get bml version information

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
currentPage | page number | query | false | string
pageSize | page size | query | false | string
resourceId | Resource ID | query | false | string

Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    rollback version#

    Interface address:/api/rest_j/v1/bml/rollbackVersion

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Rollback version

    Request Parameters:

Parameter name | Parameter description | Required | Data type | Schema
resourceId | ResourceId | false | String | String
version | Rollback version | false | String | String

Response Status:

Status code | Description | Schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    update resource#

    Interface address:/api/rest_j/v1/bml/updateVersion

    Request method: POST

    Request data type: multipart/form-data

    Response data type: */*

    Interface description:

Update a resource file over HTTP

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
file | file | formData | true | ref
resourceId | resourceId | query | true | string

Response Status:

Status code | Description | Schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    upload resources#

    Interface address:/api/rest_j/v1/bml/upload

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Upload resources

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | Schema
file | file | formData | true | array | file
expireTime | expireTime | query | false | string |
expireType | expireType | query | false | string |
isExpire | isExpire | query | false | string |
maxVersion | maxVersion | query | false | integer(int32) |
resourceHeader | resourceHeader | query | false | string |
system | system | query | false | string |

Response Status:

Status code | Description | Schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    Version: 1.1.2

    BMLFS Management

    BMLFsRestfulApi class

openScriptFromBML#

    Interface address:/api/rest_j/v1/filesystem/openScriptFromBML

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    openScriptFromBML

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
fileName | File name | query | true | string
creator | Creator | query | false | string
projectName | Project name | query | false | string
resourceId | ResourceId | query | false | string
version | version | query | false | string

Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

product/openScriptFromBML#

    Interface address:/api/rest_j/v1/filesystem/product/openScriptFromBML

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    /product/openScriptFromBML

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
fileName | File name | query | true | string
creator | Creator | query | false | string
resourceId | ResourceId | query | false | string
version | version | query | false | string

Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

Save script to BML#

    Interface address:/api/rest_j/v1/filesystem/saveScriptToBML

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Save script to BML

    Request Parameters:

Parameter name | Parameter description | Required | Data type | Schema
creator | Creator | true | String | String
fileName | File name | true | String | String
metadata | metadata | true | String | String
projectName | Project Name | true | String | String
resourceId | Resource ID | true | String | String
scriptContent | Content | true | String | String
SaveScriptToBML | json body | true | SaveScriptToBML | SaveScriptToBML

Response Status:

Status code | Description | Schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    Version: 1.1.2

Generic Api

    CommonRestfulApi class

    offline#

    Interface address:/api/rest_j/v1/offline

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Offline

    Request Parameters:

    No

    Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
DataSourceAdminRestfulApi

Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
dataSourceId | dataSourceId | path | true | integer(int64)

Response parameters:

Parameter name | Parameter description | Type | Schema
data | | object |
message | | string |
method | | string |
status | | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/data-source-manager/3/connect-params",    "status": 0,    "message": "OK",    "data": {        "connectParams": {            "host": "127.0.0.1",            "password": "xxxxx",            "port": "9600",            "username": "linkis"        }    }}

    getVersionList#

    Interface address: /api/rest_j/v1/data-source-manager/{dataSourceId}/versions

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
dataSourceId | dataSourceId | path | true | integer(int64)

Response parameters:

Parameter name | Parameter description | Type | Schema
data | | object |
message | | string |
method | | string |
status | | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/data-source-manager/1/versions",    "status": 0,    "message": "OK",    "data": {        "versions": [            {                "versionId": 1,                "datasourceId": 1,                "connectParams": {                    "host": "127.0.0.1",                    "password": "xxxxx",                    "port": "9600",                    "username": "linkis"                },                "parameter": "{\"host\":\"127.0.0.1\",\"port\":\"9600\",\"username\":\"linkis\",\"password\": \"rO0ABXQACUFiY2RAMjAyMg==\"}",                "comment": "Initialization Version",                "createUser": "hadoop"            }        ]    }}

    connectDataSource#

    Interface address: /api/rest_j/v1/data-source-manager/{dataSourceId}/{version}/op/connect

    Request method: PUT

    Request data type: application/json

    Response data type: application/json

    Interface description:

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
dataSourceId | dataSourceId | path | true | integer(int64)
version | version | path | true | integer(int64)

Response parameters:

Parameter name | Parameter description | Type | Schema
data | | object |
message | | string |
method | | string |
status | | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/data-source-manager/1/1/op/connect",    "status": 0,    "message": "OK",    "data": {        "ok": true    }}

DataSourceOperateRestfulApi class

    connect#

    Interface address:/api/rest_j/v1/data-source-manager/op/connect/json

    Request method: POST

    Request data type: application/json

    Response data type: application/json

    Interface description:

    Request example:

    {  "connectParams": {},  "createIdentify": "",  "createSystem": "",  "createTime": "",  "createUser": "",  "dataSourceDesc": "",  "dataSourceEnv": {    "connectParams": {},    "createTime": "",    "createUser": "",    "dataSourceType": {      "classifier": "",      "description": "",      "icon": "",      "id": "",      "layers": 0,      "name": "",      "option": ""    },    "dataSourceTypeId": 0,    "envDesc": "",    "envName": "",    "id": 0,    "modifyTime": "",    "modifyUser": ""  },  "dataSourceEnvId": 0,  "dataSourceName": "",  "dataSourceType": {    "classifier": "",    "description": "",    "icon": "",    "id": "",    "layers": 0,    "name": "",    "option": ""  },  "dataSourceTypeId": 0,  "expire": true,  "id": 0,  "labels": "",  "modifyTime": "",  "modifyUser": "",  "publishedVersionId": 0,  "versionId": 0,  "versions": [    {      "comment": "",      "connectParams": {},      "createTime": "",      "createUser": "",      "datasourceId": 0,      "parameter": "",      "versionId": 0    }  ]}

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
dataSource | dataSource | body | true | DataSource
  connectParams | | | false | object
  createIdentify | | | false | string
  createSystem | | | false | string
  createTime | | | false | string(date-time)
  createUser | | | false | string
  dataSourceDesc | | | false | string
  dataSourceEnv | | | false | DataSourceEnv
    connectParams | | | false | object
    createTime | | | false | string
    createUser | | | false | string
    dataSourceType | | | false | DataSourceType
      classifier | | | false | string
      description | | | false | string
      icon | | | false | string
      id | | | false | string
      layers | | | false | integer
      name | | | false | string
      option | | | false | string
    dataSourceTypeId | | | false | integer
    envDesc | | | false | string
    envName | | | false | string
    id | | | false | integer
    modifyTime | | | false | string
    modifyUser | | | false | string
  dataSourceEnvId | | | false | integer(int64)
  dataSourceName | | | false | string
  dataSourceType | | | false | DataSourceType
    classifier | | | false | string
    description | | | false | string
    icon | | | false | string
    id | | | false | string
    layers | | | false | integer
    name | | | false | string
    option | | | false | string
  dataSourceTypeId | | | false | integer(int64)
  expire | | | false | boolean
  id | | | false | integer(int64)
  labels | | | false | string
  modifyTime | | | false | string(date-time)
  modifyUser | | | false | string
  publishedVersionId | | | false | integer(int64)
  versionId | | | false | integer(int64)
  versions | | | false | array[DatasourceVersion]
    comment | | | false | string
    connectParams | | | false | object
    createTime | | | false | string
    createUser | | | false | string
    datasourceId | | | false | integer
    parameter | | | false | string
    versionId | | | false | integer

Response parameters:

Parameter name | Parameter description | Type | Schema
data | | object |
message | | string |
method | | string |
status | | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}
    Version: 1.1.2

    Filesystem

    FsRestfulApi class

    create new Dir#

    Interface address:/api/rest_j/v1/filesystem/createNewDir

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Create a new Dir

    Request Parameters:

Parameter name | Parameter description | Required | Data type | Schema
path | path | true | String | String

Response Status:

Status code | Description | Schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    create new file#

    Interface address: /api/rest_j/v1/filesystem/createNewFile

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Create a new file

    Request Parameters:

Parameter name | Parameter description | Required | Data type | Schema
path | path | true | String | String

Response Status:

Status code | Description | Schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

delete dir or file#

    Interface address: /api/rest_j/v1/filesystem/deleteDirOrFile

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Delete a directory or file

    Request Parameters:

Parameter name | Parameter description | Required | Data type | Schema
path | address | true | String | String

Response Status:

Status code | Description | Schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0    }

    download#

    Interface address:/api/rest_j/v1/filesystem/download

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Download

    Request Parameters:

Parameter name | Parameter description | Required | Data type | Schema
charset | Charset | true | String | String
path | address | true | String | String

Response Status:

Status code | Description | Schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    file info#

    Interface address:/api/rest_j/v1/filesystem/fileInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface Description:

    File Information

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
path | address | query | true | string
pageSize | page size | query | false | ref

Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    format#

    Interface address:/api/rest_j/v1/filesystem/formate

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Parse a delimited text file (CSV-style options) into a formatted preview

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
encoding | encoding | query | true | string
escapeQuotes | escapeQuotes | query | true | string
fieldDelimiter | Field Delimiter | query | true | string
hasHeader | whether the file has a header row | query | true | boolean
quote | quote | query | true | string
path | address | query | false | string

Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    function list#

    Interface address:/api/rest_j/v1/filesystem/getDirFileTrees

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get a list of udf functions

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
path | request path | query | true | string

Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/filesystem/getDirFileTrees",    "status": 0,    "message": "OK",    "data": {        "dirFileTrees": {            "name": "",            "path": "",            "properties": null,            "children": [{                "name": "",                "path": "",                "properties": {                    "size": "",                    "modifytime": ""                },                "children": ,                "isLeaf": ,                "parentPath": ""            }],            "isLeaf": ,            "parentPath":        }    }}

    root path#

    Interface address:/api/rest_j/v1/filesystem/getUserRootPath

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get root path

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
pathType | FileType | query | false | string

Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    does it exist#

    Interface address: /api/rest_j/v1/filesystem/isExist

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Whether it exists

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
path | address | query | true | string

Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    open a file#

    Interface address: /api/rest_j/v1/filesystem/openFile

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Open file

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
path | address | query | true | string
charset | Charset | query | false | string
page | page number | query | false | ref
pageSize | page size | query | false | ref

Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/filesystem/openFile",    "status": 0,    "message": "OK",    "data": {        "metadata": [{            "columnName": "_c0",            "comment": "NULL",            "dataType": ""        }],        "totalPage": ,        "totalLine": ,        "page": ,        "type": "",        "fileContent": [            [""]        ]    }}

Open log#

    Interface address:/api/rest_j/v1/filesystem/openLog

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Open a log file

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
path | address | query | false | string
proxyUser | Proxy User | query | false | string

Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/filesystem/openLog",    "status": 0,    "message": "OK",    "data": {        "log": ["", ""]    }}

    Rename#

    Interface address:/api/rest_j/v1/filesystem/rename

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Rename the file

    Request Parameters:

Parameter name | Parameter description | Required | Data type | Schema
newDest | new name | false | String | String
oldDest | old name | false | String | String

Response Status:

Status code | Description | Schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Convert the result set to Excel#

    Interface address: /api/rest_j/v1/filesystem/resultsetToExcel

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Convert the result set to Excel

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
autoFormat | Auto format | query | false | boolean
charset | charset | query | false | string
csvSeerator | csv Separator | query | false | string
limit | limit | query | false | ref
nullValue | null value | query | false | string
outputFileName | Output file name | query | false | string
outputFileType | Output file type | query | false | string
path | address | query | false | string
quoteRetouchEnable | Whether to enable quote retouching | query | false | boolean
sheetName | sheet name | query | false | string

Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Convert resultsets to Excel#

    Interface address:/api/rest_j/v1/filesystem/resultsetsToExcel

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    resultsets converted to Excel

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
autoFormat | Auto format | query | true | boolean
limit | limit | query | true | ref
nullValue | null value | query | true | string
outputFileName | Output file name | query | true | string
path | address | query | false | string

Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    save the script#

    Interface address:/api/rest_j/v1/filesystem/saveScript

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Save script

    Request Parameters:

Parameter name | Parameter description | Required | Data type | Schema
path | address | true | String | String
SaveScript | json body | true | SaveScript | SaveScript
charset | Charset | false | String | String
params | parameters | false | Object | Object
scriptContent | script content | false | String | String

Response Status:

Status code | Description | Schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    upload#

    Interface address:/api/rest_j/v1/filesystem/upload

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Upload files, multiple files can be uploaded

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
file | file | formData | false | ref
path | address | query | false | string

Response Status:

Status code | Description | Schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    Version: 1.1.2

    Add Global Variable

    VariableRestfulApi class

    add global variables#

    Interface address:/api/rest_j/v1/variable/saveGlobalVariable

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Add global variables

    Request example:

    {    globalVariables: [{        keyID: ,        key: "",        valueID: ,        value: ""    }]}

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
globalVariables | Added parameter data, one-to-many; key: globalVariables, value: List | Map | true | Map
key | Parameter name, belongs to globalVariables | String | true | String
value | Variable value, paired with key under globalVariables | List | true | List

Response Status:

Status code | Description | Schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/variable/saveGlobalVariable",    "status": 0,    "message": "OK",    "data": {}}
    Version: 1.1.2

    Admin Console Home Page Interface

    QueryRestfulApi class

    admin authentication#

    Interface address:/api/rest_j/v1/jobhistory/governanceStationAdmin

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Verifies whether the current user is an administrator; returns true if so, otherwise false

    Request Parameters:

    No

    Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

{    "data": {        "solution": null,        "admin": true    },    "message": "OK",    "method": "/api/jobhistory/governanceStationAdmin",    "status": 0}

    global history#

    Interface address:/api/rest_j/v1/jobhistory/list

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Get the global history list filtered by the conditions below; all records are returned by default

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
creator | Creator | query | false | string
endDate | End time | query | false | integer(int64)
executeApplicationName | operator | query | false | string
isAdminView | Whether in administrator mode or normal mode | query | false | boolean
pageSize | page size | query | false | ref
proxyUser | Proxy User | query | false | string
startDate | Start time | query | false | integer(int64)
status | status | query | false | string
taskID | ID | query | false | integer(int64)
tpageNow | page number | query | false | ref
pageNow | pageNow | query | false | integer(int32)

Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

{    "method": "/api/jobhistory/list",    "status": 0,    "message": "OK",    "data": {        "solution": null,        "totalPage": 90,        "tasks": [{            "taskID": ,            "instance": "",            "execId": "",            "umUser": "",            "engineInstance": null,            "executionCode": "",            "progress": "",            "logPath": "",            "resultLocation": "",            "status": "",            "createdTime": ,            "updatedTime": ,            "engineType": "",            "errCode": 0,            "errDesc": "",            "executeApplicationName": "",            "requestApplicationName": "",            "runType": "datachecker",            "paramsJson": "",            "costTime": 1000,            "strongerExecId": "",            "sourceJson": "",            "sourceTailor": "",            "engineStartTime": null,            "labels": [],            "canRetry": ,            "subJobs":        }]    }}
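A filtered query sketch; epoch-millisecond timestamps are assumed for startDate/endDate based on their integer(int64) type, and the host and session are placeholders.

import requests

# Sketch: first page of FAILED tasks in a time window.
session = requests.Session()
resp = session.get(
    "http://127.0.0.1:9001/api/rest_j/v1/jobhistory/list",
    params={"startDate": 1704067200000, "endDate": 1706745600000,
            "status": "FAILED", "pageNow": 1, "pageSize": 20},
)
print(resp.json()["data"]["totalPage"])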

    list undo#

    Interface address:/api/rest_j/v1/jobhistory/listundone

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Undo list

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
creator | creator | query | false | string
endDate | End time | query | false | integer(int64)
engineType | engineType | query | false | string
pageNow | pageNow | query | false | ref
pageSize | pageSize | query | false | ref
startDate | Start time | query | false | ref
startTaskID | startTaskID | query | false | integer(int64)
status | status | query | false | string

Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    History details#

    Interface address:/api/rest_j/v1/jobhistory/{id}/get

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get the detailed information of a history through the history ID

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
id | HistoryId | query | false | integer(int64)

Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/jobhistory/1928730/get",    "status": 0,    "message": "OK",    "data": {        "task": {            "taskID": ,            "instance": "",            "execId": "",            "umUser": "",            "engineInstance": "",            "executionCode": "",            "progress": "",            "logPath": "",            "resultLocation": "",            "status": "",            "createdTime":,            "updatedTime": ,            "engineType": "",            "errCode": ,            "errDesc": "",            "executeApplicationName": "",            "requestApplicationName": "",            "runType": "hql",            "paramsJson": "",            "costTime": ,            "strongerExecId": "",            "sourceJson": "",            "sourceTailor": "",            "engineStartTime": null,            "labels": [""],            "canRetry": false,            "subJobs": null        }    }}
    Version: 1.1.2

    Instance Management

    InstanceRestful class

    Microservice instance list#

    Interface address: /api/rest_j/v1/microservice/allInstance

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Get the list of instances managed by the microservice management module; one or more may be returned, all by default

    Request Parameters:

    No

    Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "instances": [{            "id": ,            "updateTime": ,            "createTime": ,            "applicationName": ",            "instance": "",            "labels": [{                "stringValue": "",                "labelKey": "",                "feature": "",                "id": 5,                "labelValueSize": 0,                "modifiable": true,                "updateTime": ,                "createTime": ,                "featureKey": "",                "empty":            }]        }]    }}

    Get eurekaURL#

    Interface address: /api/rest_j/v1/microservice/eurekaURL

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    return eurekaURL

    Request Parameters:

    No

    Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "url": ""    }}

    Edit the microservice instance#

    Interface address: /api/rest_j/v1/microservice/instanceLabel

    Request method: PUT

    Request data type: application/json

    Response data type: */*

    Interface description:

    Edit or modify the instance in microservice management

    Request example:

    {    applicationName: "linkis-ps-cs"    instance: "bdpdws110004:9108"    labels: [{        labelKey: "route",        stringValue: "cs_2_dev"    }]}

    Request Parameters:

Parameter name | Parameter description | Required | Data type | Schema
applicationName | Engine Label | false | String | String
instance | Engine instance name | false | String | String
labelKey | The label key in the added content, i.e. the key of a map in the labels collection | false | String | String
labels | The engine instance update content; a collection holding map-type items | false | List | List
stringValue | The label value in the added content, i.e. the value of a map in the labels collection | false | String | String

Response Status:

Status code | Description | Schema
200 | OK | Message
201 | Created |
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": null,    "status": 0,    "message": "success",    "data": {        "labels": [{            "stringValue": "",            "labelKey": "",            "feature": null,            "modifiable": ,            "featureKey": "",            "empty":        }]    }}

    Modifiable label types#

    Interface address:/api/rest_j/v1/microservice/modifiableLabelKey

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get a list of label types that can be modified, such as 'userCreator, route'

    Request Parameters:

    No

    Response Status:

Status code | Description | Schema
200 | OK | Message
401 | Unauthorized |
403 | Forbidden |
404 | Not Found |

Response parameters:

Parameter name | Parameter description | Type | Schema
data | Dataset | object |
message | Description | string |
method | request url | string |
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {    "keyList": []    }}
History Job Interface

none

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "admin": true    }}

    getHistoryTask#

    Interface address:/api/rest_j/v1/jobhistory/{id}/get

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

Interface description: Get the details of a history task by its ID

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
id | id | path | true | string

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "task": {                "taskID": 1,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "FAILED",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"        }    }}

    listHistoryTask#

    Interface address:/api/rest_j/v1/jobhistory/list

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type
startDate | startDate | path | false | Long
endDate | endDate | path | false | Long
status | status | path | false | string
pageNow | pageNow | path | false | Integer
pageSize | pageSize | path | false | Integer
taskID | taskID | path | false | Long
executeApplicationName | executeApplicationName | path | false | string
creator | creator | path | false | string
proxyUser | proxyUser | path | false | string
isAdminView | isAdminView | path | false | Boolean

    Sample Response:

    {    "method": null,        "status": 0,        "message": "OK",        "data": {            "tasks": [{                "taskID": 1,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "FAILED",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"            },            {                "taskID": 2,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "FAILED",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"            }],            "totalPage": 1    }}

    listUndoneHistoryTask#

    Interface address:/api/rest_j/v1/jobhistory/listundone

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
startDate | startDate | path | false | Long
endDate | endDate | path | false | Long
status | status | path | false | string
pageNow | pageNow | path | false | Integer
pageSize | pageSize | path | false | Integer
startTaskID | startTaskID | path | false | Long
engineType | engineType | path | false | string
creator | creator | path | false | string

    Sample Response:

{
    "method": null,
    "status": 0,
    "message": "OK",
    "data": {
        "tasks": [{
            "taskID": 1,
            "instance": "xxx",
            "execId": "exec-id-xxx",
            "umUser": "test",
            "engineInstance": "xxx",
            "progress": "10%",
            "logPath": "hdfs://xxx/xxx/xxx",
            "resultLocation": "hdfs://xxx/xxx/xxx",
            "status": "Running",
            "createdTime": "2019-01-01 00:00:00",
            "updatedTime": "2019-01-01 01:00:00",
            "engineType": "spark",
            "errorCode": 100,
            "errDesc": "Task Failed with error code 100",
            "executeApplicationName": "hello world",
            "requestApplicationName": "hello world",
            "runType": "xxx",
            "paramJson": "{\"xxx\":\"xxx\"}",
            "costTime": 10000,
            "strongerExecId": "execId-xxx",
            "sourceJson": "{\"xxx\":\"xxx\"}"
        },
        {
            "taskID": 2,
            "instance": "xxx",
            "execId": "exec-id-xxx",
            "umUser": "test",
            "engineInstance": "xxx",
            "progress": "10%",
            "logPath": "hdfs://xxx/xxx/xxx",
            "resultLocation": "hdfs://xxx/xxx/xxx",
            "status": "Running",
            "createdTime": "2019-01-01 00:00:00",
            "updatedTime": "2019-01-01 01:00:00",
            "engineType": "spark",
            "errorCode": 100,
            "errDesc": "Task Failed with error code 100",
            "executeApplicationName": "hello world",
            "requestApplicationName": "hello world",
            "runType": "xxx",
            "paramJson": "{\"xxx\":\"xxx\"}",
            "costTime": 10000,
            "strongerExecId": "execId-xxx",
            "sourceJson": "{\"xxx\":\"xxx\"}"
        }],
        "totalPage": 1
    }
}
    Version: 1.1.2

    Linkis Error Codes

    LinkisErrorCodeRestful class

    Get Linkis error code#

    Interface address:/api/rest_j/v1/errorcode/getAllErrorCodes

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get Linkis error code list

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    Version: 1.1.2

    Mdq Table Interface

    MdqTableRestfulApi class

    Activate table operations#

    Interface address:/api/rest_j/v1/datasource/active

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Activate table operation

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
tableId | Table ID | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Generate the DDL statement for the new library table#

    Interface address:/api/rest_j/v1/datasource/displaysql

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Generate DDL statement for new library table

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
table | Table | String | false | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get partition statistics#

    Interface address:/api/rest_j/v1/datasource/getPartitionStatisticInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get partition statistics

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
database | Datasource | query | false | string
partitionSort | Partition Sort | String | false | String
tableName | table name | query | false | string
partitionPath | partitionPath | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get table information#

    Interface address:/api/rest_j/v1/datasource/getTableBaseInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get table information

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
database | Datasource | query | false | string
tableName | table name | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get table field information#

    Interface address:/api/rest_j/v1/datasource/getTableFieldsInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get table field information

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
database | Datasource | query | false | string
tableName | table name | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get table statistics#

    Interface address:/api/rest_j/v1/datasource/getTableStatisticInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get table statistics

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
database | Datasource | query | false | string
pageNow | page number | query | false | string
pageSize | page size | query | false | string
partitionSort | Partition Sort | query | false | string
tableName | table name | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Active ID#

    Interface address:/api/rest_j/v1/datasource/persistTable

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Persist the table with an activation flag

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
table | Table | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    Version: 1.1.2

    MetadataCoreRestful

    getColumns#

    Interface address: /api/rest_j/v1/metadatamanager/columns/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description: Get the column information of the data table

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
dataSourceId | dataSourceId | path | true | string
database | database | path | true | string
system | system | query | true | string
table | table | path | true | string

    Sample Response:

{
    "method": null,
    "status": 0,
    "message": "OK",
    "data": {
        "columns": [
            { "index": 1, "primaryKey": true, "name": "id", "type": "INT" },
            { "index": 2, "primaryKey": false, "name": "datasource_name", "type": "VARCHAR" },
            { "index": 3, "primaryKey": false, "name": "datasource_desc", "type": "VARCHAR" },
            { "index": 4, "primaryKey": false, "name": "datasource_type_id", "type": "INT" },
            { "index": 5, "primaryKey": false, "name": "create_identify", "type": "VARCHAR" },
            { "index": 6, "primaryKey": false, "name": "create_system", "type": "VARCHAR" },
            { "index": 7, "primaryKey": false, "name": "parameter", "type": "VARCHAR" },
            { "index": 8, "primaryKey": false, "name": "create_time", "type": "DATETIME" },
            { "index": 9, "primaryKey": false, "name": "modify_time", "type": "DATETIME" },
            { "index": 10, "primaryKey": false, "name": "create_user", "type": "VARCHAR" },
            { "index": 11, "primaryKey": false, "name": "modify_user", "type": "VARCHAR" },
            { "index": 12, "primaryKey": false, "name": "labels", "type": "VARCHAR" },
            { "index": 13, "primaryKey": false, "name": "version_id", "type": "INT" },
            { "index": 14, "primaryKey": false, "name": "expire", "type": "TINYINT" },
            { "index": 15, "primaryKey": false, "name": "published_version_id", "type": "INT" }
        ]
    }
}

    getDatabases#

    Interface address:/api/rest_j/v1/metadatamanager/dbs/{dataSourceId}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description: Get the list of database names of the data source

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
dataSourceId | dataSourceId | path | true | string
system | system | query | true | string

    Sample Response:

{
    "method": null,
    "status": 0,
    "message": "OK",
    "data": {
        "dbs": [
            "information_schema",
            "linkis",
            "linkis_sit"
        ]
    }
}
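
A minimal Java sketch of this call; the gateway address, data source ID and session cookie are placeholders, and `system` is the required caller-system identifier:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class GetDatabasesDemo {
    public static void main(String[] args) throws Exception {
        String gateway = "http://127.0.0.1:9001";   // placeholder gateway address
        long dataSourceId = 1;                      // placeholder data source ID
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(gateway + "/api/rest_j/v1/metadatamanager/dbs/" + dataSourceId + "?system=linkis"))
                .header("Cookie", "<session cookie from login>")   // placeholder
                .GET()
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());   // data.dbs is the list of database names
    }
}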

    getPartitions#

    Interface address:/api/rest_j/v1/metadatamanager/partitions/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
dataSourceId | dataSourceId | path | true | string
database | database | path | true | string
system | system | query | true | string
table | table | path | true | string
traverse | traverse | query | false | boolean

    Sample Response:

{
    "method": null,
    "status": 0,
    "message": "OK",
    "data": {
        "props": {
            "partKeys": [
                "ds"
            ],
            "root": {}
        }
    }
}

    getTableProps#

    Interface address:/api/rest_j/v1/metadatamanager/props/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
dataSourceId | dataSourceId | path | true | string
database | database | path | true | string
system | system | query | true | string
table | table | path | true | string

    Sample Response:

{
    "method": null,
    "status": 0,
    "message": "OK",
    "data": {
        "props": {
            "skip.header.line.count": "1",
            "columns.types": "int:int:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string",
            "columns": "id,age,job,marital,education,default,balance,housing,loan,contact,day,month,duration,campaign,pdays,previous,poutcome,y",
            "field.delim": ",",
            "transient_lastDdlTime": "1646732554",
            "partition_columns.types": "string",
            "columns.comments": "\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000",
            "bucket_count": "-1",
            "serialization.ddl": "struct demo_data { i32 id, i32 age, string job, string marital, string education, string default, string balance, string housing, string loan, string contact, string day, string month, string duration, string campaign, string pdays, string previous, string poutcome, string y}",
            "file.outputformat": "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat",
            "partition_columns": "ds",
            "colelction.delim": "-",
            "serialization.lib": "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
            "name": "dss_autotest.demo_data",
            "location": "hdfs://bdpdev01/user/hive/warehouse/hadoop/dss_autotest.db/demo_data",
            "mapkey.delim": ":",
            "file.inputformat": "org.apache.hadoop.mapred.TextInputFormat",
            "serialization.format": ",",
            "column.name.delimiter": ","
        }
    }
}
    Version: 1.1.2

    Parameter Configuration

    ConfigurationRestfulApi class

    Add KeyForEngine#

    Interface address:/api/rest_j/v1/configuration/addKeyForEngine

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Add KeyForEngine

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
engineType | engineType | query | false | string
keyJson | keyJson | query | false | string
token | token | query | false | string
version | version | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Add application type#

    Interface address:/api/rest_j/v1/configuration/createFirstCategory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Add application type tag

    Request example:

    {    "categoryName": "",    "description": ""}

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
categoryName | Reference type label name | false | String | String
description | Description | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/createFirstCategory",    "status": 0,    "message": "OK",    "data": {}}

    Add parameter configuration#

    Interface address:/api/rest_j/v1/configuration/createSecondCategory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Add parameter configuration

    Request example:

{    "categoryId": ,    "description": "",    "engineType": "",    "version": ""}

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
categoryId | Parameter ConfigurationId | true | String | String
description | Description | true | String | String
engineType | Engine Type | true | String | String
version | version number | true | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/createSecondCategory",    "status": 0,    "message": "OK",    "data": {}}

    delete configuration#

    Interface address: /api/rest_j/v1/configuration/deleteCategory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete parameter configuration

    Request example:

{    "categoryId": }

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
categoryId | Parameter ConfigurationId | String | true | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/deleteCategory",    "status": 0,    "message": "OK",    "data": {}}

    Engine type list#

    Interface address:/api/rest_j/v1/configuration/engineType

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get a list of engine types

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/engineType",    "status": 0,    "message": "OK",    "data": {    "engineType": []    }}

    App types#

    Interface address: /api/rest_j/v1/configuration/getCategory

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Apply type tag in parameter configuration

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

{
    "method": "/api/configuration/getCategory",
    "status": 0,
    "message": "OK",
    "data": {
        "Category": [{
            "categoryId": ,
            "labelId": ,
            "categoryName": "",
            "childCategory": [],
            "description": null,
            "tag": null,
            "createTime": ,
            "updateTime": ,
            "level": ,
            "fatherCategoryName": ""
        }]
    }
}

    queue resources#

    Interface address:/api/rest_j/v1/configuration/getFullTreesByAppName

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    The queue resource module in the parameter configuration returns the column and value of the queue resource

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
creator | label name | query | false | string
engineType | engineType | query | false | string
version | version | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

{
    "method": "/api/configuration/getFullTreesByAppName",
    "status": 0,
    "message": "OK",
    "data": {
        "fullTree": [{
            "name": "Queue Resource",
            "description": null,
            "settings": [{
                "id": ,
                "key": "",
                "description": "",
                "name": "",
                "defaultValue": "",
                "validateType": "",
                "validateRange": "[]",
                "level": 1,
                "engineType": ,
                "treeName": "",
                "valueId": ,
                "configValue": "",
                "configLabelId": ,
                "unit": null,
                "isUserDefined": ,
                "hidden": ,
                "advanced":
            }]
        }]
    }
}

    Get key value#

    Interface address:/api/rest_j/v1/configuration/keyvalue

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get key value

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
configKey | configKey | query | true | string
creator | creator | query | false | string
engineType | engineType | query | false | string
version | version | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    save key value#

    Interface address:/api/rest_j/v1/configuration/keyvalue

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Save key value

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
configKey | configKey | true | String | String
configValue | configValue | true | String | String
creator | creator | true | String | String
engineType | engineType | true | String | String
version | version | true | String | String
SaveKeyValue | json body | true | SaveKeyValue | SaveKeyValue

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
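
A minimal Java sketch of saving a key value; the gateway address, session cookie and the configuration values (key, engine type, version) are placeholders that must match your deployment:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class SaveKeyValueDemo {
    public static void main(String[] args) throws Exception {
        // Placeholder values; the key must be an existing configuration key for this engine/version.
        String body = "{"
                + "\"configKey\": \"spark.executor.memory\","
                + "\"configValue\": \"4g\","
                + "\"creator\": \"IDE\","
                + "\"engineType\": \"spark\","
                + "\"version\": \"2.4.3\""
                + "}";
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://127.0.0.1:9001/api/rest_j/v1/configuration/keyvalue"))   // placeholder gateway
                .header("Content-Type", "application/json")
                .header("Cookie", "<session cookie from login>")   // placeholder
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}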

    delete key value#

    Interface address:/api/rest_j/v1/configuration/keyvalue

    Request method: DELETE

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Delete key value

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
configKey | configKey | true | String | String
creator | creator | true | String | String
engineType | engineType | true | String | String
version | version | true | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
204 | No Content
401 | Unauthorized
403 | Forbidden

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    rpc test#

    Interface address: /api/rest_j/v1/configuration/rpcTest

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    rpc test

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
creator | creator | query | false | string
engineType | engineType | query | false | string
username | username | query | false | string
version | version | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Save queue resources#

    Interface address:/api/rest_j/v1/configuration/saveFullTree

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Save queue resources

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
creator | App Type Name | String | true | String
description | Description, belonging to the content in fullTree | String | true | String
engineType | Engine Type | String | true | String
fullTree | Details under Application Type | List | true | List
name | Queue resource name, which belongs to the content in fullTree | String | true | String
settings | Detailed content in the queue resource, belonging to the content in fullTree | List | true | List

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/saveFullTree",    "status": 0,    "message": "OK",    "data": {}}

    Update category information#

    Interface address: /api/rest_j/v1/configuration/updateCategoryInfo

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Update category information

Request example:

{    "description": "",    "categoryId": }

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
categoryId | categoryId | String | true | String
description | description | String | true | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/updateCategoryInfo",    "status": 0,    "message": "OK",    "data": {}}
    Version: 1.1.2

    UDF Operations Management

    UDFApi class

    new#

    Interface address:/api/rest_j/v1/udf/add

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Add a new UDF

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
clusterName | clusterName | false | String | String
createTime | CreateTime | false | Date | Date
createUser | Creator | false | String | String
description | Description | false | String | String
directory | Category, personal function first-level directory | false | String | String
isExpire | is invalid | false | Boolean | Boolean
isLoad | Whether to load | false | Boolean | Boolean
isShared | Shared | false | Boolean | Boolean
path | Only store the last uploaded path of the user for prompting | false | String | String
registerFormat | register execution address | false | String | String
sys | sys | false | String | String
treeId | treeId | false | Long | Long
udfName | udfName | false | String | String
udfType | udfType | false | Integer | Integer
updateTime | Update time | false | Date | Date
useFormat | Use Format | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}
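
Since every field is optional, a registration can carry only the fields it needs. A hypothetical Java sketch (all field values, the gateway address and the cookie are illustrative placeholders, not a tested UDF):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class AddUdfDemo {
    public static void main(String[] args) throws Exception {
        // Illustrative body; adapt udfName, path, registerFormat and useFormat to your function.
        String body = "{"
                + "\"udfName\": \"my_udf\","
                + "\"description\": \"demo udf\","
                + "\"path\": \"file:///tmp/hadoop/my_udf.jar\","
                + "\"registerFormat\": \"create temporary function my_udf as 'com.example.MyUdf'\","
                + "\"useFormat\": \"int my_udf(int)\","
                + "\"isLoad\": true,"
                + "\"isShared\": false,"
                + "\"isExpire\": false"
                + "}";
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://127.0.0.1:9001/api/rest_j/v1/udf/add"))   // placeholder gateway
                .header("Content-Type", "application/json")
                .header("Cookie", "<session cookie from login>")                  // placeholder
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());
    }
}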

    udf tree menu#

    Interface address:/api/rest_j/v1/udf/all

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Get detailed information of udf tree menu

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
path | Request Path | false | String | String
jsonString | jsonString | false | string | string

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    Get udf user list#

    Interface address:/api/rest_j/v1/udf/allUdfUsers

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get udf user list

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    confirmed#

    Interface address: /api/rest_j/v1/udf/authenticate

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Verify that the request is authentic (authentication check)

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    delete#

    Interface address:/api/rest_j/v1/udf/delete/{id}

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
id | id | false | integer | integer(int64)

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    udf file download to local#

    Interface address:/api/rest_j/v1/udf/downloadToLocal

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Download UDF file to local according to version parameters

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | integer
version | version | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    UDF View source code#

    Interface address:/api/rest_j/v1/udf/downloadUdf

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    UDF view source code

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | integer
version | version | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    Publish#

    Interface address:/api/rest_j/v1/udf/publish

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Publish a UDF version

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | integer
version | version | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    fallback version#

    Interface address:/api/rest_j/v1/udf/rollback

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Roll back to the specified version

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | integer
version | version | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    set expiration#

    Interface address:/api/rest_j/v1/udf/setExpire

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Set the UDF as expired

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | Long | Long

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    UDF sharing#

    Interface address: /api/rest_j/v1/udf/shareUDF

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    UDF sharing

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
sharedUsers | sharedUsers | false | List | List
udfInfo | udfInfo | false | UDFInfo | UDFInfo

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    tree new#

    Interface address:/api/rest_j/v1/udf/tree/add

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Add a tree node

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
category | category | false | String | String
childrens | childrens | false | List | List
clusterName | clusterName | false | String | String
createTime | createTime | false | Date | Date
description | description | false | String | String
id | id | false | Long | Long
name | name | false | String | String
parent | parent | false | Long | Long
udfInfos | udfInfos | false | List | List
updateTime | updateTime | false | Date | Date
userName | userName | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    tree delete#

    Interface address:/api/rest_j/v1/udf/tree/delete/{id}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    tree delete

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
id | id | false | integer | integer(int64)

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    tree update#

    Interface address:/api/rest_j/v1/udf/tree/update

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    tree update

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
category | category | false | String | String
childrens | childrens | false | List | List
clusterName | clusterName | false | String | String
createTime | createTime | false | Date | Date
description | description | false | String | String
id | id | false | Long | Long
name | name | false | String | String
parent | parent | false | Long | Long
udfInfos | udfInfos | false | List | List
updateTime | updateTime | false | Date | Date
userName | userName | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    renew#

    Interface address:/api/rest_j/v1/udf/update

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    UDF modification

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
description | Description | false | String | String
id | id | false | Long | Long
isLoad | Whether to load | false | Boolean | Boolean
path | Only store the last uploaded path of the user for prompting | false | String | String
registerFormat | register execution address | false | String | String
udfName | udfName | false | String | String
udfType | udfType | false | Integer | Integer
useFormat | Use Format | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    Get user directory#

    Interface address: /api/rest_j/v1/udf/userDirectory

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get the first-level classification of the user's personal function

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
category | Get the user directory of the specified collection type; if the type is UDF, get the user directory under this type | false | string | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    version list#

    Interface address:/api/rest_j/v1/udf/versionList

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    View version list

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | integer | integer(int64)

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}
Task Submission And Execution Of JDBC API

//3. Create statement and execute query
Statement st = connection.createStatement();
ResultSet rs = st.executeQuery("show tables");
//4. Process the returned results of the database (using the ResultSet class)
while (rs.next()) {
    ResultSetMetaData metaData = rs.getMetaData();
    for (int i = 1; i <= metaData.getColumnCount(); i++) {
        System.out.print(metaData.getColumnName(i) + ":" + metaData.getColumnTypeName(i) + ": " + rs.getObject(i) + " ");
    }
    System.out.println();
}
// close resources
rs.close();
st.close();
connection.close();
    Version: 1.1.2

    Linkis Task submission and execution Rest API document

    • The return of the Linkis Restful interface follows the following standard return format:
    {  "method": "",  "status": 0,  "message": "",  "data": {}}

    Convention:

    • method: Returns the requested Restful API URI, which is mainly used in WebSocket mode.
    • status: return status information, where: -1 means no login, 0 means success, 1 means error, 2 means verification failed, 3 means no access to the interface.
    • data: return specific data.
    • message: return the requested prompt message. If the status is not 0, the message returned is an error message, and the data may have a stack field, which returns specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification
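
Since every interface shares this envelope, client code typically checks status before touching data. A small Java sketch of such a check, assuming the Jackson library is on the classpath:

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class LinkisEnvelope {
    private static final ObjectMapper MAPPER = new ObjectMapper();

    // Returns the "data" node of a successful response, or throws with the server message.
    public static JsonNode dataOf(String responseBody) throws Exception {
        JsonNode root = MAPPER.readTree(responseBody);
        int status = root.path("status").asInt();
        if (status != 0) {   // -1 not logged in, 1 error, 2 verification failed, 3 no access
            throw new IllegalStateException("Linkis error (status=" + status + "): " + root.path("message").asText());
        }
        return root.path("data");
    }
}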

    1. Submit task#

    • Interface /api/rest_j/v1/entrance/submit

    • Submission method POST

    • Request Parameters

{
    "executionContent": {
        "code": "show tables",
        "runType": "sql"
    },
    "params": {
        "variable": {        // task variable
            "testvar": "hello"
        },
        "configuration": {
            "runtime": {     // task runtime params
                "jdbc.url": "XX"
            },
            "startup": {     // ec start up params
                "spark.executor.cores": "4"
            }
        }
    },
    "source": {              // task source information
        "scriptPath": "file:///tmp/hadoop/test.sql"
    },
    "labels": {
        "engineType": "spark-2.4.3",
        "userCreator": "hadoop-IDE"
    }
}

• Sample Response

{
    "method": "/api/rest_j/v1/entrance/submit",
    "status": 0,
    "message": "Request executed successfully",
    "data": {
        "execID": "030418IDEhivebdpdwc010004:10087IDE_hadoop_21",
        "taskID": "123"
    }
}
• execID is the unique execution ID generated for a task once it has been submitted to Linkis. It is of type String and is only meaningful while the task is running, similar to the concept of a PID. The ExecID is designed as (requestApplicationName length)(executeApplicationName length)(instance length) followed by ${requestApplicationName}${executeApplicationName}${entranceInstance information ip+port}${requestApplicationName}_${umUser}_${index}

    • taskID is the unique ID that represents the task submitted by the user. This ID is generated by the database self-increment and is of Long type
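
As an illustration, a minimal Java sketch (JDK 11+, java.net.http) of submitting the example request above; the gateway address and session cookie are placeholders, with the cookie normally obtained from the login interface first:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class SubmitTaskDemo {
    public static void main(String[] args) throws Exception {
        String body = "{"
                + "\"executionContent\": {\"code\": \"show tables\", \"runType\": \"sql\"},"
                + "\"params\": {\"variable\": {}, \"configuration\": {}},"
                + "\"source\": {\"scriptPath\": \"file:///tmp/hadoop/test.sql\"},"
                + "\"labels\": {\"engineType\": \"spark-2.4.3\", \"userCreator\": \"hadoop-IDE\"}"
                + "}";
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://127.0.0.1:9001/api/rest_j/v1/entrance/submit"))   // placeholder gateway
                .header("Content-Type", "application/json")
                .header("Cookie", "<session cookie from login>")                          // placeholder
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        // data.execID drives the status/log/kill calls below; data.taskID is the job history key.
        System.out.println(response.body());
    }
}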

    2. Get Status#

    • Interface /api/rest_j/v1/entrance/${execID}/status

    • Submission method GET

    • Sample Response

{
    "method": "/api/rest_j/v1/entrance/{execID}/status",
    "status": 0,
    "message": "Get status successful",
    "data": {
        "execID": "${execID}",
        "status": "Running"
    }
}
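
A polling sketch for this interface; the terminal status names used here ("Succeed", "Failed", "Cancelled", "Timeout") are the usual Linkis values, but verify them against your version:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.Set;

public class PollStatusDemo {
    public static void main(String[] args) throws Exception {
        String execID = args[0];   // execID returned by the submit interface
        Set<String> terminal = Set.of("Succeed", "Failed", "Cancelled", "Timeout");
        HttpClient client = HttpClient.newHttpClient();
        while (true) {
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create("http://127.0.0.1:9001/api/rest_j/v1/entrance/" + execID + "/status"))
                    .header("Cookie", "<session cookie from login>")   // placeholder
                    .GET()
                    .build();
            String body = client.send(request, HttpResponse.BodyHandlers.ofString()).body();
            System.out.println(body);
            // Crude check for a terminal status; real code would parse the JSON instead.
            if (terminal.stream().anyMatch(s -> body.contains("\"" + s + "\""))) {
                break;
            }
            Thread.sleep(5000);   // poll every 5 seconds
        }
    }
}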

    3. Get Logs#

    • Interface /api/rest_j/v1/entrance/${execID}/log?fromLine=${fromLine}&size=${size}

    • Submission method GET

• The request parameter fromLine specifies the line to start reading from, and size specifies the maximum number of log lines returned by this request

    • Sample Response, where the returned fromLine needs to be used as a parameter for the next request of this interface

{
    "method": "/api/rest_j/v1/entrance/${execID}/log",
    "status": 0,
    "message": "Return log information",
    "data": {
        "execID": "${execID}",
        "log": ["error log", "warn log", "info log", "all log"],
        "fromLine": 56
    }
}
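
An incremental log-tailing sketch that feeds each response's fromLine back into the next request, as the interface expects; the fixed iteration count, gateway address and cookie are placeholders:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class TailLogDemo {
    public static void main(String[] args) throws Exception {
        String execID = args[0];          // execID returned by the submit interface
        int fromLine = 0;                 // start from the beginning of the log
        Pattern fromLinePattern = Pattern.compile("\"fromLine\"\\s*:\\s*(\\d+)");
        HttpClient client = HttpClient.newHttpClient();
        for (int i = 0; i < 10; i++) {    // ten polls for the sketch; loop until terminal in real code
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create("http://127.0.0.1:9001/api/rest_j/v1/entrance/" + execID
                            + "/log?fromLine=" + fromLine + "&size=100"))
                    .header("Cookie", "<session cookie from login>")   // placeholder
                    .GET()
                    .build();
            String body = client.send(request, HttpResponse.BodyHandlers.ofString()).body();
            System.out.println(body);
            Matcher m = fromLinePattern.matcher(body);
            if (m.find()) {
                fromLine = Integer.parseInt(m.group(1));   // resume from where this response stopped
            }
            Thread.sleep(2000);
        }
    }
}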

    4. Get Progress and resource#

    • Interface /api/rest_j/v1/entrance/${execID}/progressWithResource

    • Submission method GET

    • Sample Response

{
    "method": "/api/entrance/exec_id018017linkis-cg-entrance127.0.0.1:9205IDE_hadoop_spark_2/progressWithResource",
    "status": 0,
    "message": "OK",
    "data": {
        "yarnMetrics": {
            "yarnResource": [
                {
                    "queueMemory": 9663676416,
                    "queueCores": 6,
                    "queueInstances": 0,
                    "jobStatus": "COMPLETED",
                    "applicationId": "application_1655364300926_69504",
                    "queue": "default"
                }
            ],
            "memoryPercent": 0.009,
            "memoryRGB": "green",
            "coreRGB": "green",
            "corePercent": 0.02
        },
        "progress": 0.5,
        "progressInfo": [
            {
                "succeedTasks": 4,
                "failedTasks": 0,
                "id": "jobId-1(linkis-spark-mix-code-1946915)",
                "totalTasks": 6,
                "runningTasks": 0
            }
        ],
        "execID": "exec_id018017linkis-cg-entrance127.0.0.1:9205IDE_hadoop_spark_2"
    }
}

    5. Kill Task#

    • Interface /api/rest_j/v1/entrance/${execID}/kill

    • Submission method POST

    • Sample Response

{
    "method": "/api/rest_j/v1/entrance/{execID}/kill",
    "status": 0,
    "message": "OK",
    "data": {
        "execID": "${execID}"
    }
}

    6. Get task info#

    • Interface /api/rest_j/v1/jobhistory/{id}/get

    • Submission method GET

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
id | task id | path | true | string
    • Sample Response
{
    "method": null,
    "status": 0,
    "message": "OK",
    "data": {
        "task": {
            "taskID": 1,
            "instance": "xxx",
            "execId": "exec-id-xxx",
            "umUser": "test",
            "engineInstance": "xxx",
            "progress": "10%",
            "logPath": "hdfs://xxx/xxx/xxx",
            "resultLocation": "hdfs://xxx/xxx/xxx",
            "status": "FAILED",
            "createdTime": "2019-01-01 00:00:00",
            "updatedTime": "2019-01-01 01:00:00",
            "engineType": "spark",
            "errorCode": 100,
            "errDesc": "Task Failed with error code 100",
            "executeApplicationName": "hello world",
            "requestApplicationName": "hello world",
            "runType": "xxx",
            "paramJson": "{\"xxx\":\"xxx\"}",
            "costTime": 10000,
            "strongerExecId": "execId-xxx",
            "sourceJson": "{\"xxx\":\"xxx\"}"
        }
    }
}

    7. Get result set info#

    Support for multiple result sets

    • Interface /api/rest_j/v1/filesystem/getDirFileTrees

    • Submission method GET

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    path | result directory | query | true | string |
    • Sample Response
    {  "method": "/api/filesystem/getDirFileTrees",  "status": 0,  "message": "OK",  "data": {    "dirFileTrees": {      "name": "1946923",      "path": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923",      "properties": null,      "children": [        {          "name": "_0.dolphin",          "path": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923/_0.dolphin",//result set 1          "properties": {            "size": "7900",            "modifytime": "1657113288360"          },          "children": null,          "isLeaf": true,          "parentPath": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923"        },        {          "name": "_1.dolphin",          "path": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923/_1.dolphin",//result set 2          "properties": {            "size": "7900",            "modifytime": "1657113288614"          },          "children": null,          "isLeaf": true,          "parentPath": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923"        }      ],      "isLeaf": false,      "parentPath": null    }  }}

    8. Get result content#

    • Interface /api/rest_j/v1/filesystem/openFile

    • Submission method GET

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    path | result path | query | true | string |
    charset | Charset | query | false | string |
    page | page number | query | false | ref |
    pageSize | page size | query | false | ref |
    • Sample Response
    {  "method": "/api/filesystem/openFile",  "status": 0,  "message": "OK",  "data": {    "metadata": [      {        "columnName": "count(1)",        "comment": "NULL",        "dataType": "long"      }    ],    "totalPage": 0,    "totalLine": 1,    "page": 1,    "type": "2",    "fileContent": [      [        "28"      ]    ]  }}

    9. Get Result by stream#

    Get the result as a CSV or Excel file

    • Interface /api/rest_j/v1/filesystem/resultsetToExcel

    • Submission method GET

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    autoFormat | Auto | query | false | boolean |
    charset | charset | query | false | string |
    csvSeerator | csv Separator | query | false | string |
    limit | row limit | query | false | ref |
    nullValue | null value | query | false | string |
    outputFileName | Output file name | query | false | string |
    outputFileType | Output file type (csv or excel) | query | false | string |
    path | result path | query | false | string |
    quoteRetouchEnable | Whether to enable quote retouching | query | false | boolean |
    sheetName | sheet name | query | false | string |
    • Response
    binary stream
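
    A download sketch that streams the binary response to a local CSV file; the chosen parameter values are illustrative (same assumed session and gateway as above):

    def download_result_csv(session: requests.Session, path: str, out_file: str = "result.csv") -> None:
        """Stream a result set to a local CSV file via resultsetToExcel."""
        resp = session.get(
            f"{GATEWAY}/api/rest_j/v1/filesystem/resultsetToExcel",
            params={"path": path, "outputFileType": "csv", "charset": "utf-8"},
            stream=True,
        )
        resp.raise_for_status()
        with open(out_file, "wb") as f:
            for chunk in resp.iter_content(chunk_size=8192):
                f.write(chunk)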

    10. Compatible with 0.x task submission interface#

    • Interface /api/rest_j/v1/entrance/execute

    • Submission method POST

    • Request Parameters
    {    "executeApplicationName": "hive", //Engine type    "requestApplicationName": "dss", //Client service type    "executionCode": "show tables",    "params": {      "variable": {// task variable         "testvar": "hello"      },      "configuration": {        "runtime": {// task runtime params           "jdbc.url": "XX"        },        "startup": { // ec start up params           "spark.executor.cores": "4"        }      }    },    "source": { //task source information      "scriptPath": "file:///tmp/hadoop/test.sql"    },    "labels": {      "engineType": "spark-2.4.3",      "userCreator": "hadoop-IDE"    },    "runType": "hql", //The type of script to run    "source": {"scriptPath":"file:///tmp/hadoop/1.hql"}}
    • Sample Response
    {  "method": "/api/rest_j/v1/entrance/execute",  "status": 0,  "message": "Request executed successfully",  "data": {    "execID": "030418IDEhivebdpdwc010004:10087IDE_hadoop_21",    "taskID": "123"  }}
    diff --git a/docs/latest/api/login_api/index.html b/docs/latest/api/login_api/index.html (Login Api | Apache Linkis)
    Version: 1.1.2

    Login Document

    1. Docking With LDAP Service#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Add LDAP related configuration:

    wds.linkis.ldap.proxy.url=ldap://127.0.0.1:389/    # LDAP service URL
    wds.linkis.ldap.proxy.baseDN=dc=webank,dc=com      # LDAP service baseDN configuration

    2. How To Open The Test Mode To Achieve Login-Free#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Turn on the test mode and the parameters are as follows:

        wds.linkis.test.mode=true    # Enable test mode
        wds.linkis.test.user=hadoop  # Specify which user to delegate all requests to in test mode

    3. Login Interface Summary#

    We provide the following login-related interfaces:

    • Login In

    • Login Out

    • Heart Beat

    4. Interface details#

    • The return of the Linkis Restful interface follows the following standard return format:
    { "method": "", "status": 0, "message": "", "data": {}}

    Protocol

    • method: Returns the requested Restful API URI; mainly used in WebSocket mode.
    • status: Returns the status code: -1 means not logged in, 0 means success, 1 means error, 2 means validation failed, and 3 means no access to the interface.
    • data: Returns the specific data.
    • message: Returns the prompt message for the request. If status is not 0, message carries the error message, and data may contain a stack field with the specific stack trace.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification
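
    Based only on the status codes documented above, a small response-unwrapping helper might look like this (a Python sketch, not part of Linkis):

    def unwrap(body: dict) -> dict:
        """Return body["data"] on success; raise with the documented status-code meaning otherwise."""
        meanings = {-1: "not logged in", 1: "error", 2: "validation failed", 3: "no access to the interface"}
        if body["status"] != 0:
            reason = meanings.get(body["status"], f"status {body['status']}")
            stack = (body.get("data") or {}).get("stack")  # stack may accompany error responses
            raise RuntimeError(f"{reason}: {body.get('message')}" + (f"\n{stack}" if stack else ""))
        return body.get("data", {})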

    1). Login In#

    • Interface /api/rest_j/v1/user/login

    • Submission method POST

          {        "userName": "",        "password": ""      }
    • Sample Response
        {        "method": null,        "status": 0,        "message": "login successful(登录成功)!",        "data": {            "isAdmin": false,            "userName": ""        }     }

    Among them:

    • isAdmin: Linkis only has admin and non-admin users. The only privilege of an admin user is viewing the historical tasks of all users in the Linkis management console.
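
    For illustration, a minimal Python login sketch; the gateway address is an assumption, and the returned session keeps the login cookie so that later API calls are authenticated:

    import requests

    GATEWAY = "http://127.0.0.1:9001"  # assumed gateway address


    def login(user: str, password: str) -> requests.Session:
        """Log in and return a session whose cookies authenticate subsequent API calls."""
        session = requests.Session()
        resp = session.post(
            f"{GATEWAY}/api/rest_j/v1/user/login",
            json={"userName": user, "password": password},
        )
        body = resp.json()
        if body["status"] != 0:
            raise RuntimeError(body.get("message", "login failed"))
        return session  # the login cookie is retained in session.cookies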

    2). Login Out#

    • Interface /api/rest_j/v1/user/logout

    • Submission method POST

      No parameters

    • Sample Response

        {        "method": "/api/rest_j/v1/user/logout",        "status": 0,        "message": "Logout successful(退出登录成功)!"    }

    3). Heart Beat#

    • Interface /api/rest_j/v1/user/heartbeat

    • Submission method POST

      No parameters

    • Sample Response

        {         "method": "/api/rest_j/v1/user/heartbeat",         "status": 0,         "message": "Maintain heartbeat success(维系心跳成功)!"    }
    diff --git a/docs/latest/api/overview/index.html b/docs/latest/api/overview/index.html (Overview | Apache Linkis)
    Version: 1.1.2

    Overview

    1. Document description#

    Linkis 1.0 has been refactored and optimized on the basis of Linkis 0.x, and it remains compatible with the 0.x interfaces. However, to prevent compatibility problems when using version 1.0, please read the following documents carefully:

    1. When doing customized development with Linkis 1.0, you need to use Linkis's authorization and authentication interface. Please read the Login API document carefully.

    2. Linkis 1.0 provides a JDBC interface. If you need to access Linkis via JDBC, please read the Task Submit and Execute JDBC API document.

    3. Linkis 1.0 provides REST interfaces. If you need to develop upper-layer applications on top of Linkis, please read the Task Submit and Execute REST API document.

    diff --git a/docs/latest/architecture/commons/message_scheduler/index.html b/docs/latest/architecture/commons/message_scheduler/index.html (Message Scheduler Module | Apache Linkis)
    Version: 1.1.2

    Message Scheduler Module

    1 Overview#

            Linkis-RPC enables communication between microservices. To simplify the use of RPC, Linkis provides the Message-Scheduler module, which parses, identifies, and invokes methods annotated with @Receiver. It also unifies the use of the RPC and Restful interfaces, giving better scalability.

    2. Architecture description#

    2.1. Architecture design diagram#

    Module Design Drawing

    2.2. Module description#

    • ServiceParser: Parse the (Object) object of the Service module, and encapsulate the @Receiver annotated method into the ServiceMethod object.
    • ServiceRegistry: Register the corresponding Service module, and store the ServiceMethod parsed by the Service in the Map container.
    • ImplicitParser: parse the object of the Implicit module, and the method annotated with @Implicit will be encapsulated into the ImplicitMethod object.
    • ImplicitRegistry: Register the corresponding Implicit module, and store the resolved ImplicitMethod in a Map container.
    • Converter: Scans the non-interface, non-abstract subclasses of RequestMethod and stores them in a Map; parses Restful requests and matches them to the related RequestProtocol.
    • Publisher: Realize the publishing scheduling function, find the ServiceMethod matching the RequestProtocol in the Registry, and encapsulate it as a Job for submission scheduling.
    • Scheduler: Scheduling implementation, using Linkis-Scheduler to execute the job and return the MessageJob object.
    • TxManager: Complete transaction management, perform transaction management on job execution, and judge whether to commit or rollback after the job execution ends.
    diff --git a/docs/latest/architecture/commons/rpc/index.html b/docs/latest/architecture/commons/rpc/index.html (RPC Module | Apache Linkis)

    At the same time, because Feign only supports simple service selection rules, it cannot forward the request to the specified microservice instance, and cannot broadcast a request to all instances of the recipient microservice.

    2. Architecture description#

    2.1. Architecture design diagram#

    Linkis RPC architecture diagram

    2.2. Module description#

    The functions of the main modules are introduced as follows:

    • Eureka: the service registry, used for service management and service discovery.
    • Sender: Service request interface, the sender uses Sender to request service from the receiver.
    • Receiver: The service request receives the corresponding interface, and the receiver responds to the service through this interface.
    • Interceptor: Sender passes the user's request to the interceptor, which intercepts the request and performs additional processing on it. The broadcast interceptor broadcasts operations on the request, the retry interceptor retries failed requests, and the cache interceptor reads and caches simple, unchanging requests; a default interceptor provides the default implementation.
    • Decoder, Encoder: used for request encoding and decoding.
    • Feign: a lightweight framework for HTTP request calls and a declarative web service client, used for the underlying communication of Linkis-RPC.
    • Listener: monitor module, mainly used to monitor broadcast requests.
    diff --git a/docs/latest/architecture/computation_governance_services/engine/add_an_engine_conn/index.html b/docs/latest/architecture/computation_governance_services/engine/add_an_engine_conn/index.html (Start an EngineConn | Apache Linkis)
    Version: 1.1.2

    How to add an EngineConn

    Adding an EngineConn is one of the core processes of the computing-task preparation phase of Linkis computing governance. It mainly includes the following steps: first, the client side (Entrance or a user client) initiates a request for a new EngineConn to LinkisManager; then LinkisManager asks an EngineConnManager to start the EngineConn based on demands and label rules; finally, LinkisManager returns a usable EngineConn to the client side.

    Based on the figure below, let's explain the whole process in detail:

    Process of adding a EngineConn

    1. LinkisManager receives the request from the client side#

    Glossary:

    • LinkisManager: The management center of Linkis computing governance capabilities. Its main responsibilities are:

      1. Based on multi-level combined tags, provide users with available EngineConn after complex routing, resource management and load balancing.

      2. Provide EC and ECM full life cycle management capabilities.

      3. Provide users with multi-Yarn cluster resource management functions based on multi-level combined tags. It is mainly divided into three modules: AppManager, ResourceManager and LabelManager , which can support multi-active deployment and have the characteristics of high availability and easy expansion.

    After the AM module receives the client's new-EngineConn request, it first checks the parameters to determine their validity. Second, it selects the most suitable EngineConnManager (ECM) through complex rules for the subsequent EngineConn startup. Next, it applies to RM for the resources needed to start the EngineConn. Finally, it requests the ECM to create the EngineConn.

    The four steps will be described in detail below.

    1. Request parameter verification#

    After the AM module receives the engine creation request, it checks the parameters. First it checks the permissions of the requesting user and the creating user, and then checks the Labels attached to the request. Since Labels are used later in AM's creation process to find the ECM and record resource information, you must ensure the necessary Labels are present. At this stage, the request must carry a UserCreatorLabel (for example: hadoop-IDE) and an EngineTypeLabel (for example: spark-2.4.3).

    2. Select an EngineConnManager (ECM)#

    ECM selection uses the Labels passed by the client to pick a suitable ECM service to start the EngineConn. In this step, the LabelManager first searches the registered ECMs with the Labels passed by the client and returns them ordered by label-matching degree. After the registered ECM list is obtained, selection rules are applied to these ECMs; rules such as availability checks, resource surplus, and machine load are currently implemented. After the rules are applied, the ECM with the best-matching labels, the most idle resources, and the lowest load is returned.

    3. Apply resources required for EngineConn#

    1. After obtaining the assigned ECM, AM then asks how many resources the client's engine-creation request will use by calling the EngineConnPluginServer service. Here, the resource request is encapsulated, mainly including the Labels, the EngineConn startup parameters passed by the client, and the user configuration parameters obtained from the Configuration module. The resource information is obtained by calling the ECP service through RPC.

    2. After the EngineConnPluginServer service receives the resource request, it will first find the corresponding engine tag through the passed tag, and select the EngineConnPlugin of the corresponding engine through the engine tag. Then use EngineConnPlugin's resource generator to calculate the engine startup parameters passed in by the client, calculate the resources required to apply for a new EngineConn this time, and then return it to LinkisManager.

      Glossary:

    • EngineConnPlugin: The interface that Linkis must implement when connecting a new computing storage engine. This interface mainly includes several capabilities that the EngineConn must provide during the startup process, including the EngineConn resource generator, the EngineConn startup command generator, and the EngineConn connector. Please refer to the Spark engine implementation class for a concrete implementation: SparkEngineConnPlugin.
    • EngineConnPluginServer: It is a microservice that loads all the EngineConnPlugins and provides externally the required resource generation capabilities of EngineConn and EngineConn's startup command generation capabilities.
    • EngineConnResourceFactory: Calculate the total resources needed when EngineConn starts this time through the parameters passed in.
    • EngineConnLaunchBuilder: Through the incoming parameters, a startup command of the EngineConn is generated to provide the ECM to start the engine.
    3. After AM obtains the engine resources, it calls the RM service to apply for resources. The RM service uses the incoming Labels, the ECM, and the resources applied for this time to make a resource judgment. First it judges whether the resources of the client corresponding to the Labels are sufficient, and then whether the resources of the ECM service are sufficient. If the resources are sufficient, the resource application is approved, and the resources of the corresponding Labels are added or subtracted accordingly.

    4. Request ECM for engine creation#

    1. After completing the resource application for the engine, AM will encapsulate the engine startup request, send it to the corresponding ECM via RPC for service startup, and obtain the instance object of EngineConn.
    2. AM will then determine whether EngineConn is successfully started and become available through the reported information of EngineConn. If it is, the result will be returned, and the process of adding an engine this time will end.

    2. ECM initiates EngineConn#

    Glossary:

    • EngineConnManager: EngineConn's manager. Provides engine life-cycle management, and at the same time reports load information and its own health status to RM.
    • EngineConnBuildRequest: The start engine command passed by LinkisManager to ECM, which encapsulates all tag information, required resources and some parameter configuration information of the engine.
    • EngineConnLaunchRequest: Contains the BML materials, environment variables, ECM required local environment variables, startup commands and other information required to start an EngineConn, so that ECM can build a complete EngineConn startup script based on this.

    After ECM receives the EngineConnBuildRequest command passed by LinkisManager, it is mainly divided into three steps to start EngineConn:

    1. Request EngineConnPluginServer to obtain EngineConnLaunchRequest encapsulated by EngineConnPluginServer.
    2. Parse EngineConnLaunchRequest and encapsulate it into EngineConn startup script.
    3. Execute startup script to start EngineConn.

    2.1 EngineConnPluginServer encapsulates EngineConnLaunchRequest#

    Get the EngineConn type and corresponding version that actually needs to be started through the label information of EngineConnBuildRequest, get the EngineConnPlugin of the EngineConn type from the memory of EngineConnPluginServer, and convert the EngineConnBuildRequest into EngineConnLaunchRequest through the EngineConnLaunchBuilder of the EngineConnPlugin.

    2.2 Encapsulate EngineConn startup script#

    After the ECM obtains the EngineConnLaunchRequest, it downloads the BML materials in the EngineConnLaunchRequest to the local, and checks whether the local necessary environment variables required by the EngineConnLaunchRequest exist. After the verification is passed, the EngineConnLaunchRequest is encapsulated into an EngineConn startup script.

    2.3 Execute startup script#

    Currently, ECM only supports Bash commands for Unix systems, that is, only supports Linux systems to execute the startup script.

    Before startup, the sudo command is used to switch to the corresponding requesting user to execute the script to ensure that the startup user (ie, JVM user) is the requesting user on the Client side.

    After the startup script is executed, ECM monitors the execution status and execution log of the script in real time. Once the exit status is non-zero, it immediately reports EngineConn startup failure to LinkisManager and the entire process ends; otherwise, it keeps monitoring the log and status of the startup script until the script execution completes.

    3. EngineConn initialization#

    After ECM executes EngineConn's startup script, the EngineConn microservice is officially launched.

    Glossary:

    • EngineConn microservice: Refers to the actual microservices that include an EngineConn and one or more Executors to provide computing power for computing tasks. When we talk about adding an EngineConn, we actually mean adding an EngineConn microservice.
    • EngineConn: The engine connector is the actual connection unit with the underlying computing storage engine, and contains the session information with the actual engine. The difference between it and Executor is that EngineConn only acts as a connection and a client, and does not actually perform calculations. For example, SparkEngineConn, its session information is SparkSession.
    • Executor: As a real computing storage scenario executor, it is the actual computing storage logic execution unit. It abstracts the various capabilities of EngineConn and provides multiple different architectural capabilities such as interactive execution, subscription execution, and responsive execution.

    The initialization of EngineConn microservices is generally divided into three stages:

    1. Initialize the EngineConn of the specific engine. First, the command-line parameters of the Java main method are used to encapsulate an EngineCreationContext that contains relevant label information, startup information, and parameter information; EngineConn is then initialized through EngineCreationContext to establish the connection between EngineConn and the underlying engine. For example, SparkEngineConn will initialize a SparkSession at this stage to establish a connection with a Spark application.
    2. Initialize the Executor. After the EngineConn is initialized, the corresponding Executor will be initialized according to the actual usage scenario to provide service capabilities for subsequent users. For example, the SparkEngineConn in the interactive computing scenario will initialize a series of Executors that can be used to submit and execute SQL, PySpark, and Scala code capabilities, and support the Client to submit and execute SQL, PySpark, Scala and other codes to the SparkEngineConn.
    3. Report the heartbeat to LinkisManager regularly and wait for EngineConn to exit. When the underlying engine corresponding to EngineConn is abnormal, the maximum idle time is exceeded, the Executor finishes executing, or the user manually kills it, the EngineConn automatically ends and exits.

    At this point, the process of adding a new EngineConn is basically over. Finally, let's make a summary:

    • The client initiates a request for adding EngineConn to LinkisManager.
    • LinkisManager checks the legitimacy of the parameters, first selects the appropriate ECM according to the label, then confirms the resources required for this new EngineConn according to the user's request, applies for resources from the RM module of LinkisManager, and requires ECM to start a new EngineConn as required after the application is passed.
    • ECM first requests EngineConnPluginServer to obtain an EngineConnLaunchRequest containing BML materials, environment variables, ECM required local environment variables, startup commands and other information needed to start an EngineConn, and then encapsulates the startup script of EngineConn, and finally executes the startup script to start the EngineConn.
    • EngineConn initializes the EngineConn of a specific engine, and then initializes the corresponding Executor according to the actual usage scenario, and provides service capabilities for subsequent users. Finally, report the heartbeat to LinkisManager regularly, and wait for the normal end or termination by the user.
    diff --git a/docs/latest/architecture/computation_governance_services/engine/engine_conn/index.html b/docs/latest/architecture/computation_governance_services/engine/engine_conn/index.html (EngineConn Design | Apache Linkis)
    Version: 1.1.2

    EngineConn architecture design

    EngineConn: the engine connector. It creates and holds the connection session between Linkis and the underlying computing storage engine, and acts as the client that communicates with the specific engine on behalf of the other microservice modules.

    EngineConn architecture diagram

    EngineConn

    Introduction to the second-level module:

    linkis-computation-engineconn interactive engine connector#

    Provides the ability to execute interactive computing tasks.

    Core class | Core function
    EngineConnTask | Defines the interactive computing tasks submitted to EngineConn
    ComputationExecutor | Defines the interactive Executor, with interactive capabilities such as status query and task kill
    TaskExecutionService | Provides management functions for interactive computing tasks

    linkis-engineconn-common engine connector common module#

    Define the most basic entity classes and interfaces in the engine connector. EngineConn is used to create a connection session for the underlying computing storage engine, which contains the session information between the engine and the specific cluster, and is the client that communicates with the specific engine.

    Core Service | Core function
    EngineCreationContext | Contains the context information of EngineConn during startup
    EngineConn | Contains the specific information of EngineConn, such as its type and the connection information with the underlying computing storage engine
    EngineExecution | Provides the Executor creation logic
    EngineConnHook | Defines the operations before and after each phase of engine startup

    The core logic of linkis-engineconn-core engine connector#

    Defines the interfaces involved in the core logic of EngineConn.

    Core class | Core function
    EngineConnManager | Provides related interfaces for creating and obtaining EngineConn
    ExecutorManager | Provides related interfaces for creating and obtaining Executor
    ShutdownHook | Defines the operation of the engine shutdown phase

    linkis-engineconn-launch engine connector startup module#

    Defines the logic of how to start EngineConn.

    Core class | Core function
    EngineConnServer | EngineConn microservice startup class

    The core logic of the linkis-executor-core executor#

    Defines the core classes related to the executor. The executor is the real computing-scene executor, responsible for submitting user code to EngineConn.

    Core class | Core function
    Executor | The actual computational logic execution unit, providing a top-level abstraction of the various capabilities of the engine
    EngineConnAsyncEvent | Defines EngineConn-related asynchronous events
    EngineConnSyncEvent | Defines EngineConn-related synchronization events
    EngineConnAsyncListener | Defines the EngineConn-related asynchronous event listener
    EngineConnSyncListener | Defines the EngineConn-related synchronization event listener
    EngineConnAsyncListenerBus | Defines the listener bus for EngineConn asynchronous events
    EngineConnSyncListenerBus | Defines the listener bus for EngineConn synchronization events
    ExecutorListenerBusContext | Defines the context of the EngineConn event listener
    LabelService | Provides the label reporting function
    ManagerService | Provides the function of information transfer with LinkisManager

    linkis-callback-service callback logic#

    Core Class | Core Function
    EngineConnCallback | Defines EngineConn's callback logic

    linkis-accessible-executor accessible executor#

    An Executor that can be accessed: you can interact with it through RPC requests to obtain its status, load, concurrency, and other basic metrics data.

    Core Class | Core Function
    LogCache | Provides the log cache function
    AccessibleExecutor | The Executor that can be accessed and interacted with through RPC requests
    NodeHealthyInfoManager | Manages Executor's health information
    NodeHeartbeatMsgManager | Manages the heartbeat information of Executor
    NodeOverLoadInfoManager | Manages Executor load information
    Listener | Provides events related to Executor and the corresponding listener definitions
    EngineConnTimedLock | Defines the Executor-level lock
    AccessibleService | Provides the start-stop and status acquisition functions of Executor
    ExecutorHeartbeatService | Provides heartbeat-related functions of Executor
    LockService | Provides the lock management function
    LogService | Provides log management functions
    diff --git a/docs/latest/architecture/computation_governance_services/engine/engine_conn_manager/index.html b/docs/latest/architecture/computation_governance_services/engine/engine_conn_manager/index.html (EngineConnManager Design | Apache Linkis)

    The core services and features modules are as follows:

    Core service | Core function
    EngineConnLaunchService | Contains core methods for generating EngineConn and starting the process
    BmlResourceLocallizationService | Used to download BML engine-related resources and generate localized file directories
    ECMHealthService | Reports its own healthy heartbeat to AM regularly
    ECMMetricsService | Reports its own metrics status to AM regularly
    EngineConnKillSerivce | Provides related functions to stop the engine
    EngineConnListService | Provides caching and management functions for engines
    EngineConnCallBackService | Provides the engine callback function
    diff --git a/docs/latest/architecture/computation_governance_services/engine/engine_conn_plugin/index.html b/docs/latest/architecture/computation_governance_services/engine/engine_conn_plugin/index.html (EngineConnPlugin (ECP) Design | Apache Linkis)

    Other services such as Manager call the logic of the corresponding plug-in in Plugin Server through RPC requests.

    Core Class | Core Function
    EngineConnLaunchService | Responsible for building the engine connector launch request
    EngineConnResourceFactoryService | Responsible for generating engine resources
    EngineConnResourceService | Responsible for downloading the resource files used by the engine connector from BML

    EngineConn-Plugin-Loader Engine Connector Plugin Loader#

    The engine connector plug-in loader is used to dynamically load engine connector plug-ins according to request parameters, and it has caching characteristics. The loading process consists of two parts: 1) plug-in resources such as the main program package and program dependency packages are loaded locally (not yet available); 2) plug-in resources are dynamically loaded from the local environment into the service process, for example loaded into the JVM virtual machine through a class loader.

    Core Class | Core Function
    EngineConnPluginsResourceLoader | Loads engine connector plug-in resources
    EngineConnPluginsLoader | Loads the engine connector plug-in instance, or loads an existing one from the cache
    EngineConnPluginClassLoader | Dynamically instantiates an engine connector instance from a jar

    EngineConn-Plugin-Cache engine plug-in cache module#

    The engine connector plug-in cache is a cache service specially used to cache loaded engine connectors, supporting read, update, and remove operations. A plug-in that has been loaded into the service process is cached together with its class loader to prevent repeated loading from affecting efficiency; at the same time, the cache module periodically notifies the loader to update the plug-in resources, and if changes are found, the plug-in is reloaded and the cache refreshed automatically.

    Core Class | Core Function
    EngineConnPluginCache | Caches loaded engine connector instances
    RefreshPluginCacheContainer | Engine connector that refreshes the cache regularly

    EngineConn-Plugin-Core: Engine connector plug-in core module#

    The engine connector plug-in core module is the core module of the engine connector plug-in. Contains the implementation of the basic functions of the engine plug-in, such as the construction of the engine connector start command, the construction of the engine resource factory and the implementation of the core interface of the engine connector plug-in.

    Core Class | Core Function
    EngineConnLaunchBuilder | Builds the engine connector launch request
    EngineConnFactory | Creates the engine connector
    EngineConnPlugin | The engine connector plug-in interface, including resources, commands, and instance construction methods
    EngineResourceFactory | The engine resource creation factory

    EngineConn-Plugins: Engine connection plugin collection#

    The engine connection plug-in collection is used to place the default engine connector plug-in library that has been implemented based on the plug-in interface defined by us. Provides the default engine connector implementation, such as jdbc, spark, python, shell, etc. Users can refer to the implemented cases based on their own needs to implement more engine connectors.

    Core Class | Core Function
    engineplugin-jdbc | jdbc engine connector
    engineplugin-shell | Shell engine connector
    engineplugin-spark | spark engine connector
    engineplugin-python | python engine connector
    diff --git a/docs/latest/architecture/computation_governance_services/entrance/index.html b/docs/latest/architecture/computation_governance_services/entrance/index.html (Entrance Architecture Design | Apache Linkis)
    Version: 1.1.2

    Entrance Architecture Design

    The Linkis task submission portal is used to receive, schedule, and forward execution requests and to manage the life cycle of computing tasks; it can return calculation results, logs, and progress to the caller. It is split out from the native capabilities of Entrance in Linkis 0.x.

    1. Entrance architecture diagram

    Introduction to the second-level module:

    EntranceServer#

    EntranceServer computing task submission portal service is the core service of Entrance, responsible for the reception, scheduling, execution status tracking, and job life cycle management of Linkis execution tasks. It mainly realizes the conversion of task execution requests into schedulable Jobs, scheduling, applying for Executor execution, job status management, result set management, log management, etc.

    Core Class | Core Function
    EntranceInterceptor | The Entrance interceptor supplements the information of the incoming task, making the content of the task more complete. The supplementary information includes: database information supplement, custom variable replacement, code inspection, limit restrictions, etc.
    EntranceParser | The Entrance parser parses the request parameter Map into a Task; it can also convert a Task into a schedulable Job, or a Job into a storable Task.
    EntranceExecutorManager | Creates an Executor for the execution of an EntranceJob, maintains the relationship between Job and Executor, and supports the labeling capabilities requested by the Job
    PersistenceManager | Responsible for job-related persistence operations, such as result set paths, job status changes, and progress, stored in the database
    ResultSetEngine | Responsible for storing the result set after the job runs; it is saved as files to HDFS or a local storage directory
    LogManager | Responsible for storing job logs and managing log error codes
    Scheduler | The job scheduler is responsible for the scheduling and execution of all jobs, mainly through scheduling job queues
    diff --git a/docs/latest/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html b/docs/latest/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html (Job Submission | Apache Linkis)
    Version: 1.1.2

    Job submission, preparation and execution process

    The submission and execution of computing tasks (Jobs) is the core capability provided by Linkis. It interacts with almost all modules in the Linkis computing governance architecture and occupies a core position in Linkis.

    The whole process, starting with the user's computing task being submitted from the client and ending with the final results being returned, is divided into three stages: submission -> preparation -> execution. The details are shown in the following figure.

    The overall flow chart of computing tasks

    Among them:

    • Entrance, as the entrance to the submission stage, provides task reception, scheduling and job information forwarding capabilities. It is the unified entrance for all computing tasks. It will forward computing tasks to Orchestrator for scheduling and execution.

    • Orchestrator, as the entrance to the preparation phase, mainly provides job analysis, orchestration and execution capabilities.

    • Linkis Manager: The management center of computing governance capabilities. Its main responsibilities are as follows:

      1. ResourceManager:Not only has the resource management capabilities of Yarn and Linkis EngineConnManager, but also provides tag-based multi-level resource allocation and recovery capabilities, allowing ResourceManager to have full resource management capabilities across clusters and across computing resource types;
      2. AppManager: Coordinate and manage all EngineConnManager and EngineConn, including the life cycle of EngineConn application, reuse, creation, switching, and destruction to AppManager for management;
      3. LabelManager: Based on multi-level combined labels, it will provide label support for the routing and management capabilities of EngineConn and EngineConnManager across IDC and across clusters;
      4. EngineConnPluginServer: Externally provides the resource generation capabilities required to start an EngineConn and EngineConn startup command generation capabilities.
    • EngineConnManager: It is the manager of EngineConn, which provides engine life-cycle management, and at the same time reports load information and its own health status to RM.

    • EngineConn: It is the actual connector between Linkis and the underlying computing storage engines. All user computing and storage tasks will eventually be submitted to the underlying computing storage engine by EngineConn. According to different user scenarios, EngineConn provides full-stack computing capability framework support for interactive computing, streaming computing, off-line computing, and data storage tasks.

    1. Submission Stage#

    The submission phase is mainly the interaction of Client -> Linkis Gateway -> Entrance, and the process is as follows:

    Flow chart of submission phase

    1. First, the Client (such as the front end or the client) initiates a Job request, and the job request information is simplified as follows (for the specific usage of Linkis, please refer to How to use Linkis):
    POST /api/rest_j/v1/entrance/submit
    {     "executionContent": {"code": "show tables", "runType": "sql"},     "params": {"variable": {}, "configuration": {}}, //not required     "source": {"scriptPath": "file:///1.hql"}, //not required, only used to record code source     "labels": {         "engineType": "spark-2.4.3", //Specify engine         "userCreator": "username-IDE" // Specify the submission user and submission system     }}
    2. After Linkis-Gateway receives the request, it determines the microservice name for routing and forwarding from the serviceName in the URI /api/rest_j/v1/${serviceName}/.+. Here Linkis-Gateway parses out the name entrance and forwards the Job to the Entrance microservice. Note that if the user specifies a routing label, the Entrance microservice instance with the corresponding label is selected for forwarding instead of a random instance.
    3. After Entrance receives the Job request, it first briefly verifies the legitimacy of the request, then uses RPC to call JobHistory to persist the job information, encapsulates the Job request as a computing task, puts it in the scheduling queue, and waits for it to be consumed by a consumption thread.
    4. The scheduling queue opens up a consumption queue and a consumption thread for each group. The consumption queue stores the preliminarily encapsulated user computing tasks, and the consumption thread keeps taking computing tasks from the consumption queue for consumption in FIFO order. The current default grouping is Creator + User (that is, submission system + user); therefore, even for the same user, tasks submitted by different systems use completely separate consumption queues and threads, fully isolated from each other (a toy sketch of this grouping follows this list). (Reminder: users can modify the grouping algorithm as needed.)
    5. After the consumption thread takes out a computing task, it submits it to Orchestrator, which officially enters the preparation phase.
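
    To make the grouping concrete, here is a toy Python model of one-FIFO-queue-per-group consumption; it only illustrates the Creator + User isolation described above and is not Linkis's actual scheduler code:

    from collections import defaultdict, deque


    class GroupedFifoQueues:
        """Toy model of Entrance's grouped consumption: one FIFO queue per Creator + User group."""

        def __init__(self):
            self.queues = defaultdict(deque)  # group key -> FIFO consumption queue

        def submit(self, creator: str, user: str, task) -> None:
            # The default grouping key is submission system + user, so the same user's
            # tasks from different systems land in fully isolated queues.
            self.queues[(creator, user)].append(task)

        def consume(self, creator: str, user: str):
            queue = self.queues[(creator, user)]
            return queue.popleft() if queue else None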

    2. Preparation Stage#

    There are two main processes in the preparation phase. One is to apply to LinkisManager for an available EngineConn, to which the computing task will later be submitted for execution. The other is Orchestrator orchestrating the computing task submitted by Entrance, that is, converting a user's computing request into a physical execution tree, which is then handed over to the execution phase where the computing task is actually executed.

    2.1 Apply to LinkisManager for available EngineConn#

    If the user has a reusable EngineConn in LinkisManager, the EngineConn is directly locked and returned to Orchestrator, and the entire application process ends.

    How is a reusable EngineConn defined? It is one that matches all the label requirements of the computing task and whose own health status is Healthy (the load is low and the actual status is Idle). All the EngineConns that meet these conditions are then sorted and selected according to the rules, and finally the best one is locked (a toy selection sketch follows below).

    If the user does not have a reusable EngineConn, a process to request a new EngineConn will be triggered at this time. Regarding the process, please refer to: How to add an EngineConn.
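
    To illustrate the reuse rules above, a toy selection sketch in Python; the field names (labels, healthy, status, idle_resources, load) are ours for illustration, not Linkis's actual data model:

    def pick_reusable_engine_conn(candidates: list, required_labels: set):
        """Toy selection of a reusable EngineConn: keep only instances matching every
        required label and in Healthy/Idle state, then prefer the most idle, least
        loaded one (a simplification of the rules described in the text)."""
        matching = [
            ec for ec in candidates
            if required_labels.issubset(ec["labels"])
            and ec["healthy"] and ec["status"] == "Idle"
        ]
        if not matching:
            return None  # caller falls back to requesting a brand-new EngineConn
        return max(matching, key=lambda ec: (ec["idle_resources"], -ec["load"]))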

    2.2 Orchestrate a computing task#

    Orchestrator is mainly responsible for arranging a computing task (JobReq) into a physical execution tree (PhysicalTree) that can be actually executed, and providing the execution capabilities of the Physical tree.

    Here we first focus on Orchestrator's computing task scheduling capabilities. A flow chart is shown below:

    Orchestration flow chart

    The main process is as follows:

    • Converter: Complete the conversion of the JobReq (task request) submitted by the user to Orchestrator's ASTJob. This step will perform parameter check and information supplementation on the calculation task submitted by the user, such as variable replacement, etc.
    • Parser: Complete the analysis of ASTJob. Split ASTJob into an AST tree composed of ASTJob and ASTStage.
    • Validator: Complete the inspection and information supplement of ASTJob and ASTStage, such as code inspection, necessary Label information supplement, etc.
    • Planner: Convert an AST tree into a Logical tree. The Logical tree at this time has been composed of LogicalTask, which contains all the execution logic of the entire computing task.
    • Optimizer: Convert a Logical tree to a Physical tree and optimize the Physical tree.

    In a physical tree, the majority of nodes are computing strategy logic. Only the middle ExecTask truly encapsulates the execution logic which will be further submitted to and executed at EngineConn. As shown below:

    Physical Tree

    The execution logic encapsulated by JobExecTask and StageExecTask in the Physical tree depends on the specific computing strategy.

    For example, under the multi-active computing strategy, for a computing task submitted by a user, the execution logic submitted to EngineConn of different clusters for execution is encapsulated in two ExecTasks, and the related strategy logic is reflected in the parent node (StageExecTask(End)) of the two ExecTasks.

    Here, we take the multi-reading scenario under the multi-active computing strategy as an example.

    In the multi-reading scenario, only one ExecTask result needs to be returned; once that result is returned, the Physical tree can be marked as successful. However, the Physical tree can only execute sequentially according to dependencies and cannot terminate the execution of individual nodes: once a node is canceled or fails to execute, the entire Physical tree would be marked as failed. At this time, StageExecTask(End) is needed to ensure that the Physical tree can not only cancel the ExecTask that failed to execute, but also continue to upload the result set generated by the successful ExecTask, letting the Physical tree continue to execute. This is the computing-strategy execution logic represented by StageExecTask.

    The orchestration process of Linkis Orchestrator is similar to many SQL parsing engines (such as Spark, Hive's SQL parser). But in fact, the orchestration capability of Linkis Orchestrator is realized based on the computing governance field for the different computing governance needs of users. The SQL parsing engine is a parsing orchestration oriented to the SQL language. Here is a simple distinction:

    1. What Linkis Orchestrator mainly wants to solve is the orchestration requirements caused by different computing tasks for computing strategies. For example, in order to be multi-active, Orchestrator will submit a calculation task for the user, based on the "multi-active" computing strategy requirements, compile a physical tree, so as to submit to multiple clusters to perform this calculation task. And in the process of constructing the entire Physical tree, various possible abnormal scenarios have been fully considered, and they have all been reflected in the Physical tree.
    2. The orchestration ability of Linkis Orchestrator has nothing to do with the programming language. In theory, as long as an engine has adapted to Linkis, all the programming languages it supports can be orchestrated, while the SQL parsing engine only cares about the analysis and execution of SQL, and is only responsible for parsing a piece of SQL into one executable Physical tree, and finally calculate the result.
    3. Linkis Orchestrator also has the ability to parse SQL, but SQL parsing is just one of Orchestrator Parser's analytic implementations for the SQL programming language. The Parser of Linkis Orchestrator also considers introducing Apache Calcite to parse SQL. It supports splitting a user SQL that spans multiple computing engines (must be a computing engine that Linkis has docked) into multiple sub SQLs and submitting them to each corresponding engine during the execution phase. Finally, a suitable calculation engine is selected for summary calculation.

    After the analysis and orchestration by Linkis Orchestrator, the computing task has been transformed into an executable physical tree. Orchestrator will submit the Physical tree to Orchestrator's Execution module and enter the final execution stage.

    3. Execution Stage#

    The execution stage is mainly divided into the following two steps, these two steps are the last two phases of capabilities provided by Linkis Orchestrator:

    Flow chart of the execution stage

    The main process is as follows:

    • Execution: Analyze the dependencies of the Physical tree, and execute them sequentially from the leaf nodes according to the dependencies.
    • Reheater: Once the execution of a node in the Physical tree is completed, a reheat is triggered. Reheating allows the Physical tree to be adjusted dynamically according to real-time execution. For example: if a leaf node is detected to have failed and it supports retry (the failure was caused by throwing a ReTryExecption), the Physical tree is automatically adjusted and a retry parent node with exactly the same content is added above that leaf node.

    Let us go back to the Execution stage, where we focus on the execution logic of the ExecTask node that encapsulates the user computing task submitted to EngineConn.

    1. As mentioned earlier, the first step in the preparation phase is to obtain a usable EngineConn from LinkisManager. After ExecTask gets this EngineConn, it will submit the user's computing task to EngineConn through an RPC request.
    2. After EngineConn receives the computing task, it will asynchronously submit it to the underlying computing storage engine through the thread pool, and then immediately return an execution ID.
    3. After ExecTask gets this execution ID, it can then use the ID to asynchronously pull the execution status of the computing task (such as: status, progress, log, result set, etc.).
    4. At the same time, EngineConn will monitor the execution of the underlying computing storage engine in real time through multiple registered Listeners. If the computing storage engine does not support registering Listeners, EngineConn will start a daemon thread for the computing task and periodically pull the execution status from the computing storage engine.
    5. EngineConn will pull the execution status back to the microservice where Orchestrator is located in real time through RPC requests.
    6. After the Receiver of the microservice receives the execution status, it will broadcast it through the ListenerBus, and the Orchestrator Execution will consume the event and dynamically update the execution status of the Physical tree.
    7. The result set generated by the computing task is written to storage media such as HDFS on the EngineConn side. EngineConn returns only the result set path through RPC; Execution consumes the event and broadcasts the obtained result set path through ListenerBus, so that the Listener registered by Entrance with Orchestrator can consume the path and persist it to JobHistory.
    8. After the execution of the computing task on the EngineConn side is completed, through the same logic, the Execution will be triggered to update the state of the ExecTask node of the Physical tree, so that the Physical tree will continue to execute until the entire tree is completely executed. At this time, Execution will broadcast the completion status of the calculation task through ListenerBus.
    9. After the Listener that Entrance registered with Orchestrator consumes the state event, it updates the job state in JobHistory, and the entire task execution is completed.

    Finally, let's take a look at how the client side knows the state of the calculation task and obtains the calculation result in time, as shown in the following figure:

    Results acquisition process

    The specific process is as follows (a minimal client-side sketch follows the list):

    1. The client periodically polls to request Entrance to obtain the status of the computing task.
    2. Once the status is flipped to success, it sends a request for job information to JobHistory, and gets all the result set paths.
    3. Initiate a query file content request to PublicService through the result set path, and obtain the content of the result set.
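
    A client-side sketch that mirrors these three steps, reusing the helper functions sketched in the task API section of this document (wait_for_completion, get_task_info, list_result_sets, read_result); the "Succeed" state name is an assumption:

    def poll_and_fetch(session: requests.Session, exec_id: str, task_id: int) -> list:
        """Poll the task to completion, then read its job info and result sets."""
        state = wait_for_completion(session, exec_id)  # 1. poll the status interface
        if state != "Succeed":
            raise RuntimeError(f"task finished in state {state}")
        info = get_task_info(session, task_id)         # 2. job info -> result directory
        return [                                       # 3. open each result set
            read_result(session, path)
            for path in list_result_sets(session, info["resultLocation"])
        ]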

    At this point, the entire process of job submission -> preparation -> execution has been completed.

    diff --git a/docs/latest/architecture/computation_governance_services/linkis-cli/index.html b/docs/latest/architecture/computation_governance_services/linkis-cli/index.html (Linkis-Client Architecture Design | Apache Linkis)
    Version: 1.1.2

    Linkis-Client Architecture Design

    Provide users with a lightweight client that submits tasks to Linkis for execution.

    Linkis-Client architecture diagram#

    img

    Second-level module introduction#

    Linkis-Computation-Client#

    Provides an interface for users to submit execution tasks to Linkis in the form of SDK.

    Core Class | Core Function
    Action | Defines the attributes, methods, and parameters included in a request
    Result | Defines the properties, methods, and parameters included in a returned result
    UJESClient | Responsible for request submission and execution, and for obtaining status, results, and related parameters
    Linkis-Cli#

    Provides a way for users to submit tasks to Linkis in the form of a shell command terminal.

    Core Class | Core Function
    Common | Defines the instruction template parent class and interface, the instruction analysis entity class, and the parent classes and interfaces of the task submission and execution links
    Core | Responsible for parsing input, task execution, and defining output methods
    Application | Calls linkis-computation-client to perform tasks, and pulls logs and final results in real time
    diff --git a/docs/latest/architecture/computation_governance_services/linkis_manager/app_manager/index.html b/docs/latest/architecture/computation_governance_services/linkis_manager/app_manager/index.html (App Manager | Apache Linkis)

    Engine manager: responsible for managing the basic information and metadata of all engines.

    - + \ No newline at end of file diff --git a/docs/latest/architecture/computation_governance_services/linkis_manager/label_manager/index.html b/docs/latest/architecture/computation_governance_services/linkis_manager/label_manager/index.html index 445db1efa7c..7a824edda7a 100644 --- a/docs/latest/architecture/computation_governance_services/linkis_manager/label_manager/index.html +++ b/docs/latest/architecture/computation_governance_services/linkis_manager/label_manager/index.html @@ -7,7 +7,7 @@ Label Manager | Apache Linkis - + @@ -22,7 +22,7 @@ We set that the higher the proportion of candidate nodes associated with irrelevant labels in the total associated nodes, the more significant the impact on the score, which can further accumulate the initial score of the node obtained in the first step.
  • Normalize the standard deviation of the scores of the candidate nodes and sort them.
  • - + \ No newline at end of file diff --git a/docs/latest/architecture/computation_governance_services/linkis_manager/overview/index.html b/docs/latest/architecture/computation_governance_services/linkis_manager/overview/index.html index 791aeaf7a7f..f97f875fafb 100644 --- a/docs/latest/architecture/computation_governance_services/linkis_manager/overview/index.html +++ b/docs/latest/architecture/computation_governance_services/linkis_manager/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -17,7 +17,7 @@ ResourceManager

    4. Monitoring module linkis-manager-monitor#

            Monitor provides the function of node status monitoring.

    - + \ No newline at end of file diff --git a/docs/latest/architecture/computation_governance_services/linkis_manager/resource_manager/index.html b/docs/latest/architecture/computation_governance_services/linkis_manager/resource_manager/index.html index 386d0b65830..04ca46d49f5 100644 --- a/docs/latest/architecture/computation_governance_services/linkis_manager/resource_manager/index.html +++ b/docs/latest/architecture/computation_governance_services/linkis_manager/resource_manager/index.html @@ -7,7 +7,7 @@ Resource Manager | Apache Linkis - + @@ -25,7 +25,7 @@ url, Hadoop version and other information) are maintained in the linkis_external_resource_provider table.

  • For each resource type, there is an implementation of the ExternalResourceProviderParser interface, which parses the attributes of external resources, converting the information that can be matched to a Label into the corresponding Label, and converting the information that can be used as a parameter to request the resource interface into params. Finally, an ExternalResourceProvider instance that can be used as a basis for querying external resource information is constructed.

  • According to the resource type and label information in the parameters of the ExternalResourceService method, find the matching ExternalResourceProvider, generate an ExternalResourceRequest based on the information in it, and formally call the API provided by the external resource to initiate a resource information request.

  • - + \ No newline at end of file diff --git a/docs/latest/architecture/computation_governance_services/overview/index.html b/docs/latest/architecture/computation_governance_services/overview/index.html index de7ec259248..fd09cbbe712 100644 --- a/docs/latest/architecture/computation_governance_services/overview/index.html +++ b/docs/latest/architecture/computation_governance_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -21,7 +21,7 @@ Enter EngineConn Architecture Design

    - + \ No newline at end of file diff --git a/docs/latest/architecture/computation_governance_services/proxy_user/index.html b/docs/latest/architecture/computation_governance_services/proxy_user/index.html index 7172946c941..49e40724ac4 100644 --- a/docs/latest/architecture/computation_governance_services/proxy_user/index.html +++ b/docs/latest/architecture/computation_governance_services/proxy_user/index.html @@ -7,7 +7,7 @@ Proxy User Mode | Apache Linkis - + @@ -18,7 +18,7 @@
    • The relevant interfaces of Linkis need to be able to identify the proxy user information based on the original UserName obtained, and use the proxy user to perform the various operations, while recording audit logs, including the user's task execution and download operations
    • When a task is submitted for execution, the Entrance service needs to change the executing user to the proxy user

    5 Things to Consider & Note#

    • Users are divided into proxy users and non-proxy users; a user of proxy type cannot proxy to another user again.
    • It is necessary to control the list of login users and system users who can be proxied, to prohibit arbitrary proxying and avoid uncontrollable permissions. It is best to support configuration via database tables, so that changes take effect directly without restarting the service.
    • Record log files containing proxy user operations separately, such as proxy execution, function update, etc. All proxy user operations in PublicService are recorded in the log, which facilitates auditing.
    - + \ No newline at end of file diff --git a/docs/latest/architecture/difference_between_1.0_and_0.x/index.html b/docs/latest/architecture/difference_between_1.0_and_0.x/index.html index d9f684fd300..3b74bc026fc 100644 --- a/docs/latest/architecture/difference_between_1.0_and_0.x/index.html +++ b/docs/latest/architecture/difference_between_1.0_and_0.x/index.html @@ -7,7 +7,7 @@ Difference Between 1.0 And 0.x | Apache Linkis - + @@ -34,7 +34,7 @@ Linkis EngineConn Architecture diagram

    - + \ No newline at end of file diff --git a/docs/latest/architecture/microservice_governance_services/gateway/index.html b/docs/latest/architecture/microservice_governance_services/gateway/index.html index 27f69e7c90d..279a2e8c94a 100644 --- a/docs/latest/architecture/microservice_governance_services/gateway/index.html +++ b/docs/latest/architecture/microservice_governance_services/gateway/index.html @@ -7,7 +7,7 @@ Gateway Design | Apache Linkis - + @@ -26,7 +26,7 @@ Gateway WebSocket Forwarding

    - + \ No newline at end of file diff --git a/docs/latest/architecture/microservice_governance_services/overview/index.html b/docs/latest/architecture/microservice_governance_services/overview/index.html index 7733b10967b..1fadc4cb3e2 100644 --- a/docs/latest/architecture/microservice_governance_services/overview/index.html +++ b/docs/latest/architecture/microservice_governance_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -31,7 +31,7 @@

    - + \ No newline at end of file diff --git a/docs/latest/architecture/overview/index.html b/docs/latest/architecture/overview/index.html index 343b7b6f4a7..d71b04907bc 100644 --- a/docs/latest/architecture/overview/index.html +++ b/docs/latest/architecture/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Overview

    Linkis 1.0 divides all microservices into three categories: public enhancement services, computing governance services, and microservice governance services. The following figure shows the architecture of Linkis 1.0.

    Linkis1.0 Architecture Figure

    The specific responsibilities of each category are as follows:

    1. Public enhancement services are the material library service, context service, data source service and public services that Linkis 0.X already provides.
    2. The microservice governance services are the Spring Cloud Gateway, Eureka and OpenFeign services already provided by Linkis 0.X; Linkis 1.0 will also provide support for Nacos.
    3. Computing governance services are the core focus of Linkis 1.0: the three stages of submission, preparation and execution comprehensively upgrade Linkis' ability to govern user tasks.

    The following is a directory listing of Linkis1.0 architecture documents:

    1. For the characteristics of the Linkis1.0 architecture, please read The difference between Linkis1.0 and Linkis0.x.
    2. Linkis 1.0 public enhancement service related documents, please read Public Enhancement Service.
    3. Linkis 1.0 microservice governance related documents, please read Microservice Governance.
    4. Linkis 1.0 computing governance service related documents, please read Computation Governance Service.
    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html b/docs/latest/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html index 395460169a0..8ced81a5cbb 100644 --- a/docs/latest/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html +++ b/docs/latest/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html @@ -7,7 +7,7 @@ Analysis of engin BML | Apache Linkis - + @@ -17,7 +17,7 @@ taskDao.updateState(resourceTask.getId(), TaskState.RUNNING.getValue(), new Date());

    3) The actual writing of material files into the material library is completed by the upload method in the ResourceServiceImpl class. Inside the upload method, the byte streams corresponding to List<MultipartFile> files are persisted to the file storage system of the material library; the properties data of the material files is stored in the resource record table (linkis_ps_bml_resources) and the resource version record table (linkis_ps_bml_resources_version).

    MultipartFile p = files[0];
    String resourceId = (String) properties.get("resourceId");
    String fileName = new String(p.getOriginalFilename().getBytes(Constant.ISO_ENCODE), Constant.UTF8_ENCODE);
    fileName = resourceId;
    String path = resourceHelper.generatePath(user, fileName, properties);
    // generatePath currently supports Local and HDFS paths; the composition rules of the paths are determined by LocalResourceHelper or HdfsResourceHelper
    StringBuilder sb = new StringBuilder();
    long size = resourceHelper.upload(path, user, inputStream, sb, true);
    // The file size calculation and the writing of the file byte stream are implemented by the upload method in LocalResourceHelper or HdfsResourceHelper
    Resource resource = Resource.createNewResource(resourceId, user, fileName, properties);
    // Insert a record into the resource table linkis_ps_bml_resources
    long id = resourceDao.uploadResource(resource);
    // Add a new record to the resource version table linkis_ps_bml_resources_version; the version number at this time is Constant.FIRST_VERSION
    // In addition to the metadata of this version, the most important thing is to record the storage location of this version's file, including the file path, start location and end location
    String clientIp = (String) properties.get("clientIp");
    ResourceVersion resourceVersion = ResourceVersion.createNewResourceVersion(
            resourceId, path, md5String, clientIp, size, Constant.FIRST_VERSION, 1);
    versionDao.insertNewVersion(resourceVersion);

    After the above process executes successfully, the material data is truly complete; the UploadResult is then returned to the client and the status of this ResourceTask is marked as completed. If an exception occurs, the status of this ResourceTask is marked as failed and the exception information is recorded.

    resource-task

    4.2.2 Engine material update process#

    Engine material update process sequence diagram

    Engine material update process sequence diagram

    If the table linkis_cg_engine_conn_plugin_bml_resources matches the local material data, you need to use the data in EngineConnLocalizeResource to construct an EngineConnBmlResource object, and update the metadata information such as the version number, file size, modification time, etc. of the original material file in the linkis_cg_engine_conn_plugin_bml_resources table. Before updating, you need to complete the update and upload operation of the material file, that is, execute the uploadToBml(localizeResource, engineConnBmlResource.getBmlResourceId) method.

    Inside the uploadToBml(localizeResource, resourceId) method, a material resource update request is issued through a constructed bmlClient, namely:

    private val bmlClient = BmlClientFactory.createBmlClient()
    bmlClient.updateResource(Utils.getJvmUser, resourceId, localizeResource.fileName, localizeResource.getFileInputStream)

    In BML Server, the interface for material update is located in the updateVersion interface method in the BmlRestfulApi class. The main process is as follows:

    Complete the validity check of the resourceId, that is, check whether the incoming resourceId exists in the linkis_ps_bml_resources table. If the resourceId does not exist, an exception is thrown to the client and the material update operation fails at the interface level.

    Therefore, the corresponding relationship of the resource data in the tables linkis_cg_engine_conn_plugin_bml_resources and linkis_ps_bml_resources needs to be complete, otherwise an error will occur that the material file cannot be updated.

    resourceService.checkResourceId(resourceId)

    If resourceId exists in the linkis_ps_bml_resources table, it will continue to execute:

    StringUtils.isEmpty(versionService.getNewestVersion(resourceId))

    The getNewestVersion method is to obtain the maximum version number of the resourceId in the table linkis_ps_bml_resources_version. If the maximum version corresponding to the resourceId is empty, the material will also fail to update, so the integrity of the corresponding relationship of the data here also needs to be strictly guaranteed.

    After the above two checks are passed, a ResourceUpdateTask will be created to complete the final file writing and record update saving.

    ResourceTask resourceTask = null;
    synchronized (resourceId.intern()) {
        resourceTask = taskService.createUpdateTask(resourceId, user, file, properties);
    }

    Inside the createUpdateTask method, the main functions implemented are:

    // Generate a new version for the material resource
    String lastVersion = getResourceLastVersion(resourceId);
    String newVersion = generateNewVersion(lastVersion);
    // Then construct the ResourceTask and maintain its state
    ResourceTask resourceTask = ResourceTask.createUpdateTask(resourceId, newVersion, user, system, properties);
    // The upload logic of the material update is completed by the versionService.updateVersion method
    versionService.updateVersion(resourceTask.getResourceId(), user, file, properties);

    Inside the versionService.updateVersion method, the main functions implemented are:

    ResourceHelper resourceHelper = ResourceHelperFactory.getResourceHelper();
    InputStream inputStream = file.getInputStream();
    // Get the path of the resource
    String newVersion = params.get("newVersion").toString();
    String path = versionDao.getResourcePath(resourceId) + "_" + newVersion;
    // getResourcePath takes one record from the original path, which is then suffixed with "_" + newVersion:
    // select resource from linkis_ps_bml_resources_version WHERE resource_id = #{resourceId} limit 1
    // Upload the resource to HDFS or the local file system
    StringBuilder stringBuilder = new StringBuilder();
    long size = resourceHelper.upload(path, user, inputStream, stringBuilder, OVER_WRITE);
    // Finally, insert a new resource version record into the linkis_ps_bml_resources_version table
    ResourceVersion resourceVersion = ResourceVersion.createNewResourceVersion(resourceId, path, md5String, clientIp, size, newVersion, 1);
    versionDao.insertNewVersion(resourceVersion);
    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/bml/overview/index.html b/docs/latest/architecture/public_enhancement_services/bml/overview/index.html index fde2e48a9ab..5fe2a8f3b4a 100644 --- a/docs/latest/architecture/public_enhancement_services/bml/overview/index.html +++ b/docs/latest/architecture/public_enhancement_services/bml/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -18,7 +18,7 @@ The number of bytes. After the reading is successful, the stream information is returned to the user.

  • Insert a successful download record in resource_download_history

    Database Design#

    1. Resource information table (resource)

    Field name | Function | Remarks
    resource_id | A string that uniquely identifies a resource globally | UUID can be used for identification
    resource_location | The location where resources are stored | For example, hdfs:///tmp/bdp/${USERNAME}/
    owner | The owner of the resource | e.g. zhangsan
    create_time | Record creation time |
    is_share | Whether to share | 0 means not to share, 1 means to share
    update_time | Last update time of the resource |
    is_expire | Whether the record resource expires |
    expire_time | Record resource expiration time |

    2. Resource version information table (resource_version)

    Field name | Function | Remarks
    resource_id | Uniquely identifies the resource | Joint primary key
    version | The version of the resource file |
    start_byte | Start byte of the resource file |
    end_byte | End byte of the resource file |
    size | Resource file size |
    resource_location | Resource file placement location |
    start_time | Record upload start time |
    end_time | Record upload end time |
    updater | Record update user |

    3. Resource download history table (resource_download_history)

    Field | Function | Remarks
    resource_id | Record the resource_id of the downloaded resource |
    version | Record the version of the downloaded resource |
    downloader | Record the downloading user |
    start_time | Record download start time |
    end_time | Record download end time |
    status | Whether the download succeeded | 0 means success, 1 means failure
    err_msg | Log failure reason | null means success, otherwise logs the failure reason
    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/context_service/context_service/index.html b/docs/latest/architecture/public_enhancement_services/context_service/context_service/index.html index fef2f24f915..3abda54f98b 100644 --- a/docs/latest/architecture/public_enhancement_services/context_service/context_service/index.html +++ b/docs/latest/architecture/public_enhancement_services/context_service/context_service/index.html @@ -7,7 +7,7 @@ CS Architecture | Apache Linkis - + @@ -17,7 +17,7 @@

    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/context_service/context_service_cache/index.html b/docs/latest/architecture/public_enhancement_services/context_service/context_service_cache/index.html index cfb4010b4f1..00d512a43a4 100644 --- a/docs/latest/architecture/public_enhancement_services/context_service/context_service_cache/index.html +++ b/docs/latest/architecture/public_enhancement_services/context_service/context_service_cache/index.html @@ -7,7 +7,7 @@ CS Cache Architecture | Apache Linkis - + @@ -16,7 +16,7 @@

    Note: The ContextIDValueGenerator will go to the persistence layer to pull the Array[ContextKeyValue] of the ContextID, and parse the ContextKeyValue key storage index and content through ContextKeyValueParser.

    The other interface processes provided by ContextCacheService are similar, so I won't repeat them here.

    KeyWord parsing logic#

    The specific entity bean of a ContextValue needs the annotation @keywordMethod on each get method that can serve as a keyword source. For example, the getTableName method of Table must be annotated with @keywordMethod.

    When ContextKeyValueParser parses a ContextKeyValue, it scans all methods of the passed-in object that are modified by the KeywordMethod annotation, calls each get method, and takes the toString of the returned object, which is parsed through user-selectable rules and stored in the keyword collection. The rules include separators and regular expressions.

    Precautions:

    1. The annotation will be defined to the core module of cs

    2. The modified Get method cannot take parameters

    3. The toString method of the object returned by the Get method must return the keyword
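
    A minimal sketch of such a bean follows, assuming a runtime-retained @KeywordMethod annotation as described above (the real annotation lives in the cs core module; the definition here is only a stand-in so the example is self-contained).

    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;

    @Retention(RetentionPolicy.RUNTIME)
    @Target(ElementType.METHOD)
    @interface KeywordMethod {}              // stand-in for the real cs-core annotation

    class Table {
        private final String tableName;
        Table(String tableName) { this.tableName = tableName; }

        @KeywordMethod                       // marks this getter as a keyword source
        public String getTableName() {       // rule: the get method takes no parameters
            return tableName;                // rule: toString of the returned object is the keyword
        }
    }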

    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/context_service/context_service_client/index.html b/docs/latest/architecture/public_enhancement_services/context_service/context_service_client/index.html index c6a1914587f..f7ffc543682 100644 --- a/docs/latest/architecture/public_enhancement_services/context_service/context_service_client/index.html +++ b/docs/latest/architecture/public_enhancement_services/context_service/context_service_client/index.html @@ -7,7 +7,7 @@ CS Client Design | Apache Linkis - + @@ -17,7 +17,7 @@ The second case is that the content of the ContextID is carried. We need to parse the csid. The way of parsing is to obtain the information of each instance through the method of string cutting, and then use eureka to determine whether this micro-channel still exists through the instance information. Service, if it exists, send it to this microservice instance

    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html b/docs/latest/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html index fa70d5775ad..ed0d4f837b6 100644 --- a/docs/latest/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html +++ b/docs/latest/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html @@ -7,7 +7,7 @@ CS HA Design | Apache Linkis - + @@ -18,7 +18,7 @@ The client sends a request, and the Gateway forwards it to any server. The HA module generates the HAID, including the main instance, the backup instance and the CSID, and completes the binding of the workflow and the HAID.

    When the client sends a change request, the Gateway determines that the main Instance is invalid, and then forwards the request to the standby Instance for processing. After the service on the standby Instance verifies that the HAID is valid, it loads the instance and processes the request.

    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/context_service/context_service_listener/index.html b/docs/latest/architecture/public_enhancement_services/context_service/context_service_listener/index.html index 36347f05b15..ccd22841820 100644 --- a/docs/latest/architecture/public_enhancement_services/context_service/context_service_listener/index.html +++ b/docs/latest/architecture/public_enhancement_services/context_service/context_service_listener/index.html @@ -7,7 +7,7 @@ CS Listener Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    CS Listener Architecture

    Listener Architecture#

    In DSS, when a node changes its metadata information, the context information of the entire workflow changes. We expect all nodes to perceive the change and automatically update their metadata. We use the listener mode to achieve this, and use a heartbeat mechanism to poll and maintain the metadata consistency of the context information.

    Client registration itself, CSKey registration and CSKey update process#

    The main process is as follows:

    1. Registration operation: The clients client1, client2, client3 and client4 register themselves and the CSKeys they want to monitor with the csserver through HTTP requests. The Service module obtains the callback engine instance through the external interface and registers the clients and their corresponding CSKeys.

    2. Update operation: If the ClientX node updates the CSKey content, the Service service updates the CSKey cached by the ContextCache, and the ContextCache delivers the update operation to the ListenerBus. The ListenerBus notifies the specific listener to consume (that is, the ContextKeyCallbackEngine updates the CSKeys corresponding to the Client). The consumed event will be automatically removed.

    3. Heartbeat mechanism:

    All clients use heartbeat information to detect whether the value of CSKeys in ContextKeyCallbackEngine has changed.

    ContextKeyCallbackEngine returns the updated CSKeys value to all registered clients through the heartbeat mechanism. If there is a client's heartbeat timeout, remove the client.
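
    The interactions above can be summarized with a hypothetical interface; the method names are illustrative only, not the real ContextKeyCallbackEngine API.

    import java.util.List;
    import java.util.Map;

    interface CallbackEngineSketch {
        // Registration: a client registers itself and the CSKeys it wants to monitor.
        void register(String clientId, List<String> csKeys);

        // Update: delivered through ListenerBus when a CSKey changes in the ContextCache.
        void onCSKeyUpdated(String csKey, Object newValue);

        // Heartbeat: returns the changed CSKeys for this client; a client whose
        // heartbeat times out is removed from the registry.
        Map<String, Object> heartbeat(String clientId);
    }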

    Listener UML class diagram#

    Interface: ListenerManager

    Externally: provides a ListenerBus for event delivery.

    Internally: provides a callback engine for specific event registration, access, update and heartbeat processing logic.

    Listener callbackengine timing diagram#

    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/context_service/context_service_persistence/index.html b/docs/latest/architecture/public_enhancement_services/context_service/context_service_persistence/index.html index 2c729bb7797..5d858d77760 100644 --- a/docs/latest/architecture/public_enhancement_services/context_service/context_service_persistence/index.html +++ b/docs/latest/architecture/public_enhancement_services/context_service/context_service_persistence/index.html @@ -7,7 +7,7 @@ CS Persistence Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/context_service/context_service_search/index.html b/docs/latest/architecture/public_enhancement_services/context_service/context_service_search/index.html index 6072d048861..b2a744f7b6c 100644 --- a/docs/latest/architecture/public_enhancement_services/context_service/context_service_search/index.html +++ b/docs/latest/architecture/public_enhancement_services/context_service/context_service_search/index.html @@ -7,7 +7,7 @@ CS Search Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    CS Search Architecture

    CSSearch Architecture#

    Overall architecture#

    As shown below:

    1. ContextSearch: The query entry, accepts the query conditions defined in the Map form, and returns the corresponding results according to the conditions.

    2. Building module: Each condition type corresponds to a Parser, which is responsible for converting the condition in the form of Map into a Condition object, which is implemented by calling the logic of ConditionBuilder. Conditions with complex logical relationships will use ConditionOptimizer to optimize query plans based on cost-based algorithms.

    3. Execution module: Filter out the results that match the conditions from the Cache. According to different query targets, there are three execution modes: Ruler, Fetcher and Match. The specific logic is described later.

    4. Evaluation module: Responsible for calculation of conditional execution cost and statistics of historical execution status.

    Query Condition Definition (ContextSearchCondition)#

    A query condition specifies how to filter out the part that meets the condition from a ContextKeyValue collection. The query conditions can be used to form more complex query conditions through logical operations.

    1. Support ContextType, ContextScope, KeyWord matching

      1. Corresponding to a Condition type

      2. In Cache, these should have corresponding indexes

    2. Support contains/regex matching mode for key

      1. ContainsContextSearchCondition: contains a string

      2. RegexContextSearchCondition: match a regular expression

    3. Support logical operations of or, and and not

      1. Unary operation UnaryContextSearchCondition:

    Support logical operations of a single parameter, such as NotContextSearchCondition

      2. Binary operation BinaryContextSearchCondition:

    Support the logical operation of two parameters, defined as LeftCondition and RightCondition, such as OrContextSearchCondition and AndContextSearchCondition

      3. Each logical operation corresponds to an implementation class of the above subclasses

    4. The UML class diagram of this part is as follows:

    Construction of query conditions#

    1. Support construction through ContextSearchConditionBuilder: When constructing, if multiple ContextType, ContextScope, KeyWord, contains/regex matches are declared at the same time, they will be automatically connected by And logical operation

    2. Support logical operations between Conditions and return new Conditions: And, Or and Not (considering the form of condition1.or(condition2), the top-level interface of Condition is required to define logical operation methods)

    3. Support to build from Map through ContextSearchParser corresponding to each underlying implementation class
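
    A compact sketch of this condition algebra is shown below, assuming the shape described above: logical operation methods defined on the top-level Condition interface so that condition1.or(condition2) composes naturally. The type names mirror the text but are simplified stand-ins, not the verbatim Linkis classes.

    interface Condition {
        default Condition and(Condition other) { return new AndCondition(this, other); }
        default Condition or(Condition other)  { return new OrCondition(this, other); }
        default Condition not()                { return new NotCondition(this); }
    }

    record ContainsCondition(String substring) implements Condition {}   // key contains a string
    record RegexCondition(String pattern) implements Condition {}        // key matches a regular expression
    record AndCondition(Condition left, Condition right) implements Condition {}
    record OrCondition(Condition left, Condition right) implements Condition {}
    record NotCondition(Condition inner) implements Condition {}

    class ConditionDemo {
        public static void main(String[] args) {
            // (key contains "table" OR key matches "tmp_.*") AND NOT (key contains "bak")
            Condition c = new ContainsCondition("table")
                    .or(new RegexCondition("tmp_.*"))
                    .and(new ContainsCondition("bak").not());
            System.out.println(c); // records print their structure, showing the composed tree
        }
    }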

    Execution of query conditions#

    1. Three function modes of query conditions:

      1. Ruler: Filter out eligible ContextKeyValue sub-Arrays from an Array

      2. Matcher: Determine whether a single ContextKeyValue meets the conditions

      3. Fetcher: Filter out an Array of eligible ContextKeyValue from ContextCache

    2. Each bottom-level Condition has a corresponding Execution, responsible for maintaining the corresponding Ruler, Matcher, and Fetcher.

    Query entry ContextSearch#

    Provide a search interface, receive Map as a parameter, and filter out the corresponding data from the Cache.

    1. Use Parser to convert the condition in the form of Map into a Condition object

    2. Obtain cost information through Optimizer, and determine the order of query according to the cost information

    3. After executing the corresponding Ruler/Fetcher/Matcher logic through the corresponding Execution, the search result is obtained

    Query Optimization#

    1. OptimizedContextSearchCondition maintains the Cost and Statistics information of the condition:

      1. Cost information: CostCalculator is responsible for judging whether a certain Condition can calculate Cost, and if it can be calculated, it returns the corresponding Cost object

      2. Statistics information: start/end/execution time, number of input lines, number of output lines

    2. Implement a CostContextSearchOptimizer, whose optimize method is based on the cost of the Condition to optimize the Condition and convert it into an OptimizedContextSearchCondition object. The specific logic is described as follows:

      1. Disassemble a complex Condition into a tree structure based on the combination of logical operations. Each leaf node is a basic simple Condition; each non-leaf node is a logical operation.

    Tree A as shown in the figure below is a complex condition composed of five simple conditions of ABCDE through various logical operations.

    (Tree A)
    2. The execution of these Conditions is actually depth-first, traversing the tree from left to right. Moreover, according to the exchange rules of logical operations, the left-right order of the child nodes of a node in the Condition tree can be exchanged, so all possible trees in all possible execution orders can be enumerated.

    Tree B as shown in the figure below is another possible sequence of tree A above, which is exactly the same as the execution result of tree A, except that the execution order of each part has been adjusted.

    (Tree B)
    3. For each tree, the cost is calculated from the leaf nodes and accumulated up to the root node, which gives the final cost of the tree; finally, the tree with the smallest cost is taken as the optimal execution order.

    The rules for calculating node cost are as follows:

    1. For leaf nodes, each node has two attributes: Cost and Weight. Cost is the cost calculated by the CostCalculator. Weight is assigned according to the execution order of the nodes; the current default is 1 for the left node and 0.5 for the right node (how to tune this is left for later). The reason for assigning weights is that in some cases the condition on the left can already determine whether the entire combined logic matches, so the condition on the right does not have to be executed in all cases, and its actual cost needs to be discounted by a certain percentage.

    2. For non-leaf nodes, Cost = the sum of Cost×Weight of all child nodes; the weight assignment logic is consistent with that of leaf nodes.

    Taking tree A and tree B as examples, calculate the costs of the two trees respectively, as shown in the figure below, where the number in each node is Cost|Weight, assuming that the costs of the five simple conditions A, B, C, D and E are 10, 100, 50, 10 and 100 respectively. It can be concluded that the cost of tree B is less than that of tree A, making it the better plan (a small worked sketch of this weighting rule appears at the end of this section).

    3. Use CostCalculator to measure the cost of simple conditions:

      1. The condition acting on the index: the cost is determined according to the distribution of the index value. For example, when the length of the Array obtained by condition A from the Cache is 100 and condition B is 200, then the cost of condition A is less than B.

      2. Conditions that need to be traversed:

        1. According to the matching mode of the condition itself, an initial Cost is given: for example, Regex is 100, Contains is 10, etc. (the specific values will be adjusted as they are implemented)

        2. Based on the efficiency of historical queries, e.g. the throughput per unit time, the real-time Cost is obtained through continuous adjustment on top of the initial Cost.
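
    A minimal worked sketch of the weighting rule follows: a parent's cost is the sum of each child's Cost x Weight, with the left child weighted 1 and the right child 0.5 as stated above. Using two of the example costs (A=10, B=100), it shows why the optimizer prefers putting the cheaper condition on the left.

    class CostSketch {
        // Cost of a two-child logical node = sum of child Cost x Weight (left 1.0, right 0.5)
        static double nodeCost(double leftCost, double rightCost) {
            return leftCost * 1.0 + rightCost * 0.5;
        }

        public static void main(String[] args) {
            System.out.println(nodeCost(10, 100));  // A or B -> 10*1.0 + 100*0.5 = 60.0
            System.out.println(nodeCost(100, 10));  // B or A -> 100*1.0 + 10*0.5 = 105.0
            // 60 < 105, so the ordering with the cheaper left child is the better plan.
        }
    }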

    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/context_service/overview/index.html b/docs/latest/architecture/public_enhancement_services/context_service/overview/index.html index d27f136059c..1dbb406007a 100644 --- a/docs/latest/architecture/public_enhancement_services/context_service/overview/index.html +++ b/docs/latest/architecture/public_enhancement_services/context_service/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -22,7 +22,7 @@ Enter Persistence architecture design

    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/datasource_manager/index.html b/docs/latest/architecture/public_enhancement_services/datasource_manager/index.html index 369f83a60b0..6d5d2b939e0 100644 --- a/docs/latest/architecture/public_enhancement_services/datasource_manager/index.html +++ b/docs/latest/architecture/public_enhancement_services/datasource_manager/index.html @@ -7,7 +7,7 @@ Data Source Management Service Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Data Source Management Service Architecture

    Background#

    Exchangis 0.x and Linkis 0.x both had integrated data source modules in earlier versions. In order to reuse the data source management capability, Linkis reconstructs the data source module based on linkis-datasource (refer to the related documents) and splits data source management into the data source management service and the metadata management service.

    This article mainly involves the DataSource Manager Server data source management service, which provides the following functions:

    1) Linkis unified management of service startup and deployment: it does not increase operation and maintenance costs, and reuses the Linkis service capabilities;

    2) Management services with a graphical interface are provided through Linkis Web. The interface offers management services such as creating a data source, data source query, data source update, connectivity test and so on;

    3) The service is stateless and can be deployed with multiple instances, so that the service is highly available. When the system is deployed, multiple instances can be deployed, each providing services independently to the outside world without interfering with each other; all information is stored in the database for sharing;

    4) Full life cycle management of data sources is provided, including creation, query, update, test and expiration management;

    5) Multi-version data source management: historical data sources will be saved in the database, and data source expiration management is provided;

    6) The Restful interface provides the functions; a detailed list: data source type query, data source detailed information query, data source information query based on version, data source version query, get data source parameter list, multi-dimensional data source search, get data source environment query and update, add data source, data source parameter configuration, data source expiration setting, data source connectivity test.

    Architecture Diagram#

    datasource Architecture diagram

    Architecture Description#

    1. The service is registered in the Linkis-Eureka-Service and managed in a unified manner with the other Linkis microservices. Clients can reach the data source management service by connecting to the Linkis-GateWay-Service with the service name data-source-manager.

    2. The interface layer serves other applications through the Restful interface, providing addition, deletion and modification of data sources and data source environments, data source link and dual-link tests, data source version management and expiration operations;

    3. The Service layer mainly handles the service management of the database and the material library, permanently retaining the relevant information of the data sources;

    4. The link test of a data source is done through the linkis metastore server service, which currently supports the mysql/es/kafka/hive services.

    Core Process#

    1. To create a new data source, the user is first obtained from the request to determine whether the user is valid. The next step is to verify the relevant field information of the data source: the data source name and data source type cannot be empty. The data source name is used to confirm whether the data source already exists; if it does not exist, it is inserted into the database and the data source ID number is returned.

    2. To update a data source, the user is first obtained from the request to determine whether the user is valid. The next step is to verify the relevant field information of the new data source: the data source name and data source type cannot be empty. Whether the data source exists is confirmed by the data source ID number; if it does not exist, an exception is returned. If it exists, it is further judged whether the user has update permission for the data source: only the administrator or the owner of the data source has permission to update. If the user has permission, the data source is updated and the data source ID is returned.

    3. To update data source parameters, the user is first obtained from the request to determine whether the user is valid, and the detailed data source information is obtained according to the passed data source ID. It is then determined whether the user is the owner of the changed data source or an administrator. If so, the modified parameters are further verified, the parameters are updated after the verification passes, and the versionId is returned.
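
    A pseudocode-style sketch of step 1 follows; the helper names (isValidUser, existsByName, insertDataSource) are illustrative stubs, not the real service API, and only the validation order comes from the text above.

    class DataSourceServiceSketch {
        // Step-1 flow: validate the user, validate the fields,
        // check for an existing name, then insert and return the new ID.
        long createDataSource(String requestUser, String name, String typeId, String parameter) {
            if (!isValidUser(requestUser)) {                 // user taken from the request
                throw new IllegalArgumentException("invalid user");
            }
            if (name == null || name.isEmpty() || typeId == null || typeId.isEmpty()) {
                throw new IllegalArgumentException("data source name and type cannot be empty");
            }
            if (existsByName(name)) {                        // name confirms whether it already exists
                throw new IllegalStateException("data source already exists");
            }
            return insertDataSource(requestUser, name, typeId, parameter); // returns the data source ID
        }

        boolean isValidUser(String user) { return user != null && !user.isEmpty(); } // stub
        boolean existsByName(String name) { return false; }                          // stub: real check hits the DB
        long insertDataSource(String u, String n, String t, String p) { return 1L; } // stub: real insert returns the ID
    }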

    Entity Object#

    Class Name | Describe
    DataSourceType | Indicates the type of data source
    DataSourceParamKeyDefinition | Declares data source property configuration definitions
    DataSource | Data source object entity class, including permission tags and attribute configuration definitions
    DataSourceEnv | Data source environment object entity class, which also contains attribute configuration definitions
    DataSourceParameter | Data source specific parameter configuration
    DatasourceVersion | Data source version details

    Database Design#

    Database Diagram:#

    Data Table Definition:#

    Table: linkis_ps_dm_datasource <--> Object: DataSource

    Serial Number | Column | Describe
    1 | id | Data source ID
    2 | datasource_name | Data source name
    3 | datasource_desc | Data source detailed description
    4 | datasource_type_id | Data source type ID
    5 | create_identify | Create identify
    6 | create_system | System that created the data source
    7 | parameter | Data source parameters
    8 | create_time | Data source creation time
    9 | modify_time | Data source modification time
    10 | create_user | Data source create user
    11 | modify_user | Data source modify user
    12 | labels | Data source label
    13 | version_id | Data source version ID
    14 | expire | Whether the data source is expired
    15 | published_version_id | Data source published version number

    Table: linkis_ps_dm_datasource_type <--> Object: DataSourceType

    Serial Number | Column | Describe
    1 | id | Data source type ID
    2 | name | Data source type name
    3 | description | Data source type description
    4 | option | Type of data source
    5 | classifier | Data source type classifier
    6 | icon | Data source image display path
    7 | layers | Data source type hierarchy

    Table: linkis_ps_dm_datasource_env <--> Object: DataSourceEnv

    Serial Number | Column | Describe
    1 | id | Data source environment ID
    2 | env_name | Data source environment name
    3 | env_desc | Data source environment description
    4 | datasource_type_id | Data source type ID
    5 | parameter | Data source environment parameters
    6 | create_time | Data source environment creation time
    7 | create_user | Data source environment create user
    8 | modify_time | Data source modification time
    9 | modify_user | Data source modify user

    Table: linkis_ps_dm_datasource_type_key <--> Object: DataSourceParamKeyDefinition

    Serial Number | Column | Describe
    1 | id | Key-value type ID
    2 | data_source_type_id | Data source type ID
    3 | key | Data source parameter key value
    4 | name | Data source parameter name
    5 | default_value | Data source parameter default value
    6 | value_type | Data source parameter type
    7 | scope | Data source parameter range
    8 | require | Whether the data source parameter is required
    9 | description | Data source parameter description
    10 | value_regex | Data source parameter regular expression
    11 | ref_id | Data source parameter association ID
    12 | ref_value | Data source parameter associated value
    13 | data_source | Data source
    14 | update_time | Update time
    15 | create_time | Create time

    Table: linkis_ps_dm_datasource_version <--> Object: DatasourceVersion

    Serial Number | Column | Describe
    1 | version_id | Data source version ID
    2 | datasource_id | Data source ID
    3 | parameter | The version parameter of the data source
    4 | comment | Comment
    5 | create_time | Create time
    6 | create_user | Create user
    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/metadata_manager/index.html b/docs/latest/architecture/public_enhancement_services/metadata_manager/index.html index b2895cb3a4f..86b4d55a7a5 100644 --- a/docs/latest/architecture/public_enhancement_services/metadata_manager/index.html +++ b/docs/latest/architecture/public_enhancement_services/metadata_manager/index.html @@ -7,7 +7,7 @@ Data Source Management Service Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Data Source Management Service Architecture

    Background#

    Exchangis 0.x and Linkis 0.x both had integrated data source modules in earlier versions. In order to reuse the data source management capability, Linkis reconstructs the data source module based on linkis-datasource (refer to the related documents), and data source management is unpacked into the data source management service and the metadata management service.

    This article mainly involves the MetaData Manager Server data source management service, which provides the following functions:

    1) Linkis unified management of service startup and deployment: it does not increase operation and maintenance costs, and reuses the Linkis service capabilities;

    2) The service is stateless and deployed in multiple instances to achieve high service availability. When the system is deployed, multiple instances can be deployed, each providing services independently to the outside world without interfering with each other; all information is stored in the database for sharing;

    3) Full life cycle management of data sources is provided, including creation, query, update, test and expiration management;

    4) Multi-version data source management: historical data sources will be saved in the database, and data source expiration management is provided;

    5) The Restful interface provides the functions; a detailed list: database information query, database table information query, database table parameter information query, and data partition information query.

    Architecture Diagram#

    Data Source Architecture Diagram

    Architecture Description#

    1. The service is registered in the Linkis-Eureka-Service and managed in a unified manner with the other Linkis microservices. Clients can reach this service by connecting to the Linkis-GateWay-Service with the service name metamanager.

    2. The interface layer provides database/table/partition information queries to other applications through the Restful interface;

    3. In the Service layer, the data source type is obtained from the data source management service by the data source ID number, and the concrete supported service is obtained through the type. The services supported initially are mysql/es/kafka/hive;

    Core Process#

    1. The client passes in the specified data source ID and obtains information through the Restful interface. For example, to query the database list for the data source with ID 1, the url is http://<meta-server-url>/metadatamanager/dbs/1;

    2. According to the data source ID, the data source type is obtained from the data source service <data-source-manager> via RPC;

    3. According to the data source type, the corresponding Service [hive/es/kafka/mysql] is loaded, the corresponding operation is performed, and the result is returned;
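
    A minimal sketch of step 1 as an HTTP call is shown below. The path comes from the example above; <meta-server-url> remains a placeholder to be replaced by your actual gateway address, and authentication headers (if required by your deployment) are omitted.

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class MetadataQuerySketch {
        public static void main(String[] args) throws Exception {
            String metaServerUrl = args[0]; // your <meta-server-url> placeholder value
            // Query the database list for the data source with ID 1.
            HttpRequest req = HttpRequest.newBuilder(
                    URI.create(metaServerUrl + "/metadatamanager/dbs/1")).GET().build();
            HttpResponse<String> resp = HttpClient.newHttpClient()
                    .send(req, HttpResponse.BodyHandlers.ofString());
            System.out.println(resp.body()); // the database list for data source 1
        }
    }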

    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/overview/index.html b/docs/latest/architecture/public_enhancement_services/overview/index.html index 6c72101c5c5..ca8d86ee193 100644 --- a/docs/latest/architecture/public_enhancement_services/overview/index.html +++ b/docs/latest/architecture/public_enhancement_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    PublicEnhancementService (PS) architecture design

    PublicEnhancementService (PS): the public enhancement service, a module that provides functions such as unified configuration management, context service, material library, data source management, microservice management and historical task query for the other microservice modules.

    Introduction to the second-level module:

    BML material library#

    It is the Linkis material management system, mainly used to store various file data of users, including user scripts, resource files, third-party Jar packages, etc.; it can also store the class libraries that engines need at runtime.

    Core Class | Core Function
    UploadService | Provides the resource upload service
    DownloadService | Provides the resource download service
    ResourceManager | Provides a unified management entry for uploading and downloading resources
    VersionManager | Provides resource version marking and version management functions
    ProjectManager | Provides project-level resource management and control capabilities

    Unified configuration management#

    Configuration provides a "user-engine-application" three-level configuration management solution, which provides users with the function of configuring custom engine parameters under various access applications.

    Core Class | Core Function
    CategoryService | Provides management services for application and engine catalogs
    ConfigurationService | Provides a unified management service for user configuration

    ContextService context service#

    ContextService is used to solve the problem of data and information sharing across multiple systems in a data application development process.

    Core Class | Core Function
    ContextCacheService | Provides a cache service for context information
    ContextClient | Provides the ability for other microservices to interact with the CSServer group
    ContextHAManager | Provides high-availability capabilities for ContextService
    ListenerManager | Provides the message bus capability
    ContextSearch | Provides the query entry
    ContextService | Implements the overall execution logic of the context service

    Datasource data source management#

    Datasource provides the ability to connect to different data sources for other microservices.

    Core Class | Core Function
    datasource-server | Provides the ability to connect to different data sources

    InstanceLabel microservice management#

    InstanceLabel provides registration and labeling functions for other microservices connected to linkis.

    Core Class | Core Function
    InsLabelService | Provides microservice registration and label management functions

    Jobhistory historical task management#

    Jobhistory provides users with linkis historical task query, progress, log display related functions, and provides a unified historical task view for administrators.

    Core Class | Core Function
    JobHistoryQueryService | Provides the historical task query service

    Variable user-defined variable management#

    Variable provides users with functions related to the storage and use of custom variables.

    Core Class | Core Function
    VariableService | Provides functions related to the storage and use of custom variables

    UDF user-defined function management#

    UDF provides users with the function of custom functions, which can be introduced by users when writing code.

    Core Class | Core Function
    UDFService | Provides the user-defined function service
    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/public_service/index.html b/docs/latest/architecture/public_enhancement_services/public_service/index.html index 6d901c47f5d..41161ab34df 100644 --- a/docs/latest/architecture/public_enhancement_services/public_service/index.html +++ b/docs/latest/architecture/public_enhancement_services/public_service/index.html @@ -7,7 +7,7 @@ Public Service | Apache Linkis - + @@ -20,7 +20,7 @@ The main functions are as follows:

    • Provides resource management capabilities for some specific labels to assist RM in more refined resource management.

    • Provides labeling capabilities for users. The user label will be automatically added for judgment when applying for the engine.

    • Provides the label analysis module, which can parse a user's request into a set of labels.

    • Provides node label management capabilities, mainly used for CRUD of node labels and for label resource management, i.e. managing the resources of certain labels by marking the maximum resource, minimum resource and used resource of a label.

    - + \ No newline at end of file diff --git a/docs/latest/deployment/cluster_deployment/index.html b/docs/latest/deployment/cluster_deployment/index.html index ad1e2d8ac86..2caeac5832e 100644 --- a/docs/latest/deployment/cluster_deployment/index.html +++ b/docs/latest/deployment/cluster_deployment/index.html @@ -7,7 +7,7 @@ Cluster Deployment | Apache Linkis - + @@ -26,7 +26,7 @@ Linux clear process sudo kill - 9 process number

    4. matters needing attention#

    4.1 It is best to start all services at the beginning, because there are dependencies between services. If some services are missing and the corresponding backup cannot be found through Eureka, dependent services will fail to start; a service that fails to start will not restart automatically. In that case, wait until the alternative service has been added before shutting down the related services#

    - + \ No newline at end of file diff --git a/docs/latest/deployment/deploy_linkis_without_hdfs/index.html b/docs/latest/deployment/deploy_linkis_without_hdfs/index.html index 7a3a97fef8a..034c77f6a79 100644 --- a/docs/latest/deployment/deploy_linkis_without_hdfs/index.html +++ b/docs/latest/deployment/deploy_linkis_without_hdfs/index.html @@ -7,7 +7,7 @@ Deploy Linkis without HDFS | Apache Linkis - + @@ -20,7 +20,7 @@ [INFO] Retrieving result-set, may take time if result-set is large, please do not exit program.============ RESULT SET 1 ============hello ############Execute Success!!!########
    - + \ No newline at end of file diff --git a/docs/latest/deployment/engine_conn_plugin_installation/index.html b/docs/latest/deployment/engine_conn_plugin_installation/index.html index 2eef08581fc..7e37c99b0c4 100644 --- a/docs/latest/deployment/engine_conn_plugin_installation/index.html +++ b/docs/latest/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ EngineConnPlugin Installation | Apache Linkis - + @@ -17,7 +17,7 @@ wds.linkis.engineconn.plugin.loader.store.path, which is used by EngineConnPluginServer to read the actual implementation Jar of the engine.

    It is highly recommended to specify wds.linkis.engineconn.home and wds.linkis.engineconn.plugin.loader.store.path as the same directory, so that you can directly unzip the engine ZIP package exported by maven into this directory, for example into the ${LINKIS_HOME}/lib/linkis-engineconn-plugins directory.

    ${LINKIS_HOME}/lib/linkis-engineconn-plugins:
    └── hive
        └── dist
        └── plugin
    └── spark
        └── dist
        └── plugin

    If the two parameters do not point to the same directory, you need to place the dist and plugin directories separately, as shown in the following example:

    ## dist directory
    ${LINKIS_HOME}/lib/linkis-engineconn-plugins/dist:
    └── hive
        └── dist
    └── spark
        └── dist

    ## plugin directory
    ${LINKIS_HOME}/lib/linkis-engineconn-plugins/plugin:
    └── hive
        └── plugin
    └── spark
        └── plugin

    2.2 Configuration modification of management console (optional)#

    The configuration of the Linkis 1.0 management console is managed by engine label. If the new engine has configuration parameters, you need to insert the corresponding configuration parameters into the Configuration, in the following tables:

    linkis_configuration_config_key: insert the keys and default values of the engine's configuration parameters
    linkis_manager_label: insert the engine label, such as hive-1.2.1
    linkis_configuration_category: insert the catalog relationship of the engine
    linkis_configuration_config_value: insert the configuration that the engine needs to display

    If it is an existing engine and a new version is added, you can modify the version of the corresponding engine in the linkis_configuration_dml.sql file and execute it.

    2.3 Engine refresh#

    1. The engine supports real-time refresh. After the engine is placed in the corresponding directory, Linkis 1.0 provides a way to load the engine without shutting down the server: just send a request to the linkis-engineconn-plugin-server service through the Restful interface, that is, to the actual deployment ip+port of the service. The request interface is http://ip:port/api/rest_j/v1/rpc/receiveAndReply, the request method is POST, and the request body is {"method":"/enginePlugin/engineConn/refreshAll"} (see the sketch after this list).

    2. Restart refresh: the engine catalog can be forced to refresh by restarting

    ### cd to the sbin directory, restart linkis-engineconn-plugin-server
    cd /Linkis1.0.0/sbin
    ## Execute linkis-daemon script
    sh linkis-daemon.sh restart linkis-engine-plugin-server

    3. Check whether the engine refresh succeeded: if you encounter problems during the refresh and need to confirm whether it succeeded, check whether the last_update_time of the linkis_engine_conn_plugin_bml_resources table in the database is the time at which the refresh was triggered.
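
    A minimal sketch of the real-time refresh call from step 1 follows. The endpoint and request body are taken verbatim from the step above; the ip:port argument is an assumption standing in for your actual linkis-engineconn-plugin-server deployment address, and any authentication required by your gateway is omitted.

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class EnginePluginRefreshSketch {
        public static void main(String[] args) throws Exception {
            String ipPort = args[0]; // e.g. "127.0.0.1:9103" -- assumption, use your deployment address
            HttpRequest req = HttpRequest.newBuilder(
                    URI.create("http://" + ipPort + "/api/rest_j/v1/rpc/receiveAndReply"))
                    .header("Content-Type", "application/json")
                    .POST(HttpRequest.BodyPublishers.ofString(
                            "{\"method\":\"/enginePlugin/engineConn/refreshAll\"}"))
                    .build();
            HttpResponse<String> resp = HttpClient.newHttpClient()
                    .send(req, HttpResponse.BodyHandlers.ofString());
            System.out.println(resp.statusCode() + " " + resp.body());
        }
    }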

    - + \ No newline at end of file diff --git a/docs/latest/deployment/installation_hierarchical_structure/index.html b/docs/latest/deployment/installation_hierarchical_structure/index.html index 7cd536c6216..d78226107ae 100644 --- a/docs/latest/deployment/installation_hierarchical_structure/index.html +++ b/docs/latest/deployment/installation_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Installation Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Installation directory structure

    The directory structure of Linkis 1.0 is very different from that of the 0.X versions. In 0.X, each microservice had its own independent root directory. The main advantage of that layout is that it is easy to distinguish microservices and manage them individually, but it has some obvious problems:

    1. There are too many microservice directories, making it inconvenient to switch between them for management
    2. There is no unified startup script, which makes starting and stopping microservices troublesome
    3. There are a large number of duplicated service configurations, and the same configuration often needs to be modified in many places
    4. There are a large number of repeated Lib dependencies, which increases the size of the installation package and the risk of dependency conflicts

    Therefore, in Linkis 1.0, we have greatly optimized and adjusted the installation directory structure: reducing the number of microservice directories, reducing duplicated jar dependencies, and reusing configuration files and microservice management scripts as much as possible. This is mainly reflected in the following aspects:

    1.The bin folder is no longer provided for each microservice; it is now shared by all microservices.

    The bin folder has become the installation directory, mainly used to install Linkis 1.0 and check the environment status. The new sbin directory provides one-click start and stop for Linkis, and provides independent start and stop for each microservice by passing parameters.

    2.A separate conf directory is no longer provided for each microservice; it is now shared by all microservices.

    The conf folder contains two kinds of content. On the one hand, there is configuration information shared by all microservices, which users can customize according to their own environment; on the other hand, there is configuration specific to each microservice, which under normal circumstances users do not need to change.

    3.The lib folder is no longer provided for each microservice; it is now shared by all microservices.

    The lib folder also contains two kinds of content: the common dependencies required by all microservices, and the special dependencies required by each microservice.

    4.The log directory is no longer provided for each microservice; it is now shared by all microservices.

    The Log directory contains log files of all microservices.

    The simplified directory structure of Linkis 1.0 is as follows.

    ├── bin ──installation directory
    │ ├── checkEnv.sh ── Environmental variable detection
    │ ├── checkServices.sh ── Microservice status check
    │ ├── common.sh ── Some public shell functions
    │ ├── install-io.sh ── Used for dependency replacement during installation
    │ └── install.sh ── Main script of Linkis installation
    ├── conf ──configuration directory
    │ ├── application-eureka.yml
    │ ├── application-linkis.yml ──Microservice general yml
    │ ├── linkis-cg-engineconnmanager-io.properties
    │ ├── linkis-cg-engineconnmanager.properties
    │ ├── linkis-cg-engineplugin.properties
    │ ├── linkis-cg-entrance.properties
    │ ├── linkis-cg-linkismanager.properties
    │ ├── linkis-computation-governance
    │ │   └── linkis-client
    │ │       └── linkis-cli
    │ │           ├── linkis-cli.properties
    │ │           └── log4j2.xml
    │ ├── linkis-env.sh ──linkis environment properties
    │ ├── linkis-et-validator.properties
    │ ├── linkis-mg-gateway.properties
    │ ├── linkis.properties ──linkis global properties
    │ ├── linkis-ps-bml.properties
    │ ├── linkis-ps-cs.properties
    │ ├── linkis-ps-datasource.properties
    │ ├── linkis-ps-publicservice.properties
    │ ├── log4j2.xml
    │ ├── proxy.properties (Optional)
    │ └── token.properties (Optional)
    ├── db ──database DML and DDL file directory
    │ ├── linkis_ddl.sql ──Database table definition SQL
    │ ├── linkis_dml.sql ──Database table initialization SQL
    │ └── module ──Contains DML and DDL files of each microservice
    ├── lib ──lib directory
    │ ├── linkis-commons ──Common dependency package
    │ ├── linkis-computation-governance ──The lib directory of the computing governance module
    │ ├── linkis-engineconn-plugins ──lib directory of all EngineConnPlugins
    │ ├── linkis-public-enhancements ──lib directory of public enhancement services
    │ └── linkis-spring-cloud-services ──SpringCloud lib directory
    ├── logs ──log directory
    │ ├── linkis-cg-engineconnmanager-gc.log
    │ ├── linkis-cg-engineconnmanager.log
    │ ├── linkis-cg-engineconnmanager.out
    │ ├── linkis-cg-engineplugin-gc.log
    │ ├── linkis-cg-engineplugin.log
    │ ├── linkis-cg-engineplugin.out
    │ ├── linkis-cg-entrance-gc.log
    │ ├── linkis-cg-entrance.log
    │ ├── linkis-cg-entrance.out
    │ ├── linkis-cg-linkismanager-gc.log
    │ ├── linkis-cg-linkismanager.log
    │ ├── linkis-cg-linkismanager.out
    │ ├── linkis-et-validator-gc.log
    │ ├── linkis-et-validator.log
    │ ├── linkis-et-validator.out
    │ ├── linkis-mg-eureka-gc.log
    │ ├── linkis-mg-eureka.log
    │ ├── linkis-mg-eureka.out
    │ ├── linkis-mg-gateway-gc.log
    │ ├── linkis-mg-gateway.log
    │ ├── linkis-mg-gateway.out
    │ ├── linkis-ps-bml-gc.log
    │ ├── linkis-ps-bml.log
    │ ├── linkis-ps-bml.out
    │ ├── linkis-ps-cs-gc.log
    │ ├── linkis-ps-cs.log
    │ ├── linkis-ps-cs.out
    │ ├── linkis-ps-datasource-gc.log
    │ ├── linkis-ps-datasource.log
    │ ├── linkis-ps-datasource.out
    │ ├── linkis-ps-publicservice-gc.log
    │ ├── linkis-ps-publicservice.log
    │ └── linkis-ps-publicservice.out
    ├── pid ──Process IDs of all microservices
    │ ├── linkis_cg-engineconnmanager.pid ──EngineConnManager microservice
    │ ├── linkis_cg-engineconnplugin.pid ──EngineConnPlugin microservice
    │ ├── linkis_cg-entrance.pid ──Engine entrance microservice
    │ ├── linkis_cg-linkismanager.pid ──linkis manager microservice
    │ ├── linkis_mg-eureka.pid ──eureka microservice
    │ ├── linkis_mg-gateway.pid ──gateway microservice
    │ ├── linkis_ps-bml.pid ──material library microservice
    │ ├── linkis_ps-cs.pid ──Context microservice
    │ ├── linkis_ps-datasource.pid ──Data source microservice
    │ └── linkis_ps-publicservice.pid ──public microservice
    └── sbin ──microservice start and stop script directory
        ├── ext ──Start and stop script directory of each microservice
        ├── linkis-daemon.sh ── Quick start and stop, restart a single microservice script
        ├── linkis-start-all.sh ── Start all microservice scripts with one click
        └── linkis-stop-all.sh ── Stop all microservice scripts with one click

    Configuration item modification

    After executing install.sh in the bin directory to complete the Linkis installation, you need to modify the configuration items. All configuration items are located in the conf directory. Normally you need to modify the three configuration files db.sh, linkis.properties, and linkis-env.sh. For details on project installation and configuration, please refer to the article "Linkis1.0 Installation"

    Microservice start and stop

    After modifying the configuration items, you can start the microservice in the sbin directory. The names of all microservices are as follows:

    ├── linkis-cg-engineconnmanager  ──engine management service
    ├── linkis-cg-engineplugin  ──EngineConnPlugin management service
    ├── linkis-cg-entrance  ──computing governance entrance service
    ├── linkis-cg-linkismanager  ──computing governance management service
    ├── linkis-mg-eureka  ──microservice registry service
    ├── linkis-mg-gateway  ──Linkis gateway service
    ├── linkis-ps-bml  ──material library service
    ├── linkis-ps-cs  ──context service
    ├── linkis-ps-datasource  ──data source service
    └── linkis-ps-publicservice  ──public service

    Microservice abbreviation:

    | Abbreviation | Full English Name | Full Chinese Name |
    | --- | --- | --- |
    | cg | Computation Governance | Computing Governance |
    | mg | Microservice Governance | Microservice Governance |
    | ps | Public Enhancement Service | Public Enhancement Service |

    In the past, to start and stop a single microservice, you needed to enter the bin directory of each microservice and execute its start/stop script. With many microservices, starting and stopping became troublesome and added a lot of extra directory switching. Linkis 1.0 places all microservice start and stop scripts in the sbin directory, and only a single entry script needs to be executed.

    Under the Linkis/sbin directory:

    1.Start all microservices at once:

    sh linkis-start-all.sh

    2.Shut down all microservices at once

    sh linkis-stop-all.sh

    3.Start a single microservice (the service name needs the linkis prefix removed, such as mg-eureka)

    sh linkis-daemon.sh start service-name

    For example:

    sh linkis-daemon.sh start mg-eureka

    4.Shut down a single microservice

    sh linkis-daemon.sh stop service-name

    For example:

    sh linkis-daemon.sh stop mg-eureka

    5.Restart a single microservice

    sh linkis-daemon.sh restart service-name

    For example:

    sh linkis-daemon.sh restart mg-eureka

    6.View the status of a single microservice

    sh linkis-daemon.sh status service-name

    For example:

    sh linkis-daemon.sh status mg-eureka
    - + \ No newline at end of file diff --git a/docs/latest/deployment/involve_skywalking_into_linkis/index.html b/docs/latest/deployment/involve_skywalking_into_linkis/index.html index 2c1bece28bb..62332e07ebe 100644 --- a/docs/latest/deployment/involve_skywalking_into_linkis/index.html +++ b/docs/latest/deployment/involve_skywalking_into_linkis/index.html @@ -7,7 +7,7 @@ Involve SkyWaling into Linkis | Apache Linkis - + @@ -20,7 +20,7 @@

    Modify the configuration item SKYWALKING_AGENT_PATH in linkis-env.sh of Linkis. Set it to the path to skywalking-agent.jar.

    SKYWALKING_AGENT_PATH=/path/to/skywalking-agent.jar

    Then start Linkis.

    $ bash linkis-start-all.sh

    4. Result display#

    The Linkis UI uses port 8080 by default. After Linkis enables SkyWalking and you open the SkyWalking UI, if you can see the picture below, the integration succeeded.

    - + \ No newline at end of file diff --git a/docs/latest/deployment/linkis_scriptis_install/index.html b/docs/latest/deployment/linkis_scriptis_install/index.html index 2c70102ea56..c7b287e3517 100644 --- a/docs/latest/deployment/linkis_scriptis_install/index.html +++ b/docs/latest/deployment/linkis_scriptis_install/index.html @@ -7,7 +7,7 @@ Installation and deployment of tool scriptis | Apache Linkis - + @@ -28,7 +28,7 @@

    After modifying the configuration, reload the nginx configuration

    sudo nginx -s reload

    Note the difference between root and alias in nginx (a sketch follows the list below)

    • The result of root processing is: root path + location path
    • The result of alias processing is to replace the location path with the alias path
    • Alias is the definition of a directory alias, and root is the definition of the top-level directory
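
    As an illustrative sketch of the difference (the /scriptis/ location and /data/dist paths are hypothetical):

    # With root, a request for /scriptis/index.html is resolved to
    # /data/dist/scriptis/index.html (root path + location path)
    location /scriptis/ {
        root /data/dist;
    }

    # With alias, the same request is resolved to /data/dist/index.html
    # (the location path is replaced by the alias path)
    location /scriptis/ {
        alias /data/dist/;
    }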

    4. scriptis Use steps#

    4.1 Log in to the linkis management console normally#

    #http://10.10.10.10:8080/#/
    http://nginxIp:port/#/

    Because scriptis requires login verification, you need to log in first to get the cookie.

    4.2 Visit the scriptis page after successful login#

    #http://10.10.10.10:8080/scriptis/
    http://nginxIp:port/scriptis/

    nginxIp: the nginx server IP; port: the port number configured for the Linkis management console in nginx; scriptis: the nginx location configured for the static files of the scriptis project (customizable).

    4.3 use scriptis#

    Take creating an SQL query task as an example.

    step1 Create a new script

    (screenshot)

    step2 Enter the SQL statement to query

    (screenshot)

    step3 Run the script

    (screenshot)

    step4 View the results

    (screenshot)

    - + \ No newline at end of file diff --git a/docs/latest/deployment/quick_deploy/index.html b/docs/latest/deployment/quick_deploy/index.html index bea6fb1495a..1e48ebf6dbf 100644 --- a/docs/latest/deployment/quick_deploy/index.html +++ b/docs/latest/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ Quick Deployment | Apache Linkis - + @@ -21,7 +21,7 @@ ##:If your hive version is not 1.2.1, you need to modify the following parameter: #HIVE_VERSION=2.3.3

    f. Modify the database configuration#

    vi deploy-config/db.sh 
    # set the connection information of the database
    # including ip address, database's name, username and port
    # Mainly used to store user's customized variables, configuration parameters, UDFs, and small functions, and to provide underlying storage of the JobHistory.
    MYSQL_HOST=
    MYSQL_PORT=
    MYSQL_DB=
    MYSQL_USER=
    MYSQL_PASSWORD=

    3. Installation and Startup#

    1. Execute the installation script:#

    sh bin/install.sh

    2. Installation steps#

    • The install.sh script will ask you whether to initialize the database and import the metadata.

    A user might run the install.sh script repeatedly and accidentally clear all data in the database. Therefore, each time install.sh is executed, the user is asked whether to initialize the database and import the metadata.

    Please select yes on the first installation.

    Please note: If you are upgrading the existing environment of Linkis from 0.X to 1.0, please do not choose yes directly, refer to Linkis1.0 Upgrade Guide first.

    3. Check whether the installation was successful#

    You can check whether the installation is successful or not by viewing the logs printed on the console.

    If there is an error message, check the specific reason for that error or refer to FAQ for help.

    4. Add mysql driver package#

    Note

    Because the mysql-connector-java driver is licensed under GPL 2.0, which does not meet the license policy of the Apache open source agreement, starting from version 1.0.3 the official Apache release package does not include the mysql-connector-java-x.x.x.jar dependency by default. You need to add it to the corresponding lib directories during installation and deployment.

    To download the mysql driver, take version 5.1.49 as an example: download link https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.49/mysql-connector-java-5.1.49.jar

    Copy the mysql driver package to the lib package path

    cp mysql-connector-java-5.1.49.jar ${LINKIS_HOME}/lib/linkis-spring-cloud-services/linkis-mg-gateway/
    cp mysql-connector-java-5.1.49.jar ${LINKIS_HOME}/lib/linkis-commons/public-module/

    5. Linkis quick startup#

    Notice that if you use DSS or other projects that rely on Linkis version < 1.1.1, you also need to modify the ${LINKIS_HOME}/conf/linkis.properties file:

    echo "wds.linkis.session.ticket.key=bdp-user-ticket-id" >> linkis.properties

    (1). Start services

    Run the following commands on the installation directory to start all services.

    sh sbin/linkis-start-all.sh

    (2). Check whether the startup succeeded

    You can check the startup status of the services on Eureka. Here is how to check:

    Open http://${EUREKA_INSTALL_IP}:${EUREKA_PORT} on the browser and check if services have registered successfully.

    If you have not specified EUREKA_INSTALL_IP and EUREKA_PORT in config.sh, then the HTTP address is http://127.0.0.1:20303

    As shown in the figure below, if all of the following microservices are registered in Eureka, it means that they have started successfully and are able to work.

    Linkis1.0_Eureka

    - + \ No newline at end of file diff --git a/docs/latest/deployment/sourcecode_hierarchical_structure/index.html b/docs/latest/deployment/sourcecode_hierarchical_structure/index.html index 01f35e5f847..2090ecbf270 100644 --- a/docs/latest/deployment/sourcecode_hierarchical_structure/index.html +++ b/docs/latest/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Source Code Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Source Code Directory Structure

    Description of the Linkis source code directory structure. If you want to learn more about the Linkis modules, please check the Linkis architecture design documents.

    |-- assembly-combined-package //Compile the module of the entire project
    |        |-- assembly-combined
    |        |-- bin
    |        |-- deploy-config
    |        |-- src
    |-- linkis-commons //Core abstraction, which contains all common modules
    |        |-- linkis-common //Common module, built-in many common tools
    |        |-- linkis-hadoop-common
    |        |-- linkis-httpclient //Java SDK top-level interface
    |        |-- linkis-message-scheduler
    |        |-- linkis-module
    |        |-- linkis-mybatis //SpringCloud's Mybatis module
    |        |-- linkis-protocol
    |        |-- linkis-rpc //RPC module, complex two-way communication based on Feign
    |        |-- linkis-scheduler //General scheduling module
    |        |-- linkis-storage
    |-- linkis-computation-governance //computing governance service
    |        |-- linkis-client //Java SDK, users can directly access Linkis through Client
    |        |-- linkis-computation-governance-common
    |        |-- linkis-engineconn
    |        |-- linkis-engineconn-manager
    |        |-- linkis-entrance //General low-level entrance module
    |        |-- linkis-entrance-client
    |        |-- linkis-jdbc-driver
    |        |-- linkis-manager
    |-- linkis-engineconn-plugins
    |        |-- engineconn-plugins
    |        |-- linkis-engineconn-plugin-framework
    |-- linkis-extensions
    |        |-- linkis-io-file-client
    |-- linkis-orchestrator
    |        |-- linkis-code-orchestrator
    |        |-- linkis-computation-orchestrator
    |        |-- linkis-orchestrator-core
    |        |-- plugin
    |-- linkis-public-enhancements //Public enhancement services
    |        |-- linkis-bml //Material library
    |        |-- linkis-context-service //Unified context
    |        |-- linkis-datasource //Data source service
    |        |-- linkis-publicservice //Public Service
    |-- linkis-spring-cloud-services //Microservice governance
    |        |-- linkis-service-discovery
    |        |-- linkis-service-gateway //Gateway
    |-- db //Database information
    |-- license-doc //license details
    |        |-- license //The license of the background project
    |        |-- ui-license //License of the linkis management console
    |-- tool //Tool script
    |        |-- check.sh
    |        |-- dependencies
    |-- web //Management console code of linkis
    |-- scalastyle-config.xml //Scala code format check configuration file
    |-- CONTRIBUTING.md
    |-- CONTRIBUTING_CN.md
    |-- DISCLAIMER-WIP
    |-- LICENSE //LICENSE of the project source code
    |-- LICENSE-binary //LICENSE of binary package
    |-- LICENSE-binary-ui //LICENSE of the front-end compiled package
    |-- NOTICE //NOTICE of project source code
    |-- NOTICE-binary //NOTICE of binary package
    |-- NOTICE-binary-ui //NOTICE of front-end binary package
    |-- licenses-binary //The detailed dependent license files of the binary package
    |-- licenses-binary-ui //The license files that the front-end compiled package depends on in detail
    |-- README.md
    |-- README_CN.md
    - + \ No newline at end of file diff --git a/docs/latest/deployment/start_metadatasource/index.html b/docs/latest/deployment/start_metadatasource/index.html index ae8bdf25173..fca9cffc1ab 100644 --- a/docs/latest/deployment/start_metadatasource/index.html +++ b/docs/latest/deployment/start_metadatasource/index.html @@ -7,7 +7,7 @@ DataSource | Apache Linkis - + @@ -71,7 +71,7 @@ }}
    - + \ No newline at end of file diff --git a/docs/latest/deployment/unpack_hierarchical_structure/index.html b/docs/latest/deployment/unpack_hierarchical_structure/index.html index 42c15ba8f35..eaf691bb3e2 100644 --- a/docs/latest/deployment/unpack_hierarchical_structure/index.html +++ b/docs/latest/deployment/unpack_hierarchical_structure/index.html @@ -7,7 +7,7 @@ installation package directory structure | Apache Linkis - + @@ -17,7 +17,7 @@
    - + \ No newline at end of file diff --git a/docs/latest/deployment/web_install/index.html b/docs/latest/deployment/web_install/index.html index 89aced4605e..67cbbbabb62 100644 --- a/docs/latest/deployment/web_install/index.html +++ b/docs/latest/deployment/web_install/index.html @@ -7,7 +7,7 @@ Linkis Console Deployment | Apache Linkis - + @@ -21,7 +21,7 @@
    1. Copy the front-end package to the corresponding directory: /appcom/Install/linkis/dist; # The directory where the front-end package is decompressed

    2. Start the service: sudo systemctl restart nginx

    3. After execution, you can access it directly in the Chrome browser: http://nginx_ip:nginx_port

    3. Common problems#

    (1) Upload file size limit

    sudo vi /etc/nginx/nginx.conf

    Change upload size

    client_max_body_size 200m

    (2) Interface timeout

    sudo vi /etc/nginx/conf.d/linkis.conf

    Change interface timeout

    proxy_read_timeout 600s
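
    As a hedged sketch of where these directives typically sit in the nginx configuration (the block layout is illustrative; adjust to your own nginx.conf and conf.d/linkis.conf):

    http {
        client_max_body_size 200m;         # upload size limit, in nginx.conf
        server {
            location / {
                proxy_read_timeout 600s;   # interface timeout, in conf.d/linkis.conf
            }
        }
    }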
    - + \ No newline at end of file diff --git a/docs/latest/development/linkis_compile_and_package/index.html b/docs/latest/development/linkis_compile_and_package/index.html index bbb032264ad..f6ae777d206 100644 --- a/docs/latest/development/linkis_compile_and_package/index.html +++ b/docs/latest/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Compile And Package | Apache Linkis - + @@ -20,7 +20,7 @@ Modify the dependency hadoop-hdfs to hadoop-hdfs-client:

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId> <!-- Just replace this line with <artifactId>hadoop-hdfs-client</artifactId> -->
        <version>${hadoop.version}</version>
    </dependency>

    Modify hadoop-hdfs to:

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs-client</artifactId>
        <version>${hadoop.version}</version>
    </dependency>

    5.2 How to modify the Spark and Hive versions that Linkis depends on#

    Here's an example of changing the version of Spark. Go to the directory where the Spark engine is located and manually modify the Spark version information of the pom.xml file as follows:

    cd incubator-linkis-x.x.x/linkis-engineconn-plugins/engineconn-plugins/spark
    vim pom.xml

    <properties>
        <spark.version>2.4.3</spark.version> <!-- Modify the Spark version number here -->
    </properties>

    Modifying the version of other engines is similar to modifying the Spark version. First, enter the directory where the relevant engine is located, and manually modify the engine version information in the pom.xml file.

    Then please refer to 4. Compile an engine

    - + \ No newline at end of file diff --git a/docs/latest/development/linkis_config/index.html b/docs/latest/development/linkis_config/index.html index 4208ac72e5a..6f67f3e9fb4 100644 --- a/docs/latest/development/linkis_config/index.html +++ b/docs/latest/development/linkis_config/index.html @@ -7,7 +7,7 @@ Introduction to Linkis Configuration Parameters | Apache Linkis - + @@ -27,7 +27,7 @@ It mainly specifies the startup parameters and runtime parameters of the engine. These parameters can be set on the client side. It is recommended to use the client side for personalized submission settings. Only the default values ​​are set on the page.

    - + \ No newline at end of file diff --git a/docs/latest/development/linkis_debug/index.html b/docs/latest/development/linkis_debug/index.html index 640f05eb8bc..d4f43b6eb15 100644 --- a/docs/latest/development/linkis_debug/index.html +++ b/docs/latest/development/linkis_debug/index.html @@ -7,7 +7,7 @@ Linkis Debug | Apache Linkis - + @@ -49,7 +49,7 @@ y

    - + \ No newline at end of file diff --git a/docs/latest/development/linkis_debug_in_mac/index.html b/docs/latest/development/linkis_debug_in_mac/index.html index c007649853a..98a0a942318 100644 --- a/docs/latest/development/linkis_debug_in_mac/index.html +++ b/docs/latest/development/linkis_debug_in_mac/index.html @@ -7,7 +7,7 @@ Linkis Debug In Mac | Apache Linkis - + @@ -51,7 +51,7 @@ wds.linkis.engineconn.plugin.loader.store.path=/Users/leojie/other_project/apache/linkis/incubator-linkis/linkis-engineconn-plugins/shell/target/out

    The two configurations here mainly specify the root directory of the engine material storage. The main purpose of pointing it to target/out is that after the engine-related code or configuration changes, the engineplugin service can simply be restarted for the changes to take effect.

    3.12 Set sudo password-free for the current user#

    When the engine is started, sudo needs to be used to execute the shell command to start the engine process. The current user on the mac generally needs to enter a password when using sudo. Therefore, it is necessary to set sudo password-free for the current user. The setting method is as follows:

    sudo chmod u-w /etc/sudoers
    sudo visudo
    # Replace "#%admin ALL=(ALL) ALL" with "%admin ALL=(ALL) NOPASSWD: ALL"
    # Save the file and exit

    3.13 Service Testing#

    Make sure that the above services are all successfully started, and then test and submit the shell script job in postman.

    First visit the login interface to generate a cookie:

    login

    Then submit the shell code for execution

    POST: http://127.0.0.1:9001/api/rest_j/v1/entrance/submit

    body parameter:

    {  "executionContent": {    "code": "echo 'hello'",    "runType": "shell"  },  "params": {    "variable": {      "testvar": "hello"    },    "configuration": {      "runtime": {},      "startup": {}    }  },  "source": {    "scriptPath": "file:///tmp/hadoop/test.sql"  },  "labels": {    "engineType": "shell-1",    "userCreator": "leojie-IDE"  }}

    Execution result:

    {    "method": "/api/entrance/submit",    "status": 0,    "message": "OK",    "data": {        "taskID": 1,        "execID": "exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0"    }}

    Finally, check the running status of the task and get the running result set:

    GET http://127.0.0.1:9001/api/rest_j/v1/entrance/exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0/progress

    {    "method": "/api/entrance/exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0/progress",    "status": 0,    "message": "OK",    "data": {        "progress": 1,        "progressInfo": [],        "execID": "exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0"    }}

    GET http://127.0.0.1:9001/api/rest_j/v1/jobhistory/1/get

    GET http://127.0.0.1:9001/api/rest_j/v1/filesystem/openFile?path=file:///Users/leojie/software/linkis/data/resultSetDir/leojie/linkis/2022-07-16/214859/IDE/1/1_0.dolphin

    {    "method": "/api/filesystem/openFile",    "status": 0,    "message": "OK",    "data": {        "metadata": "NULL",        "totalPage": 0,        "totalLine": 1,        "page": 1,        "type": "1",        "fileContent": [            [                "hello"            ]        ]    }}
    - + \ No newline at end of file diff --git a/docs/latest/development/new_engine_conn/index.html b/docs/latest/development/new_engine_conn/index.html index a4373c584b6..7624ca9840c 100644 --- a/docs/latest/development/new_engine_conn/index.html +++ b/docs/latest/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ How To Quickly Implement A New Engine | Apache Linkis - + @@ -52,7 +52,7 @@ const NODEICON = { [NODETYPE.JDBC]: { icon: jdbc, class: {'jdbc': true} },}

    Add the icon of the new engine in the web/src/apps/workflows/module/process/images/newIcon/ directory

    web/src/apps/workflows/module/process/images/newIcon/jdbc

    Also, when contributing to the community, please consider the license and copyright of the svg file.

    3. Chapter Summary#

    The above content records the implementation process of the new engine, as well as some additional engine configurations that need to be done. At present, the expansion process of a new engine is still relatively cumbersome, and it is hoped that the expansion and installation of the new engine can be optimized in subsequent versions.

    - + \ No newline at end of file diff --git a/docs/latest/development/web_build/index.html b/docs/latest/development/web_build/index.html index 4a78519f508..bb1283d1d8d 100644 --- a/docs/latest/development/web_build/index.html +++ b/docs/latest/development/web_build/index.html @@ -7,7 +7,7 @@ Linkis Console Compile | Apache Linkis - + @@ -17,7 +17,7 @@ When you run the project in this way, the effect of your code changes will be dynamically reflected in the browser.

    Note: Because the front end and back end of the project are developed separately, when running in a local browser, the browser needs to allow cross-domain requests to access the back-end interface. For specific settings, please refer to solving the chrome cross-domain problem.

    6. Common problem#

    6.1 npm install cannot succeed#

    If you encounter this situation, you can use the domestic Taobao npm mirror:

    npm install -g cnpm --registry=https://registry.npm.taobao.org

    Then, replace the npm install command by executing the following command

    cnpm install

    Note that when the project is started and packaged, you can still use the npm run build and npm run serve commands

    - + \ No newline at end of file diff --git a/docs/latest/engine_usage/flink/index.html b/docs/latest/engine_usage/flink/index.html index f55353616d2..530b3567580 100644 --- a/docs/latest/engine_usage/flink/index.html +++ b/docs/latest/engine_usage/flink/index.html @@ -7,7 +7,7 @@ Flink Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ EngineConnPlugin Installation

    2.3 Flink engine tags#

    Linkis 1.0 manages engines through tags, so we need to insert data into our database. The way of inserting is shown below.

    EngineConnPlugin Installation > 2.2 Configuration modification of management console (optional)

    3. The use of Flink engine#

    Preparation operation, queue setting#

    The Flink engine of Linkis 1.0 is started by flink on yarn, so you need to specify the queue used by the user. The way to specify the queue is shown in Figure 3-1.

    Figure 3-1 Queue settings

    Prepare knowledge, two ways to use Flink engine#

    Linkis' Flink engine has two execution methods. One is the ComputationEngineConn method, which is mainly used in DSS-Scriptis or Streamis-Datasource for debugging, sampling, and verifying the correctness of the flink code; the other is the OnceEngineConn method, which is mainly used to start a streaming application in the Streamis production center.

    Prepare knowledge, Connector plug-in of FlinkSQL#

    FlinkSQL supports a variety of data sources, such as binlog, kafka, hive, etc. If you want to use these data sources in Flink code, you need to put the plug-in jar packages of these connectors into the lib of the flink engine and restart the Linkis EnginePlugin service. For example, if you want to use binlog as a data source in your FlinkSQL, you need to put flink-connector-mysql-cdc-1.1.1.jar into the lib of the flink engine.

    cd ${LINKIS_HOME}/sbin
    sh linkis-daemon.sh restart cg-engineplugin

    3.1 ComputationEngineConn method#

    In order to facilitate sampling and debugging, we have added a script type of fql to Scriptis, which is specifically used to execute FlinkSQL. But you need to ensure that your DSS has been upgraded to DSS1.0.0. After upgrading to DSS1.0.0, you can directly enter Scriptis and create a new fql script for editing and execution.

    FlinkSQL writing example, taking binlog as an example

    CREATE TABLE mysql_binlog (
      id INT NOT NULL,
      name STRING,
      age INT
    ) WITH (
      'connector' = 'mysql-cdc',
      'hostname' = 'ip',
      'port' = 'port',
      'username' = 'username',
      'password' = 'password',
      'database-name' = 'dbname',
      'table-name' = 'tablename',
      'debezium.snapshot.locking.mode' = 'none' -- It is recommended to add this, otherwise the table will be locked
    );
    select * from mysql_binlog where id > 10;

    When debugging with select syntax in Scriptis, the Flink engine has an automatic cancel mechanism: when the specified time is reached or the number of sampled rows reaches the specified limit, the Flink engine actively cancels the task and persists the result set obtained so far; the front end then calls the open-result-set interface to display it.

    3.2 Task submission via Linkis-cli#

    After Linkis 1.0, a cli method is provided to submit tasks. We only need to specify the corresponding EngineConn and CodeType tag types. The use of Flink is as follows:

    sh ./bin/linkis-cli -engineType flink-1.12.2 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to: Linkis CLI Manual.

    3.3 OnceEngineConn method#

    The OnceEngineConn method is used to formally start Flink streaming applications. It calls LinkisManager's createEngineConn interface through LinkisManagerClient and sends the code to the created Flink engine, which then starts executing. Other systems, such as Streamis, can make this call. Using the Client is also very simple: first create a new maven project, or introduce the following dependency into your project

    <dependency>
        <groupId>com.webank.wedatasphere.linkis</groupId>
        <artifactId>linkis-computation-client</artifactId>
        <version>${linkis.version}</version>
    </dependency>

    Then create a new scala test file and click Execute to complete the analysis of binlog data and insert it into another mysql database table. Note that you must create a resources directory in the maven project and place a linkis.properties file in it, specifying the gateway address and api version of linkis, for example:

    wds.linkis.server.version=v1
    wds.linkis.gateway.url=http://ip:9001/
    object OnceJobTest {
      def main(args: Array[String]): Unit = {
        val sql = """CREATE TABLE mysql_binlog (
                    | id INT NOT NULL,
                    | name STRING,
                    | age INT
                    |) WITH (
                    | 'connector' = 'mysql-cdc',
                    | 'hostname' = 'ip',
                    | 'port' = 'port',
                    | 'username' = '${username}',
                    | 'password' = '${password}',
                    | 'database-name' = '${database}',
                    | 'table-name' = '${tablename}',
                    | 'debezium.snapshot.locking.mode' = 'none'
                    |);
                    |CREATE TABLE sink_table (
                    | id INT NOT NULL,
                    | name STRING,
                    | age INT,
                    | primary key(id) not enforced
                    |) WITH (
                    |  'connector' = 'jdbc',
                    |  'url' = 'jdbc:mysql://${ip}:port/${database}',
                    |  'table-name' = '${tablename}',
                    |  'driver' = 'com.mysql.jdbc.Driver',
                    |  'username' = '${username}',
                    |  'password' = '${password}'
                    |);
                    |INSERT INTO sink_table SELECT id, name, age FROM mysql_binlog;
                    |""".stripMargin
        val onceJob = SimpleOnceJob.builder().setCreateService("Flink-Test")
          .addLabel(LabelKeyUtils.ENGINE_TYPE_LABEL_KEY, "flink-1.12.2")
          .addLabel(LabelKeyUtils.USER_CREATOR_LABEL_KEY, "hadoop-Streamis")
          .addLabel(LabelKeyUtils.ENGINE_CONN_MODE_LABEL_KEY, "once")
          .addStartupParam(Configuration.IS_TEST_MODE.key, true)
          // .addStartupParam("label." + LabelKeyConstant.CODE_TYPE_KEY, "sql")
          .setMaxSubmitTime(300000)
          .addExecuteUser("hadoop")
          .addJobContent("runType", "sql")
          .addJobContent("code", sql)
          .addSource("jobName", "OnceJobTest")
          .build()
        onceJob.submit()
        println(onceJob.getId)
        onceJob.waitForCompleted()
        System.exit(0)
      }
    }
    - + \ No newline at end of file diff --git a/docs/latest/engine_usage/hive/index.html b/docs/latest/engine_usage/hive/index.html index 9ebed7fa812..06546801ceb 100644 --- a/docs/latest/engine_usage/hive/index.html +++ b/docs/latest/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive Engine Usage | Apache Linkis - + @@ -26,7 +26,7 @@ </loggers></configuration>
    - + \ No newline at end of file diff --git a/docs/latest/engine_usage/jdbc/index.html b/docs/latest/engine_usage/jdbc/index.html index 45dd8be788e..e982080e69e 100644 --- a/docs/latest/engine_usage/jdbc/index.html +++ b/docs/latest/engine_usage/jdbc/index.html @@ -7,7 +7,7 @@ JDBC Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

        Map<String, Object> labels = new HashMap<String, Object>();
        labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "jdbc-4"); // required engineType Label
        labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
        labels.put(LabelKeyConstant.CODE_TYPE_KEY, "jdbc"); // required codeType

    3.2 How to use Linkis-cli#

    After Linkis 1.0, you can submit tasks through cli. We only need to specify the corresponding EngineConn and CodeType tag types. The use of JDBC is as follows:

    sh ./bin/linkis-cli -engineType jdbc-4 -codeType jdbc -code "show tables"  -submitUser hadoop -proxyUser hadoop

    The specific usage can refer to Linkis CLI Manual.

    3.3 How to use Scriptis#

    The way to use Scriptis is the simplest. You can go directly to Scriptis, right-click the directory and create a new JDBC script, write JDBC code and click Execute.

    The execution principle of JDBC is to load the JDBC Driver, submit the sql to the SQL server for execution, and obtain and return the result set.
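
    As a rough, self-contained sketch of that flow (not the actual Linkis source; the driver class, URL, credentials, and query are placeholders):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class JdbcFlowSketch {
        public static void main(String[] args) throws Exception {
            // Load the JDBC driver and open a connection (placeholder URL/credentials)
            Class.forName("com.mysql.jdbc.Driver");
            try (Connection conn = DriverManager.getConnection(
                    "jdbc:mysql://127.0.0.1:3306/testdb", "user", "password");
                 Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery("show tables")) {
                // Obtain the result set and return each row to the caller
                while (rs.next()) {
                    System.out.println(rs.getString(1));
                }
            }
        }
    }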

    Figure 3-2 Screenshot of the execution effect of JDBC

    4. JDBC EngineConn user settings#

    JDBC user settings are mainly JDBC connection information, but it is recommended that users encrypt and manage this password and other information.

    - + \ No newline at end of file diff --git a/docs/latest/engine_usage/openlookeng/index.html b/docs/latest/engine_usage/openlookeng/index.html index e160e18a300..67ca9dc267c 100644 --- a/docs/latest/engine_usage/openlookeng/index.html +++ b/docs/latest/engine_usage/openlookeng/index.html @@ -7,7 +7,7 @@ OpenLookEng Engine | Apache Linkis - + @@ -19,7 +19,7 @@ For the openlookeng task, you only need to modify the EngineConnType and CodeType parameters in the Demo:

        Map<String, Object> labels = new HashMap<String, Object>();
        labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "openlookeng-1.5.0"); // required engineType Label
        labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
        labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType

    3.2 Task submission via Linkis-cli#

    After Linkis 1.0, the cli method is provided to submit tasks. We only need to specify the corresponding EngineConn and CodeType tag types. The use of openlookeng is as follows:

    sh ./bin/linkis-cli -engineType openlookeng-1.5.0 -codeType sql -code 'show databases;' -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to: Linkis CLI Manual.

    - + \ No newline at end of file diff --git a/docs/latest/engine_usage/overview/index.html b/docs/latest/engine_usage/overview/index.html index 1fff265602e..ec51ec37d28 100644 --- a/docs/latest/engine_usage/overview/index.html +++ b/docs/latest/engine_usage/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -16,7 +16,7 @@         The engine is a component that provides users with data processing and analysis capabilities. Currently, it has been connected to Linkis's engine, including mainstream big data computing engines Spark, Hive, Presto, etc. , There are also engines with the ability to process data in scripts such as python and Shell. DataSphereStudio is a one-stop data operation platform docked with Linkis. Users can conveniently use the engine supported by Linkis in DataSphereStudio to complete interactive data analysis tasks and workflow tasks.

    | Engine | Whether to support Scriptis | Whether to support workflow |
    | --- | --- | --- |
    | Spark | Support | Support |
    | Hive | Support | Support |
    | Presto | Support | Support |
    | ElasticSearch | Support | Support |
    | Python | Support | Support |
    | Shell | Support | Support |
    | JDBC | Support | Support |
    | MySQL | Support | Support |
    | Flink | Support | Support |

    2. Document structure#

    You can refer to the following documents for the related documents of the engines that have been accessed.

    - + \ No newline at end of file diff --git a/docs/latest/engine_usage/pipeline/index.html b/docs/latest/engine_usage/pipeline/index.html index 84b1db100e7..db125f751fb 100644 --- a/docs/latest/engine_usage/pipeline/index.html +++ b/docs/latest/engine_usage/pipeline/index.html @@ -7,7 +7,7 @@ pipeline engine | Apache Linkis - + @@ -20,7 +20,7 @@

    - + \ No newline at end of file diff --git a/docs/latest/engine_usage/python/index.html b/docs/latest/engine_usage/python/index.html index 583d86975f8..6a732ea1a53 100644 --- a/docs/latest/engine_usage/python/index.html +++ b/docs/latest/engine_usage/python/index.html @@ -7,7 +7,7 @@ Python Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Gateway, and then the Python EngineConn submits the code to the python executor for execution.

    Figure 3-1 Screenshot of the execution effect of python

    4. Python EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the version of python and some modules that python needs to load.

    Figure 4-1 User-defined configuration management console of python

    - + \ No newline at end of file diff --git a/docs/latest/engine_usage/shell/index.html b/docs/latest/engine_usage/shell/index.html index 16314ab7365..f7202b13a1a 100644 --- a/docs/latest/engine_usage/shell/index.html +++ b/docs/latest/engine_usage/shell/index.html @@ -7,7 +7,7 @@ Shell Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

        Map<String, Object> labels = new HashMap<String, Object>();
        labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "shell-1"); // required engineType Label
        labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
        labels.put(LabelKeyConstant.CODE_TYPE_KEY, "shell"); // required codeType

    3.2 How to use Linkis-cli#

    After Linkis 1.0, you can submit tasks through cli. We only need to specify the corresponding EngineConn and CodeType tag types. The use of shell is as follows:

    sh ./bin/linkis-cli -engineType shell-1 -codeType shell -code "echo \"hello\" "  -submitUser hadoop -proxyUser hadoop

    The specific usage can refer to Linkis CLI Manual.

    3.3 How to use Scriptis#

    The use of Scriptis is the simplest. You can directly enter Scriptis, right-click the directory and create a new shell script, write shell code and click Execute.

    The execution principle of the shell is that the shell EngineConn starts a system process through Java's built-in ProcessBuilder, redirects the output of the process to the EngineConn, and writes it to the log.
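
    As a rough, self-contained sketch of that mechanism (not the actual Linkis source; the command is a placeholder):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;

    public class ShellExecSketch {
        public static void main(String[] args) throws Exception {
            // Start a system process for the shell code (placeholder command)
            ProcessBuilder pb = new ProcessBuilder("sh", "-c", "echo \"hello\"");
            pb.redirectErrorStream(true); // merge stderr into stdout
            Process process = pb.start();
            // Redirect the process output, as an EngineConn would write it to its log
            try (BufferedReader reader = new BufferedReader(
                    new InputStreamReader(process.getInputStream()))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    System.out.println(line);
                }
            }
            System.out.println("exit code: " + process.waitFor());
        }
    }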

    Figure 3-1 Screenshot of shell execution effect

    4. Shell EngineConn user settings#

    The shell EngineConn can generally set the maximum memory of the EngineConn JVM.

    - + \ No newline at end of file diff --git a/docs/latest/engine_usage/spark/index.html b/docs/latest/engine_usage/spark/index.html index 17643c2c161..79791d803ac 100644 --- a/docs/latest/engine_usage/spark/index.html +++ b/docs/latest/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Figure 3-4 pyspark execution mode

    4. Spark EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the number of spark session executors and the memory of the executors. These parameters are for users to set their own spark parameters more freely, and other spark parameters can also be modified, such as the python version of pyspark.

    Figure 4-1 Spark user-defined configuration management console

    - + \ No newline at end of file diff --git a/docs/latest/engine_usage/sqoop/index.html b/docs/latest/engine_usage/sqoop/index.html index a51c5ec2b78..53456aa3352 100644 --- a/docs/latest/engine_usage/sqoop/index.html +++ b/docs/latest/engine_usage/sqoop/index.html @@ -7,7 +7,7 @@ Sqoop Engine | Apache Linkis - + @@ -25,7 +25,7 @@ def exportJob(jobBuilder: SimpleOnceJobBuilder): SubmittableSimpleOnceJob = { jobBuilder .addJobContent("sqoop.env.mapreduce.job.queuename", "queue1") .addJobContent("sqoop.mode", "import") .addJobContent("sqoop.args.connect", "jdbc:mysql://127.0.0.1:3306/exchangis") .addJobContent("sqoop.args.query", "select id as order, sno as great_time from" + " exchangis_table where sno =1 and $CONDITIONS") .addJobContent("sqoop.args.hcatalog.database", "hadoop") .addJobContent("sqoop.args.hcatalog.table", "partition_33") .addJobContent("sqoop.args.hcatalog.partition.keys", "month") .addJobContent("sqoop.args.hcatalog.partition.values", "4") .addJobContent("sqoop.args.num.mappers", "1") .build() }

    Parameter comparison table (with native parameters):

    sqoop.env.mapreduce.job.queuename<=>-Dmapreduce.job.queuename
    sqoop.args.connection.manager<===>--connection-manager
    sqoop.args.connection.param.file<===>--connection-param-file
    sqoop.args.driver<===>--driver
    sqoop.args.hadoop.home<===>--hadoop-home
    sqoop.args.hadoop.mapred.home<===>--hadoop-mapred-home
    sqoop.args.help<===>help
    sqoop.args.password<===>--password
    sqoop.args.password.alias<===>--password-alias
    sqoop.args.password.file<===>--password-file
    sqoop.args.relaxed.isolation<===>--relaxed-isolation
    sqoop.args.skip.dist.cache<===>--skip-dist-cache
    sqoop.args.username<===>--username
    sqoop.args.verbose<===>--verbose
    sqoop.args.append<===>--append
    sqoop.args.as.avrodatafile<===>--as-avrodatafile
    sqoop.args.as.parquetfile<===>--as-parquetfile
    sqoop.args.as.sequencefile<===>--as-sequencefile
    sqoop.args.as.textfile<===>--as-textfile
    sqoop.args.autoreset.to.one.mapper<===>--autoreset-to-one-mapper
    sqoop.args.boundary.query<===>--boundary-query
    sqoop.args.case.insensitive<===>--case-insensitive
    sqoop.args.columns<===>--columns
    sqoop.args.compression.codec<===>--compression-codec
    sqoop.args.delete.target.dir<===>--delete-target-dir
    sqoop.args.direct<===>--direct
    sqoop.args.direct.split.size<===>--direct-split-size
    sqoop.args.query<===>--query
    sqoop.args.fetch.size<===>--fetch-size
    sqoop.args.inline.lob.limit<===>--inline-lob-limit
    sqoop.args.num.mappers<===>--num-mappers
    sqoop.args.mapreduce.job.name<===>--mapreduce-job-name
    sqoop.args.merge.key<===>--merge-key
    sqoop.args.split.by<===>--split-by
    sqoop.args.table<===>--table
    sqoop.args.target.dir<===>--target-dir
    sqoop.args.validate<===>--validate
    sqoop.args.validation.failurehandler<===>--validation-failurehandler
    sqoop.args.validation.threshold<===>--validation-threshold
    sqoop.args.validator<===>--validator
    sqoop.args.warehouse.dir<===>--warehouse-dir
    sqoop.args.where<===>--where
    sqoop.args.compress<===>--compress
    sqoop.args.check.column<===>--check-column
    sqoop.args.incremental<===>--incremental
    sqoop.args.last.value<===>--last-value
    sqoop.args.enclosed.by<===>--enclosed-by
    sqoop.args.escaped.by<===>--escaped-by
    sqoop.args.fields.terminated.by<===>--fields-terminated-by
    sqoop.args.lines.terminated.by<===>--lines-terminated-by
    sqoop.args.mysql.delimiters<===>--mysql-delimiters
    sqoop.args.optionally.enclosed.by<===>--optionally-enclosed-by
    sqoop.args.input.enclosed.by<===>--input-enclosed-by
    sqoop.args.input.escaped.by<===>--input-escaped-by
    sqoop.args.input.fields.terminated.by<===>--input-fields-terminated-by
    sqoop.args.input.lines.terminated.by<===>--input-lines-terminated-by
    sqoop.args.input.optionally.enclosed.by<===>--input-optionally-enclosed-by
    sqoop.args.create.hive.table<===>--create-hive-table
    sqoop.args.hive.delims.replacement<===>--hive-delims-replacement
    sqoop.args.hive.database<===>--hive-database
    sqoop.args.hive.drop.import.delims<===>--hive-drop-import-delims
    sqoop.args.hive.home<===>--hive-home
    sqoop.args.hive.import<===>--hive-import
    sqoop.args.hive.overwrite<===>--hive-overwrite
    sqoop.args.hive.partition.value<===>--hive-partition-value
    sqoop.args.hive.table<===>--hive-table
    sqoop.args.column.family<===>--column-family
    sqoop.args.hbase.bulkload<===>--hbase-bulkload
    sqoop.args.hbase.create.table<===>--hbase-create-table
    sqoop.args.hbase.row.key<===>--hbase-row-key
    sqoop.args.hbase.table<===>--hbase-table
    sqoop.args.hcatalog.database<===>--hcatalog-database
    sqoop.args.hcatalog.home<===>--hcatalog-home
    sqoop.args.hcatalog.partition.keys<===>--hcatalog-partition-keys
    sqoop.args.hcatalog.partition.values<===>--hcatalog-partition-values
    sqoop.args.hcatalog.table<===>--hcatalog-table
    sqoop.args.hive.partition.key<===>--hive-partition-key
    sqoop.args.map.column.hive<===>--map-column-hive
    sqoop.args.create.hcatalog.table<===>--create-hcatalog-table
    sqoop.args.hcatalog.storage.stanza<===>--hcatalog-storage-stanza
    sqoop.args.accumulo.batch.size<===>--accumulo-batch-size
    sqoop.args.accumulo.column.family<===>--accumulo-column-family
    sqoop.args.accumulo.create.table<===>--accumulo-create-table
    sqoop.args.accumulo.instance<===>--accumulo-instance
    sqoop.args.accumulo.max.latency<===>--accumulo-max-latency
    sqoop.args.accumulo.password<===>--accumulo-password
    sqoop.args.accumulo.row.key<===>--accumulo-row-key
    sqoop.args.accumulo.table<===>--accumulo-table
    sqoop.args.accumulo.user<===>--accumulo-user
    sqoop.args.accumulo.visibility<===>--accumulo-visibility
    sqoop.args.accumulo.zookeepers<===>--accumulo-zookeepers
    sqoop.args.bindir<===>--bindir
    sqoop.args.class.name<===>--class-name
    sqoop.args.input.null.non.string<===>--input-null-non-string
    sqoop.args.input.null.string<===>--input-null-string
    sqoop.args.jar.file<===>--jar-file
    sqoop.args.map.column.java<===>--map-column-java
    sqoop.args.null.non.string<===>--null-non-string
    sqoop.args.null.string<===>--null-string
    sqoop.args.outdir<===>--outdir
    sqoop.args.package.name<===>--package-name
    sqoop.args.conf<===>-conf
    sqoop.args.D<===>-D
    sqoop.args.fs<===>-fs
    sqoop.args.jt<===>-jt
    sqoop.args.files<===>-files
    sqoop.args.libjars<===>-libjars
    sqoop.args.archives<===>-archives
    sqoop.args.update.key<===>--update-key
    sqoop.args.update.mode<===>--update-mode
    sqoop.args.export.dir<===>--export-dir
    - + \ No newline at end of file diff --git a/docs/latest/introduction/index.html b/docs/latest/introduction/index.html index b2932814b20..28c73add99e 100644 --- a/docs/latest/introduction/index.html +++ b/docs/latest/introduction/index.html @@ -7,7 +7,7 @@ Introduction | Apache Linkis - + @@ -20,7 +20,7 @@ Since the first release of Linkis in 2019, it has accumulated more than 700 trial companies and 1000+ sandbox trial users, which involving diverse industries, from finance, banking, tele-communication, to manufactory, internet companies and so on.

    - + \ No newline at end of file diff --git a/docs/latest/release/index.html b/docs/latest/release/index.html index e86d3260d97..a580654d61e 100644 --- a/docs/latest/release/index.html +++ b/docs/latest/release/index.html @@ -7,7 +7,7 @@ Version Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Version Overview

    Configuration Item#

    | Module Name (Service Name) | Type | Parameter Name | Default Value | Description |
    | --- | --- | --- | --- | --- |
    | common | New | linkis.codeType.runType.relation | sql=>sql\|hql\|jdbc\|hive\|psql\|fql, python=>python\|py\|pyspark, java=>java, scala=>scala, shell=>sh\|shell | mapping relationship between codeType and runType |
    | rpc | New | linkis.rpc.spring.params.enable | false | Controls the ribbon mode parameter switch of the RPC module |
    | ec | New | linkis.engineconn.max.parallelism | 300 | Asynchronous execution supports setting the number of concurrent job groups |
    | ec | New | linkis.engineconn.async.group.max.running | 10 | |
    | ec-flink | New | linkis.flink.execution.attached | true | |
    | ec-flink | New | linkis.flink.kerberos.enable | false | |
    | ec-flink | New | linkis.flink.kerberos.login.contexts | Client,KafkaClient | |
    | ec-flink | New | linkis.flink.kerberos.login.keytab | | |
    | ec-flink | New | linkis.flink.kerberos.login.principal | | |
    | ec-flink | New | linkis.flink.kerberos.krb5-conf.path | | |
    | ec-flink | New | linkis.flink.params.placeholder.blank | \0x001 | |
    | ec-sqoop | New | sqoop.task.map.memory | 2 | |
    | ec-sqoop | New | sqoop.task.map.cpu.cores | 1 | |
    | ec-sqoop | New | sqoop.params.name.mode | sqoop.mode | |
    | ec-sqoop | New | sqoop.params.name.prefix | sqoop.args. | |
    | ec-sqoop | New | sqoop.params.name.env.prefix | sqoop.env. | |
    | ec-sqoop | New | linkis.hadoop.site.xml | /etc/hadoop/conf/core-site.xml;/etc/hadoop/conf/hdfs-site.xml;/etc/hadoop/conf/yarn-site.xml;/etc/hadoop/conf/mapred-site.xml | set sqoop to load hadoop parameter file location |
    | ec-sqoop | New | sqoop.fetch.status.interval | 5s | Sets the interval for obtaining sqoop execution status |

    DB Table Changes#

    no change

    - + \ No newline at end of file diff --git a/docs/latest/table/udf-table/index.html b/docs/latest/table/udf-table/index.html index 912baae21bc..060366444fe 100644 --- a/docs/latest/table/udf-table/index.html +++ b/docs/latest/table/udf-table/index.html @@ -7,7 +7,7 @@ UDF table structure | Apache Linkis - + @@ -16,7 +16,7 @@ udf_type 3: custom function - python functionudf_type 4: custom function - scala function

    2 linkis_ps_udf_manager#

    The administrator user table of the udf function, which controls sharing permissions; only udf administrators see the sharing entry on the front end

    | number | name | description | type | key | empty | extra | default value |
    | --- | --- | --- | --- | --- | --- | --- | --- |
    | 1 | id | | bigint(20) | PRI | NO | auto_increment | |
    | 2 | user_name | | varchar(20) | | YES | | |

    3 linkis_ps_udf_shared_info#

    udf shared record table

    | number | name | description | type | key | empty | extra | default value |
    | --- | --- | --- | --- | --- | --- | --- | --- |
    | 1 | id | | bigint(20) | PRI | NO | auto_increment | |
    | 2 | udf_id | id of linkis_ps_udf_baseinfo | bigint(20) | | NO | | |
    | 3 | user_name | username used by the share | varchar(50) | | NO | | |

    4 linkis_ps_udf_tree#

    Tree-level record table for udf classification

    | number | name | description | type | key | empty | extra | default value |
    | --- | --- | --- | --- | --- | --- | --- | --- |
    | 1 | id | | bigint(20) | PRI | NO | auto_increment | |
    | 2 | parent | parent category | bigint(20) | | NO | | |
    | 3 | name | Class name of the function | varchar(100) | | YES | | |
    | 4 | user_name | username | varchar(50) | | NO | | |
    | 5 | description | description information | varchar(255) | | YES | | |
    | 6 | create_time | | timestamp | | NO | on update CURRENT_TIMESTAMP | CURRENT_TIMESTAMP |
    | 7 | update_time | | timestamp | | NO | | CURRENT_TIMESTAMP |
    | 8 | category | category distinction udf / function | varchar(50) | | YES | | |

    5 linkis_ps_udf_user_load#

    Records whether a udf is loaded by default

    | number | name | description | type | key | empty | extra | default value |
    | --- | --- | --- | --- | --- | --- | --- | --- |
    | 1 | id | | bigint(20) | PRI | NO | auto_increment | |
    | 2 | udf_id | id of linkis_ps_udf_baseinfo | int(11) | | NO | | |
    | 3 | user_name | user owned | varchar(50) | | NO | | |

    6 linkis_ps_udf_version#

    udf version information table

    | number | name | description | type | key | empty | extra | default value |
    | --- | --- | --- | --- | --- | --- | --- | --- |
    | 1 | id | | bigint(20) | PRI | NO | auto_increment | |
    | 2 | udf_id | id of linkis_ps_udf_baseinfo | bigint(20) | | NO | | |
    | 3 | path | The local path of the uploaded script/jar package | varchar(255) | | NO | | |
    | 4 | bml_resource_id | Material resource id in bml | varchar(50) | | NO | | |
    | 5 | bml_resource_version | bml material version | varchar(20) | | NO | | |
    | 6 | is_published | whether to publish | bit(1) | | YES | | |
    | 7 | register_format | registration format | varchar(255) | | YES | | |
    | 8 | use_format | use format | varchar(255) | | YES | | |
    | 9 | description | Version description | varchar(255) | | NO | | |
    | 10 | create_time | | timestamp | | NO | on update CURRENT_TIMESTAMP | CURRENT_TIMESTAMP |
    | 11 | md5 | | varchar(100) | | YES | | |

    ER diagram#

    (ER diagram image)

    - + \ No newline at end of file diff --git a/docs/latest/tags/index.html b/docs/latest/tags/index.html index 62de7ad8b94..32eedc8d8e9 100644 --- a/docs/latest/tags/index.html +++ b/docs/latest/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -15,7 +15,7 @@

    Tags

    - + \ No newline at end of file diff --git a/docs/latest/tuning_and_troubleshooting/configuration/index.html b/docs/latest/tuning_and_troubleshooting/configuration/index.html index ef589f1f811..1a736ba4adb 100644 --- a/docs/latest/tuning_and_troubleshooting/configuration/index.html +++ b/docs/latest/tuning_and_troubleshooting/configuration/index.html @@ -7,7 +7,7 @@ Configurations | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Linkis1.0 Configurations

    The configuration of Linkis1.0 is simplified on the basis of Linkis0.x. A public configuration file linkis.properties is provided in the conf directory to avoid the need for common configuration parameters to be configured in multiple microservices at the same time. This document will list the parameters of Linkis1.0 in modules.

            Please note: this article only lists the Linkis configuration parameters that affect operating performance or depend on the environment. Many configuration parameters that users do not need to care about have been omitted; interested users can browse the source code.

    1 General configuration#

            The general configuration can be set in the global linkis.properties; set once, it takes effect for every microservice.
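
    For example, a minimal sketch of such global settings in conf/linkis.properties (values taken from the tables below):

    wds.linkis.encoding=utf-8
    wds.linkis.test.mode=false
    wds.linkis.home=/appcom/Install/LinkisInstall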

    1.1 Global configurations#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.encoding | utf-8 | Linkis default encoding format |
    | wds.linkis.date.pattern | yyyy-MM-dd'T'HH:mm:ssZ | Default date format |
    | wds.linkis.test.mode | false | Whether to enable debugging mode; if set to true, all microservices support password-free login, and all EngineConn open remote debugging ports |
    | wds.linkis.test.user | None | When wds.linkis.test.mode=true, the default login user for password-free login |
    | wds.linkis.home | /appcom/Install/LinkisInstall | Linkis installation directory; if it does not exist, it will automatically use the value of LINKIS_HOME |
    | wds.linkis.httpclient.default.connect.timeOut | 50000 | Linkis HttpClient default connection timeout |

    1.2 LDAP configurations#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.ldap.proxy.url | None | LDAP URL address |
    | wds.linkis.ldap.proxy.baseDN | None | LDAP baseDN address |
    | wds.linkis.ldap.proxy.userNameFormat | None | |

    1.3 Hadoop configuration parameters#

    | Parameter name | Default value | Description |
    | -------------- | ------------- | ----------- |
    | wds.linkis.hadoop.root.user | hadoop | HDFS super user |
    | wds.linkis.filesystem.hdfs.root.path | None | User's HDFS default root path |
    | wds.linkis.keytab.enable | false | Whether to enable kerberos |
    | wds.linkis.keytab.file | /appcom/keytab | Kerberos keytab path, effective only when wds.linkis.keytab.enable=true |
    | wds.linkis.keytab.host.enabled | false | |
    | wds.linkis.keytab.host | 127.0.0.1 | |
    | hadoop.config.dir | None | If not configured, it is read from the environment variable HADOOP_CONF_DIR |
    | wds.linkis.hadoop.external.conf.dir.prefix | /appcom/config/external-conf/hadoop | Additional hadoop configuration |
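
    For instance, enabling Kerberos for a secured Hadoop cluster could be sketched as follows (the keytab and Hadoop configuration paths are placeholders for your environment):

        # Enable Kerberos (sketch; paths are placeholders)
        wds.linkis.keytab.enable=true
        wds.linkis.keytab.file=/appcom/keytab
        hadoop.config.dir=/etc/hadoop/conf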

    1.4 Linkis RPC configuration parameters#

    | Parameter name | Default value | Description |
    | -------------- | ------------- | ----------- |
    | wds.linkis.rpc.broadcast.thread.num | 10 | Number of Linkis RPC broadcast threads (recommended default) |
    | wds.linkis.ms.rpc.sync.timeout | 60000 | Default processing timeout of the Linkis RPC Receiver |
    | wds.linkis.rpc.eureka.client.refresh.interval | 1s | Refresh interval of the Eureka client's microservice list (recommended default) |
    | wds.linkis.rpc.eureka.client.refresh.wait.time.max | 1m | Maximum waiting time for a refresh (recommended default) |
    | wds.linkis.rpc.receiver.asyn.consumer.thread.max | 10 | Maximum number of Receiver Consumer threads (if there are many online users, it is recommended to increase this appropriately) |
    | wds.linkis.rpc.receiver.asyn.consumer.freeTime.max | 2m | Receiver Consumer maximum idle time |
    | wds.linkis.rpc.receiver.asyn.queue.size.max | 1000 | Maximum number of buffers in the receiver consumption queue (if there are many online users, it is recommended to increase this appropriately) |
    | wds.linkis.rpc.sender.asyn.consumer.thread.max | 5 | Maximum number of Sender Consumer threads |
    | wds.linkis.rpc.sender.asyn.consumer.freeTime.max | 2m | Sender Consumer maximum idle time |
    | wds.linkis.rpc.sender.asyn.queue.size.max | 300 | Maximum number of buffers in the sender consumption queue |
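
    Following the recommendations in the table, a deployment with many online users might raise the receiver-side thread and queue limits, for example (illustrative values):

        # RPC tuning for many concurrent users (illustrative values)
        wds.linkis.rpc.receiver.asyn.consumer.thread.max=20
        wds.linkis.rpc.receiver.asyn.queue.size.max=2000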

    2. Computation governance configuration parameters#

    2.1 Entrance configuration parameters#

    | Parameter name | Default value | Description |
    | -------------- | ------------- | ----------- |
    | wds.linkis.spark.engine.version | 2.4.3 | The default Spark version used when the user submits a script without specifying a version |
    | wds.linkis.hive.engine.version | 1.2.1 | The default Hive version used when the user submits a script without specifying a version |
    | wds.linkis.python.engine.version | python2 | The default Python version used when the user submits a script without specifying a version |
    | wds.linkis.jdbc.engine.version | 4 | The default JDBC version used when the user submits a script without specifying a version |
    | wds.linkis.shell.engine.version | 1 | The default shell version used when the user submits a script without specifying a version |
    | wds.linkis.appconn.engine.version | v1 | The default AppConn version used when the user submits a script without specifying a version |
    | wds.linkis.entrance.scheduler.maxParallelismUsers | 1000 | Maximum number of concurrent users supported by Entrance |
    | wds.linkis.entrance.job.persist.wait.max | 5m | Maximum time Entrance waits for JobHistory to persist a Job |
    | wds.linkis.entrance.config.log.path | None | If not configured, the value of wds.linkis.filesystem.hdfs.root.path is used by default |
    | wds.linkis.default.requestApplication.name | IDE | The default submission system when none is specified |
    | wds.linkis.default.runType | sql | The default script type when none is specified |
    | wds.linkis.warn.log.exclude | org.apache,hive.ql,hive.metastore,com.netflix,com.webank.wedatasphere | Real-time WARN-level logs not pushed to the client by default |
    | wds.linkis.log.exclude | org.apache, hive.ql, hive.metastore, com.netflix, com.webank.wedatasphere, com.webank | Real-time INFO-level logs not pushed to the client by default |
    | wds.linkis.instance | 3 | The user's default number of concurrent jobs per engine |
    | wds.linkis.max.ask.executor.time | 5m | Maximum time to apply to LinkisManager for an available EngineConn |
    | wds.linkis.hive.special.log.include | org.apache.hadoop.hive.ql.exec.Task | Logs not filtered by default when pushing Hive logs to the client |
    | wds.linkis.spark.special.log.include | org.apache.linkis.engine.spark.utils.JobProgressUtil | Logs not filtered by default when pushing Spark logs to the client |
    | wds.linkis.entrance.shell.danger.check.enabled | false | Whether to check and block dangerous shell syntax |
    | wds.linkis.shell.danger.usage | rm,sh,find,kill,python,for,source,hdfs,hadoop,spark-sql,spark-submit,pyspark,spark-shell,hive,yarn | Shell syntax considered dangerous by default |
    | wds.linkis.shell.white.usage | cd,ls | Shell whitelist syntax |
    | wds.linkis.sql.default.limit | 5000 | Default maximum number of rows returned for SQL result sets |
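
    For example, to pin the default engine versions used when a submitted script does not specify one (the versions must match the engine plugins actually installed):

        # Default engine versions for unversioned submissions (must match installed plugins)
        wds.linkis.spark.engine.version=2.4.3
        wds.linkis.hive.engine.version=1.2.1
        wds.linkis.python.engine.version=python2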

    2.2 EngineConn configuration parameters#

    | Parameter name | Default value | Description |
    | -------------- | ------------- | ----------- |
    | wds.linkis.engineconn.resultSet.default.store.path | hdfs:///tmp | Default storage path for job result sets |
    | wds.linkis.engine.resultSet.cache.max | 0k | Result sets smaller than this size are returned to Entrance directly by EngineConn, without being written to disk |
    | wds.linkis.engine.default.limit | 5000 | |
    | wds.linkis.engine.lock.expire.time | 120000 | Maximum idle time of the engine lock, i.e., how long after Entrance applies for the lock without submitting code to EngineConn before the lock is released |
    | wds.linkis.engineconn.ignore.words | org.apache.spark.deploy.yarn.Client | Logs ignored by default when the Engine pushes logs to the Entrance side |
    | wds.linkis.engineconn.pass.words | org.apache.hadoop.hive.ql.exec.Task | Logs that must be pushed by default when the Engine pushes logs to the Entrance side |
    | wds.linkis.engineconn.heartbeat.time | 3m | Default heartbeat interval from EngineConn to LinkisManager |
    | wds.linkis.engineconn.max.free.time | 1h | EngineConn's maximum idle time |

    2.3 EngineConnManager configuration parameters#

    | Parameter name | Default value | Description |
    | -------------- | ------------- | ----------- |
    | wds.linkis.ecm.memory.max | 80g | Maximum total memory ECM can use to start EngineConns |
    | wds.linkis.ecm.cores.max | 50 | Maximum number of CPU cores ECM can use to start EngineConns |
    | wds.linkis.ecm.engineconn.instances.max | 50 | Maximum number of EngineConns that can be started; generally recommended to be the same as wds.linkis.ecm.cores.max |
    | wds.linkis.ecm.protected.memory | 4g | ECM's protected memory, i.e., the memory ECM uses to start EngineConns cannot exceed wds.linkis.ecm.memory.max - wds.linkis.ecm.protected.memory |
    | wds.linkis.ecm.protected.cores.max | 2 | Number of ECM's protected CPU cores; the meaning is the same as wds.linkis.ecm.protected.memory |
    | wds.linkis.ecm.protected.engine.instances | 2 | Number of ECM's protected instances |
    | wds.linkis.engineconn.wait.callback.pid | 3s | Waiting time for EngineConn to return its pid |
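
    As a worked example of the protection rule above: with the defaults shown below, ECM keeps starting EngineConns only while their total memory stays below 80g - 4g = 76g and their total cores below 50 - 2 = 48:

        # ECM resource protection (default values shown)
        wds.linkis.ecm.memory.max=80g
        wds.linkis.ecm.protected.memory=4g
        wds.linkis.ecm.cores.max=50
        wds.linkis.ecm.protected.cores.max=2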

    2.4 LinkisManager configuration parameters#

    | Parameter name | Default value | Description |
    | -------------- | ------------- | ----------- |
    | wds.linkis.manager.am.engine.start.max.time | 10m | Maximum time for LinkisManager to start a new EngineConn |
    | wds.linkis.manager.am.engine.reuse.max.time | 5m | Maximum selection time when LinkisManager reuses an existing EngineConn |
    | wds.linkis.manager.am.engine.reuse.count.limit | 10 | Maximum number of polls when LinkisManager reuses an existing EngineConn |
    | wds.linkis.multi.user.engine.types | jdbc,es,presto | Engine types for which the user is not used as a reuse rule when LinkisManager reuses an existing EngineConn |
    | wds.linkis.rm.instance | 10 | Default maximum number of instances per user per engine |
    | wds.linkis.rm.yarnqueue.cores.max | 150 | Maximum number of cores per user in each engine's usage queue |
    | wds.linkis.rm.yarnqueue.memory.max | 450g | Maximum amount of memory per user in each engine's usage queue |
    | wds.linkis.rm.yarnqueue.instance.max | 30 | Maximum number of applications launched by each user in each engine's queue |

    3. Engine configuration parameters#

    3.1 JDBC engine configuration parameters#

    | Parameter name | Default value | Description |
    | -------------- | ------------- | ----------- |
    | wds.linkis.jdbc.default.limit | 5000 | The default maximum number of result set rows returned |
    | wds.linkis.jdbc.support.dbs | mysql=>com.mysql.jdbc.Driver,postgresql=>org.postgresql.Driver,oracle=>oracle.jdbc.driver.OracleDriver,hive2=>org.apache.hive.jdbc.HiveDriver,presto=>com.facebook.presto.jdbc.PrestoDriver | Drivers supported by the JDBC engine |
    | wds.linkis.engineconn.jdbc.concurrent.limit | 100 | Maximum number of concurrent SQL executions |

    3.2 Python engine configuration parameters#

    | Parameter name | Default value | Description |
    | -------------- | ------------- | ----------- |
    | pythonVersion | /appcom/Install/anaconda3/bin/python | Python command path |
    | python.path | None | Specify an additional path for Python; only shared storage paths are accepted |

    3.3 Spark engine configuration parameters#

    | Parameter name | Default value | Description |
    | -------------- | ------------- | ----------- |
    | wds.linkis.engine.spark.language-repl.init.time | 30s | Maximum initialization time for the Scala and Python command interpreters |
    | PYSPARK_DRIVER_PYTHON | python | Python command path |
    | wds.linkis.server.spark-submit | spark-submit | spark-submit command path |

    4. PublicEnhancements configuration parameters#

    4.1 BML configuration parameters#

    | Parameter name | Default value | Description |
    | -------------- | ------------- | ----------- |
    | wds.linkis.bml.dws.version | v1 | Version number of Linkis Restful requests |
    | wds.linkis.bml.auth.token.key | Validation-Code | Password-free token-key for BML requests |
    | wds.linkis.bml.auth.token.value | BML-AUTH | Password-free token-value for BML requests |
    | wds.linkis.bml.hdfs.prefix | /tmp/linkis | Prefix path of BML files stored on HDFS |

    4.2 Metadata configuration parameters#

    | Parameter name | Default value | Description |
    | -------------- | ------------- | ----------- |
    | hadoop.config.dir | /appcom/config/hadoop-config | If it does not exist, the value of the environment variable HADOOP_CONF_DIR is used by default |
    | hive.config.dir | /appcom/config/hive-config | If it does not exist, the value of the environment variable HIVE_CONF_DIR is used by default |
    | hive.meta.url | None | The URL of the HiveMetaStore database; must be configured if hive.config.dir is not configured |
    | hive.meta.user | None | User of the HiveMetaStore database |
    | hive.meta.password | None | Password of the HiveMetaStore database |
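
    If hive.config.dir is not available, the HiveMetaStore database can be pointed to directly, for example (host, database name and credentials below are placeholders):

        # Direct HiveMetaStore DB connection (placeholders; required when hive.config.dir is not configured)
        hive.meta.url=jdbc:mysql://127.0.0.1:3306/hive_meta
        hive.meta.user=hive
        hive.meta.password=hive_password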

    4.3 JobHistory configuration parameters#

    | Parameter name | Default value | Description |
    | -------------- | ------------- | ----------- |
    | wds.linkis.jobhistory.admin | None | The default Admin account, used to specify which users can view everyone's execution history |

    4.4 FileSystem configuration parameters#

    | Parameter name | Default value | Description |
    | -------------- | ------------- | ----------- |
    | wds.linkis.filesystem.root.path | file:///tmp/linkis/ | User's local Linux root directory |
    | wds.linkis.filesystem.hdfs.root.path | hdfs:///tmp/ | User's HDFS root directory |
    | wds.linkis.workspace.filesystem.hdfsuserrootpath.suffix | /linkis/ | First-level suffix appended after the user's HDFS root directory. The user's actual root directory is ${hdfs.root.path}${user}${hdfsuserrootpath.suffix}, e.g. hdfs:///tmp/hadoop/linkis/ for user hadoop with the defaults |
    | wds.linkis.workspace.resultset.download.is.limit | true | Whether to limit the number of rows when the client downloads a result set |
    | wds.linkis.workspace.resultset.download.maxsize.csv | 5000 | Maximum number of rows when the result set is downloaded as a CSV file |
    | wds.linkis.workspace.resultset.download.maxsize.excel | 5000 | Maximum number of rows when the result set is downloaded as an Excel file |
    | wds.linkis.workspace.filesystem.get.timeout | 2000L | Maximum timeout for requests to the underlying file system (if your HDFS or Linux machine performs poorly, it is recommended to increase this appropriately) |

    4.5 UDF configuration parameters#

    | Parameter name | Default value | Description |
    | -------------- | ------------- | ----------- |
    | wds.linkis.udf.share.path | /mnt/bdap/udf | Storage path of shared UDFs; it is recommended to set this to an HDFS path |

    5. MicroService configuration parameters#

    5.1 Gateway configuration parameters#

    | Parameter name | Default value | Description |
    | -------------- | ------------- | ----------- |
    | wds.linkis.gateway.conf.enable.proxy.user | false | Whether to enable proxy-user mode; if enabled, the login user's requests are proxied to the proxy user for execution |
    | wds.linkis.gateway.conf.proxy.user.config | proxy.properties | File storing the proxy rules |
    | wds.linkis.gateway.conf.proxy.user.scan.interval | 600000 | Refresh interval of the proxy file |
    | wds.linkis.gateway.conf.enable.token.auth | false | Whether to enable Token login mode; if enabled, Linkis can be accessed with tokens |
    | wds.linkis.gateway.conf.token.auth.config | token.properties | File storing the Token rules |
    | wds.linkis.gateway.conf.token.auth.scan.interval | 600000 | Refresh interval of the Token file |
    | wds.linkis.gateway.conf.url.pass.auth | /dws/ | Requests released by default without login verification |
    | wds.linkis.gateway.conf.enable.sso | false | Whether to enable SSO user login mode |
    | wds.linkis.gateway.conf.sso.interceptor | None | If SSO login mode is enabled, the user needs to implement SSOInterceptor to jump to the SSO login page |
    | wds.linkis.admin.user | hadoop | Administrator user list |
    | wds.linkis.login_encrypt.enable | false | Whether the password is transmitted with RSA encryption when the user logs in |
    | wds.linkis.enable.gateway.auth | false | Whether to enable the Gateway IP whitelist mechanism |
    | wds.linkis.gateway.auth.file | auth.txt | File storing the IP whitelist |
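
    For example, turning on Token login and proxy-user mode together could look like this (the referenced rule files must then exist in the conf directory):

        # Enable Token auth and proxy-user mode (sketch; rule files must exist under conf/)
        wds.linkis.gateway.conf.enable.token.auth=true
        wds.linkis.gateway.conf.token.auth.config=token.properties
        wds.linkis.gateway.conf.enable.proxy.user=true
        wds.linkis.gateway.conf.proxy.user.config=proxy.properties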

    6. DataSource and Metadata Service configuration parameters#

    6.1 MetaData Service configuration parameters#

    | From Version | Parameter name | Default value | Description |
    | ------------ | -------------- | ------------- | ----------- |
    | v1.1.0 | wds.linkis.server.mdm.service.lib.dir | /lib/linkis-pulicxxxx-/linkis-metdata-manager/service | Specify the relative path of the service libraries to be loaded |
    | v1.1.0 | wds.linkis.server.mdm.service.instance.expire-in-seconds | 60 | Set the service loading timeout; if it exceeds the specified time, the service is not loaded |
    | v1.1.0 | wds.linkis.server.dsm.app.name | linkis-ps-data-source-manager | Set the service used to get data sources |
    | v1.1.0 | wds.linkis.server.mdm.service.kerberos.principle | hadoop/HOST@EXAMPLE.COM | Set the kerberos principal for the linkis-metadata hive service |
    | v1.1.0 | wds.linkis.server.mdm.service.user | hadoop | Set the user for the linkis-metadata hive service |
    | v1.1.0 | wds.linkis.server.mdm.service.kerberos.krb5.path | "" | Set the kerberos krb5 path for the linkis-metadata hive service |
    | v1.1.0 | wds.linkis.server.mdm.service.temp.location | classpath:/tmp | Set the temp location for the linkis-metadata hive and kafka services |
    | v1.1.0 | wds.linkis.server.mdm.service.sql.driver | com.mysql.jdbc.Driver | Set the driver for the hive-metadata mysql service |
    | v1.1.0 | wds.linkis.server.mdm.service.sql.url | jdbc:mysql://%s:%s/%s | Set the URL format for the hive-metadata mysql service |
    | v1.1.0 | wds.linkis.server.mdm.service.sql.connect.timeout | 3000 | Set the MySQL connect timeout for the hive-metadata mysql service |
    | v1.1.0 | wds.linkis.server.mdm.service.sql.socket.timeout | 6000 | Set the socket open timeout for the hive-metadata mysql service |
- + \ No newline at end of file diff --git a/docs/latest/tuning_and_troubleshooting/overview/index.html b/docs/latest/tuning_and_troubleshooting/overview/index.html index cbaebdf4c9c..c6d5f18516e 100644 --- a/docs/latest/tuning_and_troubleshooting/overview/index.html +++ b/docs/latest/tuning_and_troubleshooting/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -17,7 +17,7 @@
The compatibility of the recommended OS version is the best; some system versions may have command incompatibilities. For example, the poor compatibility of yum on Ubuntu may cause yum-related errors during installation and deployment. In addition, it is recommended to avoid deploying Linkis on Windows, as currently no script is fully compatible with .bat commands.

  • Missing configuration items: there are two configuration files that need to be modified in the linkis1.0 version, linkis-env.sh and db.sh

    The former contains the environment parameters that linkis needs to load during execution, and the latter contains the database information for the tables linkis itself needs to store. Under normal circumstances, if the corresponding configuration is missing, the error message shows an exception related to the key value. For example, when db.sh is not filled with the relevant database configuration, an error like "unknown mysql server host '-P'" appears, which is caused by the missing host.

  • Report error when starting microservice

    Linkis puts the log files of all microservices into the logs directory. The log directory levels are as follows:

    ├── linkis-computation-governance
    │   ├── linkis-cg-engineconnmanager
    │   ├── linkis-cg-engineplugin
    │   ├── linkis-cg-entrance
    │   └── linkis-cg-linkismanager
    ├── linkis-public-enhancements
    │   ├── linkis-ps-bml
    │   ├── linkis-ps-cs
    │   ├── linkis-ps-datasource
    │   └── linkis-ps-publicservice
    └── linkis-spring-cloud-services
        ├── linkis-mg-eureka
        └── linkis-mg-gateway

    It includes three microservice modules: computing governance, public enhancement, and microservice management. Each microservice contains three logs, linkis-gc.log, linkis.log, and linkis.out, corresponding to the service's GC log, service log, and service System.out log.

    Under normal circumstances, when an error occurs while starting a microservice, you can cd into the corresponding service's directory under logs and check the related log files to troubleshoot the problem. The most frequently occurring problems generally fall into three categories:

    1. Port occupation: since the default ports of Linkis microservices are mostly concentrated around 9000, check whether each microservice's port is occupied by another microservice before starting. If it is occupied, change the corresponding microservice port in the conf/linkis-env.sh file (see the shell sketch after this list)

    2. Necessary configuration parameters are missing: some microservices must load certain user-defined parameters before they can start normally. For example, the linkis-cg-engineplugin microservice loads the configuration related to wds.linkis.engineconn.* from conf/linkis.properties at startup; if the user changed the Linkis path after installation and the configuration was not modified accordingly, an error is reported when the linkis-cg-engineplugin microservice starts.

    3. System environment is not compatible: when deploying and installing, it is recommended that users follow the recommended system and application versions in the official documents as much as possible and install necessary system plug-ins such as expect, yum, etc. If the application version is not compatible, application-related errors may occur. For example, SQL statement incompatibilities in mysql5.7 may cause errors in the linkis.ddl and linkis.dml files when initializing the db during installation; refer to the "Q&A Problem Summary" or the deployment documentation for the corresponding settings.
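
    A quick way to check whether a port is already in use before starting a service (a shell sketch; 9001 is an example port, not a fixed Linkis port):

        # Check whether an example port is already occupied (either command works)
        ss -tlnp | grep 9001
        netstat -tlnp | grep 9001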

  • Report error during microservice execution period

    Errors during the execution of microservices are more complicated, and the situations encountered differ with the environment, but the troubleshooting method is basically the same. Starting from the corresponding microservice's error log, the cases can be roughly divided into three situations:

    1. Manually installed and deployed microservices report errors: The logs of this type of microservice are unified under the log/ directory. After locating the microservice, enter the corresponding directory to view it.

    2. Engine start failure (insufficient resources, engine request failure): when this type of error occurs, it is not necessarily due to insufficient resources, because the front end can only fetch logs after the Spring project has started; errors occurring before the engine starts cannot be fetched well. Three kinds of high-frequency problems were found during actual use by internal test users:

      a. The engine cannot be created because there is no permission on the engine directory: the log is printed to the linkis.out file under the cg-engineconnmanager microservice; check that file for the specific reason.

      b. There is a dependency conflict in the engine lib package, or the server cannot start normally because of insufficient memory resources: since the engine directory has already been created, the log is printed to the stdout file under the engine directory; the engine path can be found as described in c.

      c. Errors reported during engine execution: each started engine is a microservice that is dynamically loaded and started at runtime. When an error occurs at engine startup, find the engine's log in the corresponding startup user's directory. The corresponding root path is the ENGINECONN_ROOT_PATH filled in linkis-env before installation. If you need to modify the path after installation, modify wds.linkis.engineconn.root.dir in linkis.properties (see the shell sketch after this list).
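
    A shell sketch for locating engine stdout logs under the engine root path (the directory layout below ENGINECONN_ROOT_PATH may differ between Linkis versions):

        # List engine stdout logs modified in the last hour; layout under
        # ENGINECONN_ROOT_PATH may vary between versions
        find ${ENGINECONN_ROOT_PATH} -name stdout -mmin -60 2>/dev/null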

    Ⅴ. Community user group consultation and communication#

    For problems that cannot be located and resolved by the above process during installation and deployment, you can send the error information to our community group. To make it easier for community partners and developers to help, and to improve efficiency, it is recommended that when you ask a question you describe the problem, attach the related log information, and mention what you have already checked; if you suspect an environment problem, also list the corresponding application versions. We provide two online groups: a WeChat group and a QQ group. The communication channels and specific contact information can be found at the bottom of the Linkis github homepage.

    Ⅵ. Locate the source code by remote debugging#

    Under normal circumstances, remote debugging of the source code is the most effective way to locate problems, but compared with document review it requires some understanding of the source code structure. It is recommended to review the detailed source code structure of Linkis in the Linkis WIKI before remote debugging. Once you are reasonably familiar with the project's source code structure, you can refer to How to Debug Linkis.

    - + \ No newline at end of file diff --git a/docs/latest/tuning_and_troubleshooting/tuning/index.html b/docs/latest/tuning_and_troubleshooting/tuning/index.html index 530c7951a7d..b525acf5608 100644 --- a/docs/latest/tuning_and_troubleshooting/tuning/index.html +++ b/docs/latest/tuning_and_troubleshooting/tuning/index.html @@ -7,7 +7,7 @@ Tuning | Apache Linkis - + @@ -16,7 +16,7 @@ override def getOrCreateGroup(groupName: String): Group = { if (!groupNameToGroups.containsKey(groupName)) synchronized { val initCapacity = 100 val maxCapacity = 100 // other codes... } }

    4. Resource settings related to task runtime#

    When submitting a task to run on Yarn, Yarn provides a configurable interface, and as a highly scalable framework Linkis can likewise be configured to set resource usage.

    The related configuration of Spark and Hive are as follows:

    Part of the Spark configuration is in linkis-engineconn-plugins/engineconn-plugins; you can adjust this configuration to change the runtime environment of tasks submitted to Yarn. Due to limited space, for more configuration, such as that of Hive and Yarn, users need to refer to the source code and the parameters documentation.

        "spark.driver.memory" = 2 //Unit is G    "wds.linkis.driver.cores" = 1    "spark.executor.memory" = 4 //Unit is G    "spark.executor.cores" = 2    "spark.executor.instances" = 3    "wds.linkis.rm.yarnqueue" = "default"
    - + \ No newline at end of file diff --git a/docs/latest/upgrade/upgrade_from_0.X_to_1.0_guide/index.html b/docs/latest/upgrade/upgrade_from_0.X_to_1.0_guide/index.html index 837e1abfd54..a503c5e5b21 100644 --- a/docs/latest/upgrade/upgrade_from_0.X_to_1.0_guide/index.html +++ b/docs/latest/upgrade/upgrade_from_0.X_to_1.0_guide/index.html @@ -7,7 +7,7 @@ Upgrade From 0.X To 1.0 Guide | Apache Linkis - + @@ -16,7 +16,7 @@ Please input the choice: ## choice 1

    3. Database upgrade#

         After the service is installed, the database structure needs to be modified, including table structure changes and new tables and data:

    3.1 Table structure modification part:#

         linkis_task: The submit_user and label_json fields are added to the table. The update statement is:

    ALTER TABLE linkis_task ADD submit_user varchar(50) DEFAULT NULL COMMENT 'submitUser name';
    ALTER TABLE linkis_task ADD `label_json` varchar(200) DEFAULT NULL COMMENT 'label json';

    3.2 Need newly executed sql:#

    cd db/module
    ## Add the tables that the enginePlugin service depends on:
    source linkis_ecp.sql
    ## Add the table that the publicservice-instanceLabel service depends on
    source linkis_instance_label.sql
    ## Add the tables that the linkis-manager service depends on
    source linkis_manager.sql

    3.3 Publicservice-Configuration table modification#

         In order to support the full labeling capability of Linkis 1.X, all the data tables related to the configuration module have been upgraded to labeling, which is completely different from the 0.X Configuration table. It is necessary to re-execute the table creation statement and the initialization statement.

         This means that Linkis0.X users' existing engine configuration parameters can no longer be migrated to Linkis1.0 (it is recommended that users reconfigure the engine parameters once).

         The execution of the table building statement is as follows:

    source linkis_configuration.sql

         Because Linkis 1.0 supports multiple versions of the engine, it is necessary to modify the version of the engine when executing the initialization statement, as shown below:

    vim linkis_configuration_dml.sql
    ## Modify the default version of the corresponding engine
    SET @SPARK_LABEL="spark-2.4.3";
    SET @HIVE_LABEL="hive-1.2.1";
    ## Execute the initialization statement
    source linkis_configuration_dml.sql

    4. Installation and startup Linkis1.0#

         Start Linkis 1.0 to verify whether the service has been started normally and provide external services. For details, please refer to: Quick Deployment Linkis1.0

    - + \ No newline at end of file diff --git a/docs/latest/upgrade/upgrade_guide/index.html b/docs/latest/upgrade/upgrade_guide/index.html index 13191770a16..4d2973dad00 100644 --- a/docs/latest/upgrade/upgrade_guide/index.html +++ b/docs/latest/upgrade/upgrade_guide/index.html @@ -7,7 +7,7 @@ Version upgrades above 1.0.3 | Apache Linkis - + @@ -34,7 +34,7 @@ Linkis' nginx configuration file is by default in /etc/nginx/conf.d/dss.conf

    #Example
    server {
        ......
        location dss/linkis {
            alias /appcom/Install/linkis-web-newversion/dist; # static file directory
            index index.html index.html;
        }
        ......
    }

    Reload nginx configuration

    sudo nginx -s reload

    5.3 Notes#

    • After the management console is upgraded, because the browser may have a cache, if you want to verify the effect, it is best to clear the browser cache
    - + \ No newline at end of file diff --git a/docs/latest/user_guide/console_manual/index.html b/docs/latest/user_guide/console_manual/index.html index adaaf8a58fa..a8c50a059e4 100644 --- a/docs/latest/user_guide/console_manual/index.html +++ b/docs/latest/user_guide/console_manual/index.html @@ -7,7 +7,7 @@ Console User Manual | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Console User Manual

    Linkis1.0 has added a new Computation Governance Console page, which provides users with an interactive UI for viewing the execution of Linkis tasks, custom parameter configuration, engine health status, resource surplus, etc., thereby simplifying user development and management efforts.

    1. Structure of Computation Governance Console#

    The Computation Governance Console is mainly composed of the following functional pages:

    • Global History
    • Resource Management
    • Parameter Configuration
    • Global Variables
    • ECM Management (Only visible to linkis computing management console administrators)
    • Microservice Management (Only visible to linkis computing management console administrators)

    Global history, resource management, parameter configuration, and global variables are visible to all users, while ECM management and microservice management are only visible to linkis computing management console administrators.

    Administrators of the Linkis Computation Governance Console can be configured with the following parameter in linkis.properties:

    wds.linkis.governance.station.admin=hadoop (multiple administrator usernames are separated by ',')

    2. Global history#

    The global history interface provides the user's own linkis task submission records. The execution status of each task is displayed here, and the reason for a task's failure can be queried by clicking the view button on the left side of the task.

    ./media/image2.png

    ./media/image3.png

    For Computation Governance Console administrators, the historical tasks of all users can be viewed by clicking the "switch administrator view" on the page.

    ./media/image4.png

    3. Resource management#

    In the resource management interface, the user can see the status of the engine currently started and the status of resource occupation, and can also stop the engine through the page.

    ./media/image5.png

    4. Parameter configuration#

    The parameter configuration interface provides the function of user-defined parameter management. The user can manage the related configuration of the engine in this interface, and the administrator can add application types and engines here.

    ./media/image6.png

    The user can expand all the configuration information in the directory by clicking the application type at the top, then select the engine type within the application, modify the configuration information, and click "Save" to take effect.

    Edit catalog and new application types are only visible to the administrator. Click the edit button to delete an existing application and engine configuration (note: deleting an application directly deletes all engine configurations under it and cannot be restored), to add an engine, or click "New Application" to add a new application type.

    ./media/image7.png

    ./media/image8.png

    5. Global variable#

    In the global variable interface, users can customize variables for code writing, just click the edit button to add parameters.

    ./media/image9.png

    6. ECM management#

    The ECM management interface is used by the administrator to manage the ECMs and all engines. On this interface you can view the status information of an ECM, modify its label information, modify its status, and query all engine information under each ECM. The interface is visible only to the administrator; how administrators are configured is described in chapter 2 of this article.

    ./media/image10.png

    Click the edit button to edit the label information of the ECM (only part of the labels are allowed to be edited) and modify the status of the ECM.

    ./media/image11.png

    Click the instance name of the ECM to view all engine information under the ECM.

    Similarly, you can stop the engine on this interface, and edit the label information of the engine.

    7. Microservice management#

    The microservice management interface shows all microservice information under Linkis and is visible only to the administrator. Linkis's own microservices can be viewed by clicking the Eureka registration center; the microservices associated with Linkis are listed directly on this interface.

    - + \ No newline at end of file diff --git a/docs/latest/user_guide/how_to_use/index.html b/docs/latest/user_guide/how_to_use/index.html index 28446414175..9242b1104c4 100644 --- a/docs/latest/user_guide/how_to_use/index.html +++ b/docs/latest/user_guide/how_to_use/index.html @@ -7,7 +7,7 @@ How to Use | Apache Linkis - + @@ -18,7 +18,7 @@ DSS Run Workflow

    - + \ No newline at end of file diff --git a/docs/latest/user_guide/linkis-datasource-client/index.html b/docs/latest/user_guide/linkis-datasource-client/index.html index 7bd2539abd2..ed86ebb400d 100644 --- a/docs/latest/user_guide/linkis-datasource-client/index.html +++ b/docs/latest/user_guide/linkis-datasource-client/index.html @@ -7,7 +7,7 @@ DataSource Client SDK | Apache Linkis - + @@ -31,7 +31,7 @@ def testMetadataGetDatabases(client:LinkisMetaDataRemoteClient): Unit ={ client.getDatabases(MetadataGetDatabasesAction.builder().setUser("hadoop").setDataSourceId(9l).setUser("hadoop").setSystem("client").build()).getDbs }}
    - + \ No newline at end of file diff --git a/docs/latest/user_guide/linkiscli_manual/index.html b/docs/latest/user_guide/linkiscli_manual/index.html index 8c0b98149c0..7adb9f6bf32 100644 --- a/docs/latest/user_guide/linkiscli_manual/index.html +++ b/docs/latest/user_guide/linkiscli_manual/index.html @@ -7,7 +7,7 @@ Linkis-Cli Manual | Apache Linkis - + @@ -16,7 +16,7 @@

    Note:

    1. variableMap does not support configuration

    2. When there is a conflict between the configured key and the key entered in the command parameter, the priority is as follows:

      Instruction parameters > keys in instruction map-type parameters > user configuration > default configuration

    Example:

    Configure engine startup parameters:

       wds.linkis.client.param.conf.spark.executor.instances=3
       wds.linkis.client.param.conf.wds.linkis.yarnqueue=q02

    Configure labelMap parameters:

       wds.linkis.client.label.myLabel=label123

    Six, output result set to file#

    Use the -outPath parameter to specify an output directory, and linkis-cli will output the result sets to files; a separate file is created automatically for each result set. The output format is as follows:

        task-[taskId]-result-[idx].txt    

    E.g.:

        task-906-result-1.txt
        task-906-result-2.txt
        task-906-result-3.txt
    - + \ No newline at end of file diff --git a/docs/latest/user_guide/overview/index.html b/docs/latest/user_guide/overview/index.html index 6b8350dbe5a..855b114bf95 100644 --- a/docs/latest/user_guide/overview/index.html +++ b/docs/latest/user_guide/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/latest/user_guide/sdk_manual/index.html b/docs/latest/user_guide/sdk_manual/index.html index ff1054650ba..525e5931b91 100644 --- a/docs/latest/user_guide/sdk_manual/index.html +++ b/docs/latest/user_guide/sdk_manual/index.html @@ -7,7 +7,7 @@ JAVA SDK Manual | Apache Linkis - + @@ -42,7 +42,7 @@ }
    - + \ No newline at end of file diff --git a/docs/latest/user_guide/udf/index.html b/docs/latest/user_guide/udf/index.html index 57e74c05c4f..420cb9aef7f 100644 --- a/docs/latest/user_guide/udf/index.html +++ b/docs/latest/user_guide/udf/index.html @@ -7,7 +7,7 @@ Use of UDFs | Apache Linkis - + @@ -20,7 +20,7 @@ Prerequisite: The sharing function needs to be used by the user as an administrator, otherwise the front-end page will not provide an operation entry.

    Click the share button of the udf: a content box will pop up; enter the list of users you want to share with (comma separated).

    Note: After sharing to others, others need to actively load the UDF before using it.

    After sharing, the shared users can find it under "Shared Function", check it to load, and then use it.

    5 Introduction of other functions#

    5.1 UDF handover#

    For example, when a user leaves the company, it may be necessary to hand over personal udfs to someone else. Click the Handover button, select the handover target, and click OK.

    5.2 UDF Expiration#

    For a UDF shared to others, if it has been loaded by a shared user, the udf cannot be deleted directly; it can only be marked as expired. For the time being this is only a marker and does not affect use.

    5.3 UDF version list#

    Click the "version list" button of a udf to view all versions of the udf. The following features are provided for each version:

    Create a new version: Copy the corresponding version to the latest version.

    Download: Download the udf file from bml to the local.

    View the source code: For the python/scala script type, you can directly view the source code, but the jar type is not supported.

    Publish: For a shared udf, a certain version can be published, so that that version takes effect for the shared users. Note: shared users use the latest published version of the udf, while the owner always uses the latest version.

    - + \ No newline at end of file diff --git a/download/download-logo/index.html b/download/download-logo/index.html index 0e8748febf6..02a8479d390 100644 --- a/download/download-logo/index.html +++ b/download/download-logo/index.html @@ -7,7 +7,7 @@ Download Logo | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/download/main/index.html b/download/main/index.html index 0ed942a687b..79551c01d36 100644 --- a/download/main/index.html +++ b/download/main/index.html @@ -7,7 +7,7 @@ Release List | Apache Linkis - + @@ -23,7 +23,7 @@

    For detailed guidelines, please refer to: Compilation and Packaging Guidelines

    - + \ No newline at end of file diff --git a/download/release-notes-1.0.2/index.html b/download/release-notes-1.0.2/index.html index b6d93cdc2f3..8f397e10c68 100644 --- a/download/release-notes-1.0.2/index.html +++ b/download/release-notes-1.0.2/index.html @@ -7,7 +7,7 @@ Release Notes 1.0.2 | Apache Linkis - + @@ -15,7 +15,7 @@

    Release Notes 1.0.2

    This is a non-ASF version

    Linkis-1.0.2 includes all of Project Linkis-1.0.2.

    This release mainly introduces Flink-support into Linkis ecosystem.

    The following key features are added:

    • Flink-EngineConn which offers solid support for Flink jobs. Executing, debugging and monitoring Flink SQL or applications are now available, together with SQL-enhancement ability powered by Linkis Orchestrator.
    • LinkisManagerClient which enables direct access to LinkisManager. Submitting and managing OnceJob rely on this feature.

    Abbreviations:

    CGS: Computation Governance Services

    PES: Public Enhancement Services

    MGS: Microservice Governance Services


    New Feature#

    EngineConn#

    • Linkis-936 [CGS-LinkisOnceEngineconn] supports OnceEngineExecutor

    EnginePlugin#

    • Linkis-935 [CGS-EngineConnPlugin-Flink] supports Flink EngineConn
    • Linkis-947 [CGS-EngineConnPlugin-Flink] supports executing Flink SQL and Flink applications
    • Linkis-948 [CGS-EngineConnPlugin-Flink] multiple-datasource support for Flink EngineConn
    • Linkis-949 [CGS-EngineConnPlugin-Flink] monitoring Flink Metrics

    ComputationClient#

    • Linkis-937 [CGS-LinkisComputationClient] supports OnceEngineExecutor client

    Enhancement#

    • Linkis-953 [CGS-LinkisManager] label supports '-' in hostname
    • Linkis-925 [MGS-LinkisServiceGateway] fix weak password in linkis gateway
    • Linkis-950 [CGS-LinkisEngineConnManager] support both ip address and hostname for service discovery
    • Linkis-967 [CGS-LinkisEntrance] remove the instance-label client dependency, solve the hostname and ip judgment abnormality in the gateway router, and exclude the pom dependency on the pentaho-aggdesigner-algorithm jar
    • Linkis-963 [PES-LinkisBmlServer] the default download user is changed to the jvm user, and setting the default download user by configuration is supported

    Bugs Fix#

    • Linkis-938 [CGS-LinkisManager] fixes a serial execution bug
    • Linkis-952 [CGS-LinkisEngineConn] fixes a redundant thread bug
    • Linkis-943 [CGS-EngineConnPlugin-Hive] fixes a Hive3.0 compilation error
    • Linkis-961 [CGS-EngineConnPlugin-Flink] fixes a Flink-EnginePlugin compilation bug
    • Linkis-966 [CGS-EngineConnPlugin-Hive][CGS-EnginePlugin-Spark] Solve Spark and hive compatibility issue

    Credits#

    The release of Linkis 1.0.2 is inseparable from the contributors of the Linkis community. Thanks to all the community contributors!

    - + \ No newline at end of file diff --git a/download/release-notes-1.0.3/index.html b/download/release-notes-1.0.3/index.html index 9c71ff6377d..b2811540487 100644 --- a/download/release-notes-1.0.3/index.html +++ b/download/release-notes-1.0.3/index.html @@ -7,7 +7,7 @@ Release Notes 1.0.3 | Apache Linkis - + @@ -15,7 +15,7 @@

    Release Notes 1.0.3

    Apache Linkis(incubating) 1.0.3 includes all of Project Linkis-1.0.3.

    This version is the first version of Linkis entering Apache incubation. It mainly completes the ASF infrastructure construction, including license improvement/package name modification, etc. In addition, features such as EngineConn support for Operators are added, and bugs in version 1.0.2 reported by the community are fixed.

    The following key features are added:

    • Deprecate Jersey and use Spring MVC to build HTTP RESTful APIs
    • Replace codehaus json with fastxml json
    • Support EngineConn/OnceEngineConn common operators
    • Support proxy user with kerberos

    Abbreviations:

    • CGS: Computation Governance Services
    • PES: Public Enhancement Services
    • MGS: Microservice Governance Services
    • EC: Engineconn
    • ECM: EngineConnManager

    New Feature#

    • [CGS&PES&MGS][Linkis-1002] Deprecate Jersey and use Spring MVC to build HTTP RESTful APIs, use spring's DispatcherServlet and unify the annotations of the web interface
    • [CGS&PES&MGS][Linkis-1038] Upgrade codehaus json to stable fastxml json
    • [CGS-Engineconn][Linkis-1027] Support for accessing kerberos-enabled Hadoop clusters using Hadoop's proxy-user mechanism
    • [CGS-EngineConnManager][Linkis-1248] Support ECM to obtain all logs of EC when EC is running or stopped
    • [CGS-LinkisManager][Linkis-1043] Support engine operator, the client can perform specific operations on EngineConn through the engine operator and return result
    • [CGS-LinkisOnceEngineconn][Linkis-946] Support hostname and IP address for eureka service discovery and service invocation to meet containerized deployment scenarios such as k8s
    • [CGS-LinkisOnceEngineconn][Linkis-1078] Support EngineConn/OnceEngineConn general operator, providing basic capabilities for integrating with streaming computing engines

    Enhancement#

    • [Commons][Linkis-1026] Optimize the display of numeric type fields exported to Excel
    • [Commons][Linkis-1036] Optimize the file permissions of the shared file system in LocalFileSystem mode
    • [Commons][Linkis-1185] Add some scala code specification checking rules to automatically detect scala code format
    • [Orchestrator][Linkis-1183] Optimize the code with high cyclomatic complexity in the Orchestrator module and the Map problem under high concurrency
    • [MGS-LinkisServiceGateway][Linkis-1064] Support the whitelist configuration of http api, which can be called without user login authentication
    • [CGS-EngineConnManager][Linkis-1030] Support for transferring custom environment variables from ECM to EC
    • [CGS-EngineConnPlugin] [Linkis-1083] Unify and optimize the engineConnPlugin exception class
    • [CGS-EngineConnPlugin][Linkis-1203] Optimize tag update/delete logic
    • [CGS-EngineConnPlugin-JDBC] [Linkis-1117] Support kerberos authentication type for linkis jdbc
    • [CGS-EngineConnPlugin-Flink] [Linkis-1070] Optimize flink EngineConn in prod mode for jar application submitting and optimize the kill operation of Flink computation executor
    • [CGS-EngineConnPlugin-Flink] [Linkis-1248] Enhance the FlinkOnceJob, support to execute set, show grammar of flinkSQL
    • [CGS-EngineConnManager][Linkis-1167] Add JAVA_HOME for ProcessEngineConnLaunch
    • [CGS-ComputationClient][Linkis-1126] Support python matplotlib to display images
    • [CGS-Entrance][Linkis-1206] Optimize the logic of Entrance and add taskID to distinguish tasks
    • [CGS-LinkisManager][Linkis-1209] Optimize multiple commonly used manager functions: add update and startup time attributes to node objects, and optimize the yarn resource acquisition method
    • [CGS-LinkisManager][Linkis-1213] Optimize the relationship between long-lived tags and nodes
    • [CGS-LinkisManager][Linkis-1222] Support returning a response result for the ECM registration request
    • [PES-PublicService][Linkis-1211] Optimize the database information update logic of jobhistory, remove the transaction, and add retry logic
    • [PES-Metadata][Linkis-1224] Remove the association restriction between datasource/dbs http interface query results and logged-in users through parameter configuration

    Bugs Fix#

    • [DB][Linkis-1053] Fix the problem of data insertion failure caused by too long database table fields
    • [DB][Linkis-1087] Remove duplicate DDL statements
    • [Commons][Linkis-1058] Fix the problem that the material package with subdirectories could not be compressed when uploading
    • [Commons][Linkis-1223] Upgrade log4j version to 2.17.0
    • [Commons][Linkis-1052] Fixed not getting route instance when hostname starts with application name
    • [CGS-LinkisManager][Linkis-1014] Fix the wrong usage of object equality judgment
    • [CGS-LinkisManager][Linkis-1054] Fix instance label parsing failure when hostname contains service name.
    • [CGS-LinkisManager][Linkis-1074] Fix NPE issue with http api 'rm/userresources'
    • [CGS-LinkisManager][Linkis-1101] Fixed the issue that the monitor failed to send the engine heartbeat RPC request, causing the engine to be killed by mistake
    • [CGS-LinkisManager][Linkis-1210] Fix instance check and engine tag exclusion bug
    • [CGS-LinkisManager][Linkis-1214] Fix multiple Bugs with high concurrency in RM
    • [CGS-LinkisManager][Linkis-1216] Remove node monitor module from AM
    • [MGS-LinkisServiceGateway][Linkis-1093] Fix permission bypass when the value of pass auth uri is empty
    • [MGS-LinkisServiceGateway][Linkis-1105] Fix linkis default test account weak password problem
    • [MGS-LinkisServiceGateway][Linkis-1234] Fix memory leak problem of SSO login
    • [CGS-Common][Linkis-1199] Fix SqlCodeParser to escape the separator ";" into multiple SQL
    • [CGS-Entrance][Linkis-1073] Fix http api 'entrance/{id}/killJobs' caused by unused parameters exception {ID}
    • [CGS-Entrance][Linkis-1106] VarSubstitutionInterceptor get code type bug fix
    • [CGS-Entrance][Linkis-1149] Fix the problem that the foreground cannot get the progress information after the job task is completed
    • [CGS-Entrance][Linkis-1205] Fixed LogWirter's oom bug
    • [CGS-EngineConnPlugin][Linkis-1113] Fix sql statement error when bml resource data record is updated
    • [CGS-EngineConnPlugin-JDBC] [Linkis-923] Fix the bug of connection url without JDBC engine
    • [CGS-EngineConnPlugin-Spark][Linkis-1017] Fix spark3 engine compilation error
    • [CGS-EngineConnPlugin-Flink][Linkis-1069] Fix the ClassNotfoundException problem caused by the lack of dependencies such as hadoop-mapreduce-client-core in the Flink engine
    • [CGS-EngineConnPlugin-Flink][Linkis-1128] Fix the problem of inaccurate table data insertion in the flink engine
    • [CGS-EngineConnPlugin-Flink][Linkis-1304] Fix the bug that flink sql cannot support multiple SQL statements, and that the checkpoint becomes invalid when using set, rest, drop grammar
    • [CGS-EngineConnPlugins-Hive][Linkis-1231] Fix the progress bug of the engine pushing multiple sub-jobs
    • [PEC-BmlServer][Linkis-1155] Fix the problem of using mysql reserved words in sql statements
    • [PEC-CSServer][Linkis-1160] Fix NPE in CsJobListener
    • [Orchestrator][Linkis-1179] Fix the bug caused by orchestrator concurrency
    • [Orchestrator][Linkis-1186] Fix the problem that the tasks queued by Orchestrator cannot be killed
    • [Console][Linkis-1121] Get the protocol from the current request, remove the hard code for 'http'

    Other#

    • [Commons&MGS-LinkisServiceGateway][Linkis-1192] The third-party reliance on mysql-connector-java violates the Apache License Policy. Therefore, the dependency on mysql-connector-java has been removed from 1.0.3. If you only use it for your own project, you can add mysql-connector-java dependency to your project.
    • [Commons&MGS-LinkisEureka][Linkis-1291] Exclude jar packages with unclear license attribution category io.github.x-stream:mxparser
    • [Commons][Linkis-1287] Split binary distribution package and source code LICENSE/NOTICE and other files
    • [Console][Linkis-1301] Remove font files with unknown license authorization and front-end resource files such as unused image icons
    • [CGS-EngineConnPlugins-Python][Linkis-1281] Remove the pyspark.zip in the source code and add the LICENSE.txt of py4j document

    Credits#

    The release of Apache Linkis(incubating) 1.0.3 is inseparable from the contributors of the Linkis community. Thanks to all the community contributors!

    - + \ No newline at end of file diff --git a/download/release-notes-1.1.0/index.html b/download/release-notes-1.1.0/index.html index 76d810af03d..cdfd0ad5034 100644 --- a/download/release-notes-1.1.0/index.html +++ b/download/release-notes-1.1.0/index.html @@ -7,7 +7,7 @@ Release Notes 1.1.0 | Apache Linkis - + @@ -15,7 +15,7 @@

    Release Notes 1.1.0

    Apache Linkis(incubating) 1.1.0 includes all of Project Linkis-1.1.0.

    This release mainly adds datasource and metadata source management services, supports metadata information query for hive/mysql/kafka/elasticsearch, and fixes bugs in version 1.0.3 reported by the community.

    The following key features have been added:

    • Provides Restful interface to add, delete, check, and modify data sources, as well as data source connection tests.
    • Provides Restful interface for database, table, partition, column attribute query for metadata.
    • Provides Java clients for data source and metadata service management.

    Abbreviations:

    • CGS: Computation Governance Services
    • PES: Public Enhancement Services
    • MGS: Microservice Governance Services
    • EC: Engineconn
    • ECM: EngineConnManager
    • DMS: Data Source Manager Service
    • MDS: MetaData Manager Service

    New Feature#

    • [DMS-Common][Linkis-1335] Add a new module linkis-datasource-manager-common, add datasource data structure/exception class/util class.
    • [DMS-Common][Linkis-1340] Add a new module linkis-metadata-manager-common, add metadata data structure/exception class/util class.
    • [MDS-Server][Linkis-1352] Add a new module linkis-datasource-manager-server to provide data source management services, provides functions such as adding, deleting, checking, and modifying data sources through the restful interface.
    • [MDS-Server][Linkis-1356] Add a new module linkis-metadata-manager-server to provide metadata management services, which provides database, table, and column queries of metadata through the restful interface.
    • [MDS-Services][Linkis-1366] Add a new module linkis-metadata-manager-service-es to provide elasticsearch metadata management service.
    • [MDS-Services][Linkis-1368] Add a new module linkis-metadata-manager-service-hive, providing for hive Metadata management service.
    • [MDS-Services][Linkis-1371] Add a new module linkis-metadata-manager-service-kafka, providing for kafka Metadata management service.
    • [MDS-Services][Linkis-1373] Add a new module linkis-metadata-manager-service-mysql, provide for mysql Metadata management service.
    • [DMS&MDS-Client][Linkis-1418] [[Linkis-1434]](https://github.com/apache /incubator-linkis/pull/1434)[Linkis-1438][[Linkis-1441]](https://github.com /apache/incubator-linkis/pull/1441) Add a new data source management Java client module linkis-datasource-client to facilitate data source management through sdk.
    • [DMS&MDS-Web][Linkis-1456] [[Linkis-1510] Added data source front-end management page, through which you can Simple creation and testing of the data source.

    Enhancement#

    • [MGS-LinkisServiceGateway][Linkis-1377] Introduce the Skywalking component to provide basic capabilities of distributed trace and troubleshooting
    • [CGS-EngineConnPlugin][Linkis-1408] Adjust the default maximum idle time of engine resources to 0.5h, to reduce the resource-competition waiting time in multi-user scenarios
    • [CGS-EngineConnPlugin][Linkis-1535] set JAVA_ENGINE_REQUEST_INSTANCE to constant 1
    • [DB][Linkis-1554] Add DataSource DDL and DML SQL
    • [MDS][Linkis-1583] Add functionality to get attributes of partitions in Hive datasources and fix connection issues
    • [MGS-LinkisServiceGateway][Linkis-1636] Use a regular expression to match the gateway URL; if it matches, the request passes normally
    • [Commons][Linkis-1397] Add maven wrapper to support compiling and packaging using mvnw script
    • [EC][Linkis-1425] Unify ec's log configuration file as log4j2.xml
    • [Install-Script][Linkis-1563] Optimize linkis-cli client script, remove redundant linkis-cli- start script file
    • [Install-Script][Linkis-1559] Optimize the installation and deployment script: add a database connection test check during installation and deployment, and print the database information before initialization so that personnel can confirm it again
    • [Install-Script][Linkis-1559] Add necessary deployment log information and color identification of key information, such as execution step and directory creation logs, etc.
    • [Install-Script][Linkis-1559] add basic environment check for spark/hadoop/hive
    • [Install-Script][Linkis-1559] Migrate the hive metastore HIVE_META information configuration from linkis-env.sh to db.sh
    • [Commons][Linkis-1557] Spring-boot/Spring-cloud version control uses the official dependency-manager pom files, avoiding the introduction of too many version configurations
    • [Commons][Linkis-1621] Spring upgrade, Spring-boot upgrade to 2.3.12.RELEASE, Spring-cloud upgrade to Hoxton.SR12
    • [Commons][Linkis-1558] Unit test JUnit 4 migration upgrade to JUnit 5
    • [Commons&MGS-Eureka][Linkis-1313] Remove unnecessary third-party dependencies and reduce packaged materials to a certain extent package size
    • [Commons&MGS-LinkisServiceGateway][Linkis-1660] Use spring-boot-starter-jetty to replace the direct introduction of jetty dependencies to avoid jetty version conflict

    Bugs Fix#

    • [Deployment][Linkis-1390] Fix the problem that the directory for storing Job result set files (wds.linkis.resultSet.store.path), created during installation and deployment, has insufficient permissions after switching users
    • [Commons][Linkis-1469] Fix the problem that SQL cannot be cut correctly when the ';' character is included in the sql script
    • [CGS-EngineConnPlugin-JDBC][Linkis-1529] Fix the abnormal problem of NullPointerException in JDBC engine authentication type parameter
    • [CGS-Entrance][Linkis-1540] Fix the long-type "kill" method parameter in linkis-entrance, which caused null values to be unrecognized
    • [Commons][Linkis-1600] Fix an error when the result set is downloaded as excel, caused by a low version of commons-compress
    • [CGS-Client][Linkis-1603] Fix the problem that the client does not support the -runtimeMap parameter
    • [CGS-EngineConnPlugin-JDBC][Linkis-1610] Fix jdbc engine cannot support "show databases;" statement problem for postgresql
    • [Commons][Linkis-1618] Fix http response return result in xml format instead of json format
    • [CGS-EngineConnPlugin-JDBC][Linkis-1646] Fix the problem that the JDBC engine displays complex-type field values as object addresses
    • [CGS-EngineConnPlugin-PYTHON][Linkis-1731] Fix the problem of row inversion of the result set field of the python engine's showDF function
    • [PES-BML][Linkis-1556] Fix the HttpMessageNotWritableException that may occur in the file download interface

    Credits#

    The release of Apache Linkis(incubating) 1.1.0 is inseparable from the contributors of the Linkis community. Thanks to all the community contributors, including but not limited to the following contributors for this version: Alexkun, CCweixiao, Celebrate-future, Davidhua1996, FireFoxAhri, WenxiangFan , Zosimer, aleneZeng, casionone, dddyszy, det101, ganlangjie, huapan123456, huiyuanjjjjuice, husofskyzy, iture123, jianwei2, legendtkl, peacewong, pjfanning, silent-carbon, xiaojie19852006

    - + \ No newline at end of file diff --git a/download/release-notes-1.1.1/index.html b/download/release-notes-1.1.1/index.html index 8341f2cce66..ebe2f8bbcd8 100644 --- a/download/release-notes-1.1.1/index.html +++ b/download/release-notes-1.1.1/index.html @@ -7,7 +7,7 @@ Release Notes 1.1.1 | Apache Linkis - + @@ -15,7 +15,7 @@

    Release Notes 1.1.1

    Apache Linkis (incubating) 1.1.1 includes all of Project Linkis-1.1.1.

    This release mainly adds UDF multi-version control and UDF storage to BML; task submission now supports collecting and viewing Yarn queue resource usage statistics; the data virtualization engine OpenLooKeng is newly supported; and known bugs reported by the community are fixed.

    The following key features have been added:

    • Support proxy user mode: user A can execute tasks on behalf of user B, and one proxy user can proxy multiple users
    • Support UDF multi-version control and storage of UDFs to BML
    • Task submission supports collecting Yarn queue resource usage statistics, with a visual view on the management console page
    • Add support for the data virtualization engine OpenLooKeng

    Abbreviations:

    • EC: EngineConn
    • ECM: EngineConnManager
    • ECP: EngineConnPlugin
    • DMS: Data Source Manager Service
    • MDS: MetaData Manager Service
    • LM: Linkis Manager

    New Features#

    • [Gateway&Entrance][Linkis-1608] Support proxy user mode: user A can execute tasks on behalf of user B and query user B's related data; one proxy user can proxy multiple users
    • [LM-ResourceManager][Linkis-1616] The YARN ResourceManager address configuration supports high-availability multi-address configuration; when the current ResourceManager changes state or stops, the active node is resolved from the high-availability address list so that service continues
    • [EC-OpenLooKeng][Linkis-1639] Add support for the data virtualization engine OpenLooKeng
    • [UDF][Linkis-1534] Support UDF multi-version control and UDF storage to BML; task submission supports Yarn queue resource usage statistics collection and visualization on the management console page
    • [Client][Linkis-1718] The Linkis-cli client supports submitting Once-type tasks, which run only once after the engine process starts and are automatically destroyed when the task finishes
    • [ECP][Linkis-1758] Add an engine material refresh interface; engine material resources can be refreshed through an HTTP interface call

    Enhancement#

    • [Gateway][Linkis-1430] For Token authentication, tokens are now read from a database table instead of the configuration file
    • [Entrance][Linkis-1642] Optimize the Excel export interface resultsetToExcel: support specifying the number of rows to download
    • [Entrance][Linkis-1733] Add support for more default time variables related to run_date (see the sketch after this list)
    • [Entrance][Linkis-1794] Add a limit on the data size of a single result-set row, mitigating OOM problems caused by large result sets
    • [DMS-Common][Linkis-1757] Support configuring a Hive metadata administrator; administrators can obtain metadata for all databases and tables through the interface
    • [Common][Linkis-1799] Optimize service log rotation: adjust the rotation interval from one day to one hour
    • [Common][Linkis-1921] Optimize Jackson dependency management: manage Jackson dependencies uniformly through jackson-bom, and upgrade to version 2.11.4
    • [ECM][Linkis-1779] Optimize the status monitoring logic of ECM instances by also checking the heartbeat report time, fixing misjudgments that could be caused by Eureka performance issues
    • [ECM][Linkis-1930] Optimize the resource check so that ECM resources are not checked
    • [Web][Linkis-1596] Optimize the management console's task log viewing interface, and fix logs of running jobs not refreshing in a timely manner
    • [Web][Linkis-1650] Linkis console global history page: support filtering historical task data by creator
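
    For context on the run_date variables above: in Linkis custom variables, run_date conventionally resolves to the day before the task run, in yyyyMMdd format, with derived variables computed from it. The sketch below is illustrative only; the exact variable names and offsets are assumptions, not the definitive Linkis variable set.

    ```scala
    import java.time.LocalDate
    import java.time.format.DateTimeFormatter

    // Illustrative computation of run_date-style time variables; the variable
    // names and offsets here are assumptions for the sketch, not a reference.
    val compact = DateTimeFormatter.ofPattern("yyyyMMdd")
    val runDate = LocalDate.now().minusDays(1) // run_date: the day before the run

    val timeVariables = Map(
      "run_date"        -> runDate.format(compact),                          // e.g. 20220501
      "run_date_std"    -> runDate.format(DateTimeFormatter.ISO_LOCAL_DATE), // e.g. 2022-05-01
      "run_month_begin" -> runDate.withDayOfMonth(1).format(compact)         // first day of the month
    )
    ```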

    Bug Fixes#

    • [Entrance][Linkis-1623] Fix LogPath and ResultSetPath incorrectly using submitUser as executeUser
    • [Entrance][Linkis-1640] Fix LogReader using a singleton InputStream, which caused log loss and prevented the latest persisted logs from being read
    • [Entrance][Linkis-2009] Fix a memory leak caused by not closing thread resources in the Entrance service
    • [Entrance][Linkis-1901] Replace the cache in EntranceFactory with Guava Cache, fixing user modifications of the concurrency parameter not taking effect
    • [Entrance][Linkis-1986] Fix an incorrect line count when fetching the Entrance real-time log, which resulted in duplicated log output
    • [ECM][Linkis-1714] Mitigate "Cannot allocate memory" errors during EC application by reducing the EC's default Java memory size and adding a retry log
    • [ECM][Linkis-1806] Optimize EC lifecycle handling: when an EC started by ECM ends up in Failed status due to insufficient queue resources and timeout, kill the EC process
    • [Common][Linkis-1721] Fix hdfsFileSystem not being refreshed when Kerberos authentication failed
    • [UDF][Linkis-1728] Optimize the /api/rest_j/v1/udf/all API interface to fix occasionally slow queries
    • [Config][Linkis-1859] Fix duplicate primary key errors in the console parameter configuration saveFullTree interface
    • [Client][Linkis-1739] Fix a parameter misspelling in ujes-client requests that caused parameter passing to fail
    • [Client][Linkis-1783] Fix the default configuration of the task creator parameter not taking effect
    • [Client][Linkis-1821] Fix a missing parameter in the ujes-client request entity class GetTableStatisticInfoAction
    • [EC][Linkis-1765] Fix blocking when EC triggers tryShutdown while a task is running
    • [LM-AppManager][Linkis-1814] Fix incorrect response information returned by the createEngineConn interface of EngineRestfulApi, which caused NPEs in client calls
    • [Web][Linkis-1972] Remove DSS-related interface code left in the management console for historical reasons but no longer used
    • [EC-Spark][Linkis-1729] Add the SparkPreExecutionHook function, compatible with the old pre-Apache package name (com.webank.wedatasphere.linkis)
    • [EC-JDBC][Linkis-1851] Fix the JDBC engine failing to execute tasks containing multiple SQL statements
    • [EC-JDBC][Linkis-1961] Fix logs not printing correctly due to an SLF4J dependency problem when the JDBC engine starts
    • [Gateway][Linkis-1898] Fix the initial domain name not being settable when GatewaySSOUtils generates a cookie after a successful user login

    Others#

    • [License][Linkis-2110] Remove the binary file .mvn/wrapper/maven-wrapper.jar from the source code, and adjust the LICENSE content related to .mvn/*
    • [License][Linkis-2113] Upgrade py4j-0.10.7-src.zip to py4j-0.10.9.5-src.zip; update the py4j-*-src license files and move them from linkis-engineconn-plugins/engineconn-plugins/python/src/main/py4j/LICENSE-py4j-0.10.7-src.txt to licenses/LICENSE-py4j-0.10.9.5-src.txt for easier viewing
    • Fix the mvnw shell script in the release source code using Windows (CR) line endings

    Credits#

    The release of Apache Linkis (incubating) 1.1.1 would not have been possible without the contributors of the Linkis community. Thanks to all community contributors, including but not limited to the following contributors: AbnerHung, Alexkun, barry8023, CCweixiao, Davidhua1996, Fuu3214, Liveipool, casinoone, demonray, husofskyzy, jackxu2011, legendtkl, lizheng920625, maidangdang44, peacewong, seedscoder


    Release Notes 1.1.2

    Apache Linkis (incubating) 1.1.2 includes all of Project Linkis-1.1.2.

    This release mainly supports simplified deployment in environments without HDFS (for some engines), enabling more lightweight learning, use, and debugging; adds support for the data migration tool Sqoop as an engine; optimizes exception-handling logs; upgrades some components with security vulnerabilities; and fixes known bugs reported by the community.

    The main functions are as follows:

    • Support simplified deployment in environments without HDFS (for some engines), enabling more lightweight learning, use, and debugging
    • Add support for the data migration tool Sqoop as an engine
    • Optimize logs and related output to improve troubleshooting efficiency
    • Fix interface security issues such as unauthorized user access
    • Upgrade some dependency packages and fix bugs known to the community

    Abbreviations:

    • COMMON: Linkis Common
    • EC: EngineConn
    • ECM: EngineConnManager
    • ECP: EngineConnPlugin
    • DMS: Data Source Manager Service
    • MDS: MetaData Manager Service
    • LM: Linkis Manager
    • PS: Linkis Public Service
    • PE: Linkis Public Enhancement
    • RPC: Linkis Common RPC
    • CG: Linkis Computation Governance

    New Features#

    • [Deployment][Linkis-1804,1811,1841,1843,1846,1933] Support simplified deployment without HDFS (for some engines), enabling more lightweight learning, use, and debugging
    • [PS][Linkis-1949] Add a list interface (/listundone) for unfinished jobs, and optimize its query performance through scheduled tasks
    • [BML][Linkis-1811,1843] The BML material service adds support for deployment with local filesystem storage
    • [Common][Linkis-1887] The RPC module Sender supports modifying load-balancing parameters, such as Ribbon's
    • [Common][Linkis-2059] Use the task ID as the trace ID in logs (see the sketch after this list)
    • [EC][Linkis-1971] EC AsyncExecutor supports setting the number of parallel Job Groups
    • [Engine][Linkis-2109] Add support for the data migration tool Sqoop as an engine
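
    Regarding the task-ID-as-trace-ID feature above: a common way to thread such an ID through service logs is SLF4J's MDC, surfaced in the log pattern via a %X{taskId} token. The sketch below is illustrative only; the "taskId" key and the helper are assumptions, not Linkis's actual implementation.

    ```scala
    import org.slf4j.MDC

    object TraceIdLogging {
      // Run `body` with the task id bound into the SLF4J logging context (MDC),
      // so every log line emitted inside it can carry the id via a %X{taskId}
      // token in the logging pattern.
      def withTaskId[T](taskId: String)(body: => T): T = {
        MDC.put("taskId", taskId) // key name is an assumption for this sketch
        try body
        finally MDC.remove("taskId")
      }
    }
    ```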

    Enhancement#

    • [ECP][Linkis-2074] The Flink engine supports custom configuration
    • [Deployment][Linkis-1841] Allow deployments to disable Spark/Hive/HDFS environment detection
    • [Deployment][Linkis-1971] Fix incorrect automatic IP detection when deploying on machines with multiple NICs
    • [Entrance][Linkis-1941] Entrance supports passing the raw jobId to EngineConn and LinkisManager
    • [Entrance][Linkis-2045] Refactor the mapping between script type and run type in the EntranceInterceptor implementation classes
    • [RPC][Linkis-1903] Modify the RPC module's exception handling to pass through the original error message of EngineConnPlugin exceptions
    • [RPC][Linkis-1905] Add support for passing LoadBalancer parameters, such as Ribbon's
    • [Orchestrator][Linkis-1937] The orchestrator task scheduler's creator configuration parameter supports multiple creator values
    • [PE][Linkis-1959] ContextService adds necessary log output to facilitate troubleshooting
    • [EC][Linkis-1942] EC supports injecting the taskID into the underlying engine's configuration, making it easy to associate task lineage analysis with a specific Linkis task
    • [EC][Linkis-1973] Collect task execution error logs with tail -1000 instead of cat, limiting the number of log lines and avoiding loading large files in full (see the sketch after this list)
    • [CG,PE][Linkis-2014] Add configuration add/get/delete operations and optimize the synchronization lock
    • [Common][Linkis-2016] Adjust cglib usage: replace the cglib dependency with Spring's built-in cglib
    • [Gateway][Linkis-2071] Add the GatewayURL attribute value to the HTTP request header
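
    On the cat-to-tail change above: the point is to cap how much of a potentially huge error log is ever read. A minimal sketch of the idea (illustrative, not the Linkis code):

    ```scala
    import scala.sys.process._

    // Capture only the last `lines` lines of an engine error log instead of
    // reading the whole file, so large log files are never loaded in full.
    def tailErrorLog(path: String, lines: Int = 1000): String =
      Seq("tail", "-n", lines.toString, path).!!
    ```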

    Bug Fixes#

    • [Engine][Linkis-1931] Fix a Python engine loading error caused by PySpark functionality rather than by standalone Python itself
    • [Deployment][Linkis-1853] Fix a DDL error during installation initialization
    • [UDF][Linkis-1893] Add user permission checks for UDF-related interfaces
    • [EC][Linkis-1933] Grant result-set write permission to users outside the deploy user group so that they can execute jobs
    • [EC][Linkis-1846] Fix the ResultSet local path configuration not taking effect
    • [EC][Linkis-1966] Replace System.env with System.properties
    • [EC-Python][Linkis-2131] Fix a Python engine exception caused by pandas
    • [PS][Linkis-1840] Add flexible options when downloading data in CSV format, to prevent garbled data formatting
    • [Orchestrator][Linkis-1992] Fix a concurrency issue in the Orchestrator Reheater module
    • [PE][Linkis-2032] Optimize the configuration interface: when fetching a Label's configuration parameters, fetch the key-value pairs directly
    • [Web][Linkis-2036] Fix an instance display problem on the management console's ECM page
    • [Web][Linkis-1895] Fix a display bug on the resource page
    • [ECP][Linkis-2027] Fix an error caused by byte truncation when downloading ECP materials
    • [ECP][Linkis-2088] Fix progress rolling back while a Hive task is running
    • [ECP][Linkis-2090] Fix Python3 not being found
    • [CG][Linkis-1751] Make the script custom-variable run type and suffix constraints configurable
    • [CG][Linkis-2034] Fix mismatched descriptions for timed-out tasks
    • [CG][Linkis-2100] Mitigate database deadlocks under high concurrency

    Security related#

    • [UDF][Linkis-1893] Fix unauthorized-access issues in some UDF interfaces (/udf/list, /udf/tree/add, /udf/tree/update)
    • [PS][Linkis-1869] Fix unauthorized-access issues in Linkis PublicService related interfaces
    • [PS][Linkis-2086] Add a permission check to the /updateCategoryInfo method

    Dependency changes#

    • [MDS][Linkis-1947] mysql-connector-java upgraded from 5.1.34 to 8.0.16
    • [ECP][Linkis-1951] hive-jdbc upgraded from 1.2.1 to 2.3.3
    • [ECP][Linkis-1968] protobuf-java upgraded to 3.15.8
    • [ECP][Linkis-2021] Remove some redundant dependencies from the Flink module
    • [RPC][Linkis-2018] Unify the json4s version
    • [Web][Linkis-2336] Introduce the web component jsencrypt 3.2.1 as a login password encryption/decryption tool

    Thanks#

    The release of Apache Linkis (incubating) 1.1.2 would not have been possible without the contributors of the Linkis community. Thanks to all community contributors, including but not limited to the following contributors (in no particular order): Alexyang, Casion, David hua, GodfreyGuo, Jack Xu, Zosimer, allenlliu, casionone, ericlu, huapan123456, husofskyzy, iture123, legendtkl, luxl@chinatelecom.cn, maidangdang44, peacewong, pengfeiwei, seedscoder, weixiao, xiaojie19852006, めぐみん, Li Wei


    Solution: on a CDH 6.3.2 cluster, the Spark engine classpath only contains /opt/cloudera/parcels/CDH-6.3.2-1.cdh6.3.2.p0.1605554/lib/spark/jars; add hive-exec-2.1.1-cdh6.1.0.jar there, then restart Spark.

    Q17. When the Spark engine starts, it reports "queue default is not exists in YARN"; the specific information is as follows:#

    (screenshot: linkis-exception-09.png)

    Solution: when the 1.0 linkis-resource-manager-dev-1.0.0.jar pulls queue information, it has a JSON-parsing compatibility problem. The project team has provided an optimized replacement package; place the jar under /appcom/Install/dss-linkis/linkis/lib/linkis-computation-governance/linkis-cg-linkismanager/.

    Q18. When the Spark engine starts, it reports "get the Yarn queue information exception" and an abnormal HTTP link#

    Solution: migrate the YARN address configuration to the database; the following configuration needs to be added:

    (screenshot: db-config-02.png)

    Q19. When the Spark engine is scheduled, the first execution succeeds, but executing again reports "Spark application sc has already stopped, please restart it". The specific errors are as follows:#

    (screenshot: page-show-03.png)

    Solution: the Linkis 1.0 engine architecture was adjusted so that, once created, the Spark session is reused to avoid overhead and improve execution efficiency. Our spark.scala script contained spark.stop(); this command closes the newly created session, so every later execution reports that the session is closed and asks for a restart. First remove stop() from all scripts, then execute in order: default.sql first, then scalaspark and pythonspark. See the sketch below.
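
    As a minimal illustration (assuming `spark` is the SparkSession injected into the script by the Linkis Spark engine):

    ```scala
    // A spark.scala script under Linkis 1.0. The session is shared and reused
    // across executions, so the script must not stop it.
    val df = spark.sql("SELECT 1 AS ok")
    df.show()
    // spark.stop()  // remove this line: stopping the shared session makes
    //               // every later execution fail with "sc has already stopped"
    ```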

    Q20. When pythonspark is scheduled, it reports "initialize python executor failed" with ClassNotFoundException: org.slf4j.impl.StaticLoggerBinder, as follows:#

    (screenshot: linkis-exception-10.png)

    Solution: the Spark server lacks slf4j-log4j12-1.7.25.jar; copy the jar into /opt/cloudera/parcels/CDH-6.3.2-1.cdh6.3.2.p0.1605554/lib/spark/jars.

    Q21. When pythonspark is scheduled, it reports "initialize python executor failed" with a submit-version error, as follows:#

    (screenshot: shell-error-03.png)

    Solution: the Linkis 1.0 pythonSpark engine has a bug in the code that obtains the Spark version. The fix is as follows:

    (screenshot: code-fix-01.png)

    Q22. When pythonspark is scheduled, it reports "TypeError: an integer is required (got type bytes)" (reproduced by running the engine launch command separately); the details are as follows:#

    (screenshot: shell-error-04.png)

    Solution: the system's Spark and Python versions are incompatible: Python is 3.8, Spark is 2.4.0-cdh6.3.2, and this Spark requires Python <= 3.6. Downgrade Python to 3.6 and comment out the following lines of /opt/cloudera/parcels/CDH/lib/spark/python/lib/pyspark.zip/pyspark/context.py:

    (screenshot: shell-error-05.png)

    Q23. The Spark engine is 2.4.0+cdh6.3.2; the Python engine previously lacked pandas and matplotlib, so local Python was upgraded to 3.8, but Spark does not support Python 3.8, only 3.6 and below#

    Solution: reinstall the Python package manager Anaconda2, downgrade Python to 2.7, and install the pandas and matplotlib modules; the Python and Spark engines can then be scheduled normally.


    Computation Middleware

    Before

    Each upper-layer application directly connects to and accesses the various underlying engines in a tightly coupled way, which turns the big data platform into a complex network architecture.

    (diagram: before)

    After

    Build a common layer of "computation middleware" between the numerous upper-layer applications and the countless underlying engines, to resolve these complex connection problems in a standardized, reusable way.

    (diagram: after)

    Description

    Standardized Interfaces

    Linkis provides standardized interfaces (REST, JDBC, WebSocket etc.) to easily connect to various underlying engines (Spark, Presto, Flink, etc.), and acts as a proxy between the upper applications layer and underlying engines layer.

    (diagram)

    Computation Governance

    Linkis is able to facilitate the connectivity, governance, and orchestration capabilities of different kinds of engines, such as OLAP, OLTP (in development), and Streaming, and handles all these "computation governance" affairs in a standardized, reusable way.

    Core Features

    Connectivity

    Simplify the operating environment; decouple the upper and lower layers so that the upper layer is insensitive to changes in the bottom layers

    Scalability

    Distributed microservice architecture with great scalability and extensibility; quickly integrate new underlying engines

    Controllability

    Converge engine entrances; unify identity verification, high-risk prevention and control, and audit records; label-based, multi-level, fine-grained resource control and recovery capabilities

    Orchestration

    Computing strategy design based on active-active, mixed computing, and transactions via the Orchestrator Service

    Reusability

    Greatly reduces the back-end development workload of upper-layer applications; swiftly and efficiently build a data platform tool suite based on Linkis


    Contributing


    You can contribute to Apache Linkis by reporting bugs, submitting new features or improvement suggestions, submitting patches, writing or refining documents, taking part in community Q&A, organizing community activities, and so on. For detailed instructions, please refer to the Contributor's Guide.


    PPMC

    (In no particular order)

    Committer

    (Sorted by English initials)

    Contributors of Apache Linkis

    Contributors of Apache Linkis WebSite


    Our Users

    This project is used by the following companies.
    Are you using this project? You can add your company.
    - + \ No newline at end of file diff --git a/versions/index.html b/versions/index.html index 9213ffec16d..187b38c33c3 100644 --- a/versions/index.html +++ b/versions/index.html @@ -7,7 +7,7 @@ Apache Linkis - + @@ -15,7 +15,7 @@

    All document versions of Apache Linkis


    This is the current document version

    Here you can find the currently published version of the document

    1.1.2 · Document · Release Note · Source Code

    This is an unpublished document version

    Here you can find the unpublished version of the document

    Next-1.1.3 (WIP) · Document

    These are the previously published versions of the document

    1.1.1 · Document · Source Code
    1.1.0 · Document · Source Code
    1.0.3 · Document · Source Code
    1.0.2 · Document · Source Code
    0.11.0 · Document · Source Code
:"f81106e5",88290:"d0c6b3c0",88423:"cdf529de",88426:"48422a68",88463:"115c1cc6",88629:"27dbc74e",88733:"b80bd506",89339:"641be88a",89520:"24327667",89820:"50c09d93",89846:"3010b6c6",89926:"13e8b6e8",90241:"820f361b",90330:"0c38459a",90391:"283536cd",90406:"023e064d",90538:"de271c79",90867:"b77d38b5",91073:"f4c75e9f",91542:"146c40a3",91755:"460e656a",91977:"41fe0b81",92074:"8903e609",92132:"dbeafd1b",92489:"14d47647",93089:"a6aa9e1f",93115:"cb951476",93259:"c1fd58a9",93302:"7148444c",93377:"1426a1d7",93380:"0f458f65",93591:"d6321c51",93735:"6b62a9a4",93803:"70a4d7d3",93962:"c0c74b72",93979:"99a30ab3",94138:"5df84640",94141:"fece5140",94172:"439ddd83",94195:"ae1d45b8",94424:"a08e4012",94439:"f4730b20",94478:"ce55c90c",94629:"caa9028b",94640:"fb0c6c89",94791:"cd72fc6f",95012:"27d06669",95086:"31601111",95260:"f2e34371",95369:"39a7dcea",96136:"e1f07afe",96188:"20ab1817",96247:"b12f6675",96369:"397839d3",96739:"eb05c290",96938:"61198ef2",96992:"c456f623",97451:"b62874ee",97507:"cb9d8c24",97597:"8cc02d9f",97616:"306a8c6c",97622:"64db7480",97689:"bdca809f",97892:"be3cf78a",97899:"6fc19996",98091:"1f71503e",98094:"f4c00f3f",98381:"1a35bc33",98647:"ddb6fedf",98651:"03b29b8f",98953:"a79652e8",98959:"01a26e04",99056:"dc1a190b",99181:"530ea569",99348:"52286226",99548:"5414ec7d",99553:"4fd00f9f",99646:"84447780",99670:"c9ae514c",99722:"22aae707",99924:"df203c0f",99944:"390879c4"}[e]||e)+"."+{346:"1f8806d9",453:"cf6598d5",502:"709ecf39",786:"ce2181da",963:"8c38015f",974:"804367e7",1036:"ad7eab74",1149:"0c434cbd",1248:"0e976e98",1467:"f2b51243",1494:"dd7c57bc",1509:"8c65da09",1521:"8c5d9eab",1860:"31e6d7cf",1955:"d729c497",1975:"9de28ecf",2265:"3731095d",2818:"40d51f94",2853:"05d75d7a",2909:"10ab86d1",2952:"398746f7",3456:"5f329077",3829:"2a47bdd2",3850:"f24e9ea5",3955:"cdbe4f15",3994:"ec629861",4015:"83b8dcf5",4149:"4044399a",4194:"52297fcb",4396:"53e1a778",4479:"60bc4084",4494:"b0463345",4546:"8c4831ad",5162:"32249d3e",5232:"a0372713",5255:"f0513f5e",5323:"937d093d",5334:"12fbde0e",5335:"22c1001e",5378:"db5aa732",5503:"184f212f",5524:"d6a7cd5e",5588:"31129896",5596:"0573fc62",5892:"ed084295",6126:"3f89dfbe",6178:"998126a7",6378:"c0fa579c",6747:"f79a77da",6850:"4e66f687",6954:"8bbec96d",7470:"dc18e01e",7492:"94b9edad",7498:"17ca6c8b",7532:"189c1f94",7605:"9d64be80",7914:"d05e29a8",8293:"79422ad7",8743:"ce0a3d8d",8965:"50b75080",9036:"9748d3a8",9345:"b05fd722",9474:"39ae0fdf",9552:"942572bb",9632:"6f8c52c8",9738:"33b93deb",10296:"d6aa94a4",10311:"387390fc",10375:"54414020",10391:"120475ce",10502:"fb8b2ceb",10594:"b3f1ad00",10865:"3803c70a",10972:"bfd6544a",10974:"2e4485e3",11092:"57b26c80",11119:"48cbe33f",11129:"30252ad2",11152:"a588ed25",11535:"67dad406",11564:"4cc893e5",11692:"31b54503",12235:"d611346a",12357:"c12d27b1",12397:"c74bd93c",12510:"b481946f",12581:"9a747c14",12651:"85ca15c0",12875:"d948765a",13036:"0ffd1cee",13099:"07681f7d",13190:"f5c51fd0",13438:"afc9072a",13751:"1f2d2496",13831:"3e79aa5d",13847:"6bd39754",13933:"b927c7e7",14380:"c637bfa1",14400:"72bf05e7",14657:"99a5eafe",15129:"5310dbdd",15299:"cd23ea90",16243:"35a709a7",16286:"951b50a9",16406:"0c13ab47",16435:"76f0dc77",16742:"59b8b60f",16872:"3934d90a",16900:"d6fabf42",17061:"61cb7869",17257:"bae69e9f",17542:"c54e0a18",17612:"98f91be4",17765:"eca8f084",17855:"9c97fd42",18098:"eac03630",18166:"7295783b",18465:"c06b8d73",18782:"cb26705f",18855:"cd86d191",19247:"032387fb",19476:"6f5b0062",19514:"e410faab",19750:"4cab4dfe",20077:"86105e7e",20261:"8b88084f",20337:"e8cfc6fa",20369:"cfb3b521",20498:"c0a3f19a",20563:"959811a9",20689:"32851d76
",20720:"d5988fc6",20761:"57ce7d67",20873:"fb57a675",21195:"0eedc7b1",21242:"e58a9470",21369:"36ba0439",21390:"4c69a3db",21639:"417032f5",21759:"152cb3fa",22032:"d6c07f5e",22101:"0a015cbc",22286:"24cbda1e",22527:"0e0100ce",22528:"1124deae",22686:"f28202a6",22849:"ed888e86",22933:"df268f64",22934:"eaa25d58",23075:"db66a0f0",23089:"e614c53a",23117:"37fff2d9",23632:"9431d38e",24150:"01dce3af",24348:"ac282e2a",24396:"d5d5857a",24514:"f91686f0",24550:"f1d7bb46",24561:"fa8770bc",24608:"9c4d2d11",24622:"9be3798b",24994:"a6c205a4",25019:"b8176936",25045:"e1d428c7",25063:"6882775d",25111:"0ef715d0",25213:"7ce4da41",25284:"248cfbef",25930:"d1c103c7",26224:"95bac4e1",26234:"c63994dc",26247:"51def8aa",26324:"9ebe0597",26649:"5e8c49a1",26706:"ea4aec80",27099:"753759ab",27174:"36399036",27598:"8b29878d",27616:"d65abce8",27673:"4a5ab75a",27867:"d7d79eac",27918:"d0a8d8ed",27925:"2a5c16a8",27957:"428dabaf",27991:"3772c88a",28429:"2a876683",28448:"2370a249",28497:"a2108344",28633:"c0737130",28825:"f054df4b",28877:"aa33bea7",29085:"6c4ec725",29212:"873bcf3d",29231:"6939bb2d",29376:"64367516",29440:"3524e61f",29450:"5c29a9e2",29463:"eb53093a",29470:"978fbb98",29514:"c2b68c2b",29522:"1f0699da",29828:"62e6b52d",30305:"b0aa0385",30477:"32827fa2",30503:"11fefa4c",30617:"7f954424",30672:"cd42c5b6",30829:"3dedc35d",31019:"8f43ab7e",31047:"4f3b0772",31206:"a3eb5a8d",31237:"c896d9a9",31253:"7e8f17df",31795:"ec5d2432",31817:"65893f96",31964:"9d711b7c",32085:"f44f9cc8",32336:"a05fd6c5",32479:"3fbd6403",32617:"e0f5f739",32656:"d0edbb82",33405:"fab08588",33408:"b126f228",33560:"3d37821a",33841:"9ee6942d",34304:"b461d927",34414:"bc5e30fc",34769:"a12d4370",34777:"2f8ccea4",34893:"0f6aee44",35003:"fb1a343e",35135:"96f37e66",35513:"a7219aa9",35557:"a32e3bd3",35707:"71cdb4d7",35775:"f748c0f8",36180:"5ab358cc",36311:"81564718",36336:"acee71c5",36425:"1dd5a995",36511:"9d2cb9d7",37383:"19ac5d35",37387:"f9625158",37542:"d7eafe35",37797:"82078eda",37976:"0d4ff4bb",38528:"7ac4ba4f",38663:"bb0dd05b",38762:"f422b7af",39158:"0743994b",39182:"afef0ab8",39214:"081a7273",39236:"480c4cf6",39248:"a15cd856",39486:"936f5cdc",39638:"67cb34c2",39898:"6db57ab4",40504:"5fd6fde6",40512:"894e2f8e",40616:"a02fef48",40758:"41fd29d2",41022:"4520c4f3",41026:"be2d8a72",41139:"6200c46b",41341:"bfb07e6c",41468:"cf983898",42253:"5f93feb7",42486:"3b0a4954",43025:"ef4dbc52",43052:"d9b2dc4a",43530:"686b3cce",43960:"cfe7ba55",44172:"978f1159",44334:"4f670810",44394:"eb2de966",44482:"13137e56",44696:"e57586f5",44787:"6501b356",44835:"064c936c",44999:"067608a5",45088:"2485e54e",45205:"79890cd1",45230:"bd5dc018",45407:"fce963dd",45487:"40a2ad76",45601:"9de95d06",45611:"bc15d259",45663:"0003e7e5",45810:"f54625ef",46023:"564e0b0c",46103:"c269c6ad",46207:"71e7635c",46220:"20914407",46516:"56892399",46522:"0cba3c3b",46617:"9fbb7012",46798:"9cccc16e",46876:"25efa3a7",46945:"4deecdf7",47209:"59af3317",47223:"402d3ee1",47266:"de804449",47325:"c7ef1185",47474:"d1bd7565",47611:"9765e6b7",47838:"42538b1f",47903:"d93b2136",48152:"afacac05",48217:"4ffb5bb0",48360:"704c48e5",48422:"b76f994b",48610:"884dfaf4",48747:"ec2f42c0",48751:"e0f9acd0",48762:"4a94347e",48826:"e47dee74",48932:"0a2ccfb1",49136:"904801c9",49222:"8d8781fc",49417:"8fbdef48",49418:"e71dddf1",49476:"df608972",49851:"4cc2de52",49893:"9092b94f",50027:"e6625ba7",50029:"76e354d6",50200:"1c08e454",50310:"d5fb960d",50531:"2818e6b2",50552:"6189e28c",50578:"1bbf9848",50603:"f24f693c",50926:"24058230",50947:"0cc3e088",51030:"14ac24a1",51471:"9bb1d895",51511:"0f477ec7",51702:"82cdafbe",51841:"39deda53",52058:"c0596fa2",
52145:"d68e05fc",52183:"fca7ce0f",52363:"c6528778",52481:"1dbbbae1",52486:"c6a43c82",52491:"0ab03d24",52535:"01cc4554",52541:"fdf837d2",52632:"e91aad0d",52721:"ff0e49a9",52989:"62481cd5",53051:"5918ab88",53172:"33e5955c",53490:"f2bc0bca",53608:"51fbf6e8",54086:"66949672",54485:"1be6dab5",54688:"bf160cb3",54746:"b56f8f53",55040:"d1cb509f",55060:"5bdab039",55171:"7d7a4430",55355:"df622322",55571:"ea16f475",55938:"8c1ffd7c",56230:"e9cc4f5e",56474:"5bf53cac",56967:"a6085895",57233:"d62a477c",57253:"14b4d2b2",57413:"4065fa7c",57463:"dd42ae19",57934:"2ce29a10",58146:"824e3d99",58326:"53569147",58474:"fc1ecb4b",58556:"16973e4a",58576:"4f21392f",58626:"f9476fe5",58940:"a33481f2",59365:"d4152f8c",59840:"ed93c730",60035:"46d1ee94",60060:"be42bf8a",60309:"425bd2a8",60418:"c9b1742f",60438:"77d29868",60452:"b44200e8",60470:"dc0f453e",60483:"87ce9487",60975:"0954feaa",61126:"85cbd828",61194:"5b8fd1f4",61312:"9176c59d",61576:"3b9fd01a",61925:"3f6ba8ad",62076:"120a1f15",62823:"f5a69935",62888:"0631f832",63013:"3537ce72",63020:"e8cb865f",63280:"06acfbeb",63339:"254dc414",63495:"40071eeb",63718:"f5c58fd3",63905:"1678fb8d",64013:"75f40f00",64233:"3a42ff14",64380:"fc449630",64676:"2b568f76",64772:"8b33418b",64885:"1f95b268",64927:"960d51ee",64949:"52ffd2c9",65014:"18a99462",65057:"b5978b91",65197:"63932d60",65201:"d5ccea93",65290:"b49d6695",65309:"b074cdf0",65437:"07404a3f",65544:"d72a4f75",65760:"7926750e",65808:"a6b12c86",66017:"58f22b3a",66185:"c79eca9c",66465:"c4321497",66553:"9908e4ce",66584:"2c9d8afa",66674:"47627822",66705:"69b35bde",66873:"ae76af99",66937:"b1b19d82",67235:"ee70323d",67317:"766d9300",67780:"4f2b4634",68065:"430f87ae",68281:"48a47da8",68505:"c9a07adb",68651:"061f8aa6",68798:"2ab61aa9",69012:"e0bcc580",69045:"9e6a3b67",69347:"1db547de",69778:"2d9f2718",70066:"f9ff8f33",70126:"ffc08d3e",70251:"275b3970",70269:"18e07bba",70419:"9825066b",70659:"13ec1e41",70971:"2840b71a",71075:"3a00e42c",71341:"d034f411",71695:"e46e3e4b",71786:"369a75df",71960:"6e67fd41",72024:"b2517c14",72104:"8bf5c4a6",72142:"9c9bd8d9",72146:"f23e6200",72157:"8697cd1b",72163:"871d93fa",72216:"8dfcd8d2",72371:"5b0bc988",72487:"51bec9b9",72747:"e6aaea6d",73033:"7ec9829d",73255:"e753ee18",73289:"fb393ba4",73297:"578d52fc",73333:"9b8fedf6",73544:"3bf081ca",73556:"0633dfe3",73736:"fa002262",73906:"2a0c2848",73997:"ed3354e6",74121:"3f5b9cdc",74231:"6a4650f8",74818:"b2143f3e",74939:"e0069315",75e3:"1e565904",75110:"4100dc62",75148:"bda08611",75216:"4dd080e3",75583:"9a9ae92e",75605:"229635d9",75692:"d0bce39b",76060:"63c1bac4",76363:"19b435c8",76437:"e524c582",76620:"d04f70ab",77426:"2a8f5b60",77902:"469e35f6",77952:"0f2eced3",78029:"077e8f2c",78330:"3b655924",78454:"243e9743",78485:"826d0f64",78761:"fb113da9",78980:"0f7558d6",79691:"b8c2fd0e",79694:"90bedd6e",79938:"f6177cf4",79999:"b2907541",80053:"74f42bde",80062:"fe21c522",80263:"a06ab1f9",80372:"1ee029b0",80733:"657cfd97",80748:"5a7edca5",81093:"49abe713",81786:"d1d272da",82060:"43ad112d",82173:"58f72bb4",82271:"42bf6897",82400:"ebfd081c",82444:"4c77c723",82641:"8c233587",82682:"ee82c36a",82717:"25829f78",82841:"3607f1bf",82859:"47584a73",83331:"e292aae4",83341:"df2e2a7a",83430:"dbae9ac3",83713:"eeeb7c66",84014:"c818c7b2",84340:"d766fee5",84905:"d463d264",85181:"0ee456e3",86015:"384a73b1",86037:"19da953b",86143:"d886d369",86264:"6bccbacf",86607:"b5bc064b",86743:"1a5aafa2",86932:"3b6982d0",87002:"a0929106",87054:"e59e7145",87164:"bed87687",87253:"65b66fb4",88290:"deebd8d3",88423:"6f24a30a",88426:"2467aaba",88463:"9dc845ad",88629:"49eb4573",88733:"9e1f5970",89339:"f6c096ac",895
20:"4a5b6a95",89820:"70b849b0",89846:"4ca4c970",89926:"7d9186b8",90241:"61409d11",90330:"7df4a720",90391:"6efc9024",90406:"4f5c583b",90538:"2b02e08f",90867:"a25f0ead",91073:"aae3a5b6",91542:"16428814",91755:"d6618687",91977:"af6778c9",92074:"f51839a9",92132:"85988d9b",92489:"038ab22d",93089:"1ea8fcbb",93115:"99e790a3",93259:"3be856d7",93302:"28b09874",93377:"a6fd242d",93380:"df1c9a07",93591:"b351b1a8",93735:"28fb6c5a",93803:"488776d6",93962:"1bd3c69f",93979:"aa8617b2",94138:"fe89382e",94141:"d29d4e8d",94172:"cd23ba4f",94195:"fde22395",94424:"f52c6355",94439:"2149dc0e",94478:"9c5e9030",94629:"2eaf9893",94640:"085012a9",94791:"98522ec5",95012:"f0e1bf8f",95086:"fae61d1a",95260:"573f146c",95369:"87993913",96136:"63754c59",96188:"52a77d95",96247:"3fa61b10",96369:"31df64c6",96739:"522823a5",96938:"fc55850f",96992:"0a998dcd",97451:"91c6999c",97507:"79735d81",97597:"ed8ae9e8",97616:"1205e2d5",97622:"5a16c207",97689:"64d0c3a5",97892:"38c82c13",97899:"9dc5e1cd",98091:"f15d3941",98094:"b271d297",98381:"76ca2bf9",98647:"956d09f7",98651:"23f302c8",98953:"83d60e10",98959:"963763fc",99056:"93e03b8e",99181:"b04f2d24",99348:"7be329f9",99548:"361872e3",99553:"0e71de72",99646:"8962e152",99670:"0d439352",99722:"c78385f1",99924:"5ffd65fa",99944:"7deb33f6"}[e]+".js"},n.miniCssF=function(e){return"assets/css/styles.e9faf9bb.css"},n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),n.o=function(e,c){return Object.prototype.hasOwnProperty.call(e,c)},a={},d="linkis-web-apache:",n.l=function(e,c,f,b){if(a[e])a[e].push(c);else{var t,r;if(void 0!==f)for(var o=document.getElementsByTagName("script"),i=0;i Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/2022/02/08/how-to-user-blog/index.html b/zh-CN/blog/2022/02/08/how-to-user-blog/index.html index 7c6725f72d3..e195ed95874 100644 --- a/zh-CN/blog/2022/02/08/how-to-user-blog/index.html +++ b/zh-CN/blog/2022/02/08/how-to-user-blog/index.html @@ -7,7 +7,7 @@ 如何编写博客 | Apache Linkis - + @@ -31,7 +31,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/2022/02/21/linkis-deploy/index.html b/zh-CN/blog/2022/02/21/linkis-deploy/index.html index ef0b5feba6a..9dd8f99683e 100644 --- a/zh-CN/blog/2022/02/21/linkis-deploy/index.html +++ b/zh-CN/blog/2022/02/21/linkis-deploy/index.html @@ -7,7 +7,7 @@ Linkis 部署排障 | Apache Linkis - + @@ -78,7 +78,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/2022/03/20/openlookeng/index.html b/zh-CN/blog/2022/03/20/openlookeng/index.html index 7bf21b2e61f..324a4f7ec62 100644 --- a/zh-CN/blog/2022/03/20/openlookeng/index.html +++ b/zh-CN/blog/2022/03/20/openlookeng/index.html @@ -7,7 +7,7 @@ OpenLooKeng的引擎的实现 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/2022/04/15/how-to-download-engineconn-plugin/index.html b/zh-CN/blog/2022/04/15/how-to-download-engineconn-plugin/index.html index 99b61493846..a6e14b7b8a9 100644 --- a/zh-CN/blog/2022/04/15/how-to-download-engineconn-plugin/index.html +++ b/zh-CN/blog/2022/04/15/how-to-download-engineconn-plugin/index.html @@ -7,7 +7,7 @@ 如何下载安装包中默认没有的引擎插件 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/2022/06/09/meetup-content-review/index.html index 0cab3f5dc22..36387c213d4 100644 --- a/zh-CN/blog/2022/06/09/meetup-content-review/index.html +++ b/zh-CN/blog/2022/06/09/meetup-content-review/index.html @@ -7,7 +7,7 @@ Apache Linkis(Incubating) Meetup | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/2022/07/04/how-to-add-auto-bot/index.html b/zh-CN/blog/2022/07/04/how-to-add-auto-bot/index.html index 6775315995f..0bc1a348bc4 100644 --- a/zh-CN/blog/2022/07/04/how-to-add-auto-bot/index.html +++ b/zh-CN/blog/2022/07/04/how-to-add-auto-bot/index.html @@ -7,7 +7,7 @@ 如何为github仓库添加一个github action | Apache Linkis - + @@ -37,7 +37,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/2022/07/16/deploy-linkis-with-kubernetes/index.html b/zh-CN/blog/2022/07/16/deploy-linkis-with-kubernetes/index.html index 17e6702e3d9..6edd3ef8125 100644 --- a/zh-CN/blog/2022/07/16/deploy-linkis-with-kubernetes/index.html +++ b/zh-CN/blog/2022/07/16/deploy-linkis-with-kubernetes/index.html @@ -7,7 +7,7 @@ 部署Linkis到Kubernetes | Apache Linkis - + @@ -51,7 +51,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/archive/index.html b/zh-CN/blog/archive/index.html index 09c474abbbc..584829b0d78 100644 --- a/zh-CN/blog/archive/index.html +++ b/zh-CN/blog/archive/index.html @@ -7,7 +7,7 @@ 历史博文 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/index.html b/zh-CN/blog/index.html index fb5e35122ea..d7ff1a9ff03 100644 --- a/zh-CN/blog/index.html +++ b/zh-CN/blog/index.html @@ -7,7 +7,7 @@ Blog | Apache Linkis - + @@ -77,7 +77,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/page/2/index.html b/zh-CN/blog/page/2/index.html index 5c2e684e42c..eabf855d1bd 100644 --- a/zh-CN/blog/page/2/index.html +++ b/zh-CN/blog/page/2/index.html @@ -7,7 +7,7 @@ Blog | Apache Linkis - + @@ -78,7 +78,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/tags/blog/index.html index b564d04909c..122c0e0449e 100644 --- a/zh-CN/blog/tags/blog/index.html +++ b/zh-CN/blog/tags/blog/index.html @@ -7,7 +7,7 @@ 1 篇博文 含有标签「blog」 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/tags/engine/index.html index 23530216508..e74a7143abe 100644 --- a/zh-CN/blog/tags/engine/index.html +++ b/zh-CN/blog/tags/engine/index.html @@ -7,7 +7,7 @@ 2 篇博文 含有标签「engine」 | Apache Linkis - + @@ -25,7 +25,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/tags/github/index.html index 9e5b4abccf4..38e3751d8c6 100644 --- a/zh-CN/blog/tags/github/index.html +++ b/zh-CN/blog/tags/github/index.html @@ -7,7 +7,7 @@ 2 篇博文 含有标签「github」 | Apache Linkis - + @@ -70,7 +70,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/tags/guide/index.html index 1cfd42eb08d..00fb39a8e5e 100644 --- a/zh-CN/blog/tags/guide/index.html +++ b/zh-CN/blog/tags/guide/index.html @@ -7,7 +7,7 @@ 2 篇博文 含有标签「guide」 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/tags/index.html b/zh-CN/blog/tags/index.html index 540691b0bf4..6aed53d31cf 100644 --- a/zh-CN/blog/tags/index.html +++ b/zh-CN/blog/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/tags/meetup/index.html index 2a430b401f6..c88b10f90cd 100644 --- a/zh-CN/blog/tags/meetup/index.html +++ b/zh-CN/blog/tags/meetup/index.html @@ -7,7 +7,7 @@ 1 篇博文 含有标签「meetup」 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/api/index.html b/zh-CN/community/development_specification/api/index.html index 35cb5be32a6..6944f491913 100644 --- a/zh-CN/community/development_specification/api/index.html +++ b/zh-CN/community/development_specification/api/index.html @@ -7,7 +7,7 @@ 接口规范 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/concurrent/index.html b/zh-CN/community/development_specification/concurrent/index.html index 6c911125e6e..e9108dd4f17 100644 --- a/zh-CN/community/development_specification/concurrent/index.html +++ b/zh-CN/community/development_specification/concurrent/index.html @@ -7,7 +7,7 @@ 并发规范 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/exception_catch/index.html b/zh-CN/community/development_specification/exception_catch/index.html index 97f517b7b66..038e7788f11 100644 --- a/zh-CN/community/development_specification/exception_catch/index.html +++ b/zh-CN/community/development_specification/exception_catch/index.html @@ -7,7 +7,7 @@ 异常规范 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/license/index.html b/zh-CN/community/development_specification/license/index.html index 100d37fa739..c6ea49d80d0 100644 --- a/zh-CN/community/development_specification/license/index.html +++ b/zh-CN/community/development_specification/license/index.html @@ -7,7 +7,7 @@ License 须知 | Apache Linkis - + @@ -39,7 +39,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/log/index.html b/zh-CN/community/development_specification/log/index.html index 78ada172c2e..8f30fffb7bc 100644 --- a/zh-CN/community/development_specification/log/index.html +++ b/zh-CN/community/development_specification/log/index.html @@ -7,7 +7,7 @@ 日志规范 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/overview/index.html b/zh-CN/community/development_specification/overview/index.html index 6be8c96ea95..b472e428d40 100644 --- a/zh-CN/community/development_specification/overview/index.html +++ b/zh-CN/community/development_specification/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/path_usage/index.html b/zh-CN/community/development_specification/path_usage/index.html index 7a5198278ec..b37abeac603 100644 --- a/zh-CN/community/development_specification/path_usage/index.html +++ b/zh-CN/community/development_specification/path_usage/index.html @@ -7,7 +7,7 @@ 路径规范 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/programming_specification/index.html b/zh-CN/community/development_specification/programming_specification/index.html index 88319f7a817..887bc9fb4b9 100644 --- a/zh-CN/community/development_specification/programming_specification/index.html +++ b/zh-CN/community/development_specification/programming_specification/index.html @@ -7,7 +7,7 @@ 编程规约 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/release-notes/index.html b/zh-CN/community/development_specification/release-notes/index.html index 7e584818a5b..f1a363cad85 100644 --- a/zh-CN/community/development_specification/release-notes/index.html +++ b/zh-CN/community/development_specification/release-notes/index.html @@ -7,7 +7,7 @@ Release-Notes 编写规范 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/unit_test/index.html b/zh-CN/community/development_specification/unit_test/index.html index 8f947fa602a..fd89647f721 100644 --- a/zh-CN/community/development_specification/unit_test/index.html +++ b/zh-CN/community/development_specification/unit_test/index.html @@ -7,7 +7,7 @@ 测试规约 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/version_feature_specifications/index.html b/zh-CN/community/development_specification/version_feature_specifications/index.html index 9923e1f2fdf..4d5b7341721 100644 --- a/zh-CN/community/development_specification/version_feature_specifications/index.html +++ b/zh-CN/community/development_specification/version_feature_specifications/index.html @@ -7,7 +7,7 @@ 版本和新特性规范 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/how-to-contribute-to-website/index.html b/zh-CN/community/how-to-contribute-to-website/index.html index b0c2e395255..bd305d92744 100644 --- a/zh-CN/community/how-to-contribute-to-website/index.html +++ b/zh-CN/community/how-to-contribute-to-website/index.html @@ -7,7 +7,7 @@ 如何参与官网贡献 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/how-to-contribute/index.html b/zh-CN/community/how-to-contribute/index.html index 9883cbdd099..52299e89974 100644 --- a/zh-CN/community/how-to-contribute/index.html +++ b/zh-CN/community/how-to-contribute/index.html @@ -7,7 +7,7 @@ 如何参与项目贡献 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/how-to-email/index.html b/zh-CN/community/how-to-email/index.html index 22361131ab9..8878d711262 100644 --- a/zh-CN/community/how-to-email/index.html +++ b/zh-CN/community/how-to-email/index.html @@ -7,7 +7,7 @@ 如何使用邮件列表 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/how-to-participate-in-developer-meetings/index.html b/zh-CN/community/how-to-participate-in-developer-meetings/index.html index 1463167b749..ca013234c3b 100644 --- a/zh-CN/community/how-to-participate-in-developer-meetings/index.html +++ b/zh-CN/community/how-to-participate-in-developer-meetings/index.html @@ -7,7 +7,7 @@ 如何参与开发者例会 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/how-to-release/index.html b/zh-CN/community/how-to-release/index.html index 6fb2314d12b..bd23816a375 100644 --- a/zh-CN/community/how-to-release/index.html +++ b/zh-CN/community/how-to-release/index.html @@ -7,7 +7,7 @@ 如何发布版本 | Apache Linkis - + @@ -147,7 +147,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/how-to-sign-apache-icla/index.html b/zh-CN/community/how-to-sign-apache-icla/index.html index 565a5365885..01ff5c1cd6f 100644 --- a/zh-CN/community/how-to-sign-apache-icla/index.html +++ b/zh-CN/community/how-to-sign-apache-icla/index.html @@ -7,7 +7,7 @@ ICLA 签署流程 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/how-to-subscribe/index.html b/zh-CN/community/how-to-subscribe/index.html index f212fee9d64..389a815f097 100644 --- a/zh-CN/community/how-to-subscribe/index.html +++ b/zh-CN/community/how-to-subscribe/index.html @@ -7,7 +7,7 @@ 订阅邮件列表 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/how-to-verify/index.html b/zh-CN/community/how-to-verify/index.html index 30c4815272a..2d6daf2e36b 100644 --- a/zh-CN/community/how-to-verify/index.html +++ b/zh-CN/community/how-to-verify/index.html @@ -7,7 +7,7 @@ 发布版本验证 | Apache Linkis - + @@ -31,7 +31,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/how-to-vote-a-committer-ppmc/index.html b/zh-CN/community/how-to-vote-a-committer-ppmc/index.html index 36b08f26645..ae5e317b1e8 100644 --- a/zh-CN/community/how-to-vote-a-committer-ppmc/index.html +++ b/zh-CN/community/how-to-vote-a-committer-ppmc/index.html @@ -7,7 +7,7 @@ 如何提名新的Committer 和 PPMC | Apache Linkis - + @@ -50,7 +50,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/how-to-write-unit-test-code/index.html b/zh-CN/community/how-to-write-unit-test-code/index.html index ee49c06f81f..9c2f20fd753 100644 --- a/zh-CN/community/how-to-write-unit-test-code/index.html +++ b/zh-CN/community/how-to-write-unit-test-code/index.html @@ -7,7 +7,7 @@ 如何编写单元测试代码 | Apache Linkis - + @@ -61,7 +61,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/microservice-division/index.html b/zh-CN/community/microservice-division/index.html index dd83e3319e7..be36f0fd8d9 100644 --- a/zh-CN/community/microservice-division/index.html +++ b/zh-CN/community/microservice-division/index.html @@ -7,7 +7,7 @@ 微服务的划分 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/ppmc-related-permission-configuration/index.html b/zh-CN/community/ppmc-related-permission-configuration/index.html index 5a81878243d..600b04edc18 100644 --- a/zh-CN/community/ppmc-related-permission-configuration/index.html +++ b/zh-CN/community/ppmc-related-permission-configuration/index.html @@ -7,7 +7,7 @@ PPMC/Committer 相关权限配置 | Apache Linkis - + @@ -30,7 +30,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/security/index.html b/zh-CN/community/security/index.html index e571da676a9..63df6615454 100644 --- a/zh-CN/community/security/index.html +++ b/zh-CN/community/security/index.html @@ -7,7 +7,7 @@ 安全 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/api/login_api/index.html b/zh-CN/docs/0.11.0/api/login_api/index.html index a92320ab059..7dc301da0d0 100644 --- a/zh-CN/docs/0.11.0/api/login_api/index.html +++ b/zh-CN/docs/0.11.0/api/login_api/index.html @@ -7,7 +7,7 @@ 登陆 Api | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/api/rest_api/index.html b/zh-CN/docs/0.11.0/api/rest_api/index.html index 3d105acf87b..723da44c3a2 100644 --- a/zh-CN/docs/0.11.0/api/rest_api/index.html +++ b/zh-CN/docs/0.11.0/api/rest_api/index.html @@ -7,7 +7,7 @@ Restful Api | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/api/web_socket/index.html b/zh-CN/docs/0.11.0/api/web_socket/index.html index 33f2ef1653b..72cbe0f6f2f 100644 --- a/zh-CN/docs/0.11.0/api/web_socket/index.html +++ b/zh-CN/docs/0.11.0/api/web_socket/index.html @@ -7,7 +7,7 @@ WebSocket | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/commons/real-time_log_push/index.html b/zh-CN/docs/0.11.0/architecture/commons/real-time_log_push/index.html index eae2b851b6c..bc9de49cdad 100644 --- a/zh-CN/docs/0.11.0/architecture/commons/real-time_log_push/index.html +++ b/zh-CN/docs/0.11.0/architecture/commons/real-time_log_push/index.html @@ -7,7 +7,7 @@ 异步日志实时推送 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/commons/rpc/index.html b/zh-CN/docs/0.11.0/architecture/commons/rpc/index.html index 55ce74ef4d8..ac029b7c794 100644 --- a/zh-CN/docs/0.11.0/architecture/commons/rpc/index.html +++ b/zh-CN/docs/0.11.0/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/commons/scheduler/index.html b/zh-CN/docs/0.11.0/architecture/commons/scheduler/index.html index 2e1c5b387ef..a3ac79534c0 100644 --- a/zh-CN/docs/0.11.0/architecture/commons/scheduler/index.html +++ b/zh-CN/docs/0.11.0/architecture/commons/scheduler/index.html @@ -7,7 +7,7 @@ Scheduler架构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/overview/index.html b/zh-CN/docs/0.11.0/architecture/overview/index.html index 9d5cdde9173..49880aa816c 100644 --- a/zh-CN/docs/0.11.0/architecture/overview/index.html +++ b/zh-CN/docs/0.11.0/architecture/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/rm/index.html b/zh-CN/docs/0.11.0/architecture/rm/index.html index 365f27b501c..d62804e137b 100644 --- a/zh-CN/docs/0.11.0/architecture/rm/index.html +++ b/zh-CN/docs/0.11.0/architecture/rm/index.html @@ -7,7 +7,7 @@ RM 设计 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/storage/file_system/index.html b/zh-CN/docs/0.11.0/architecture/storage/file_system/index.html index b066af04f84..031a9701cd7 100644 --- a/zh-CN/docs/0.11.0/architecture/storage/file_system/index.html +++ b/zh-CN/docs/0.11.0/architecture/storage/file_system/index.html @@ -7,7 +7,7 @@ 对接多种文件系统 | Apache Linkis - + @@ -27,7 +27,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/storage/remote_file_system_architecture_design/index.html b/zh-CN/docs/0.11.0/architecture/storage/remote_file_system_architecture_design/index.html index bc09c22b2b3..048a5c0de69 100644 --- a/zh-CN/docs/0.11.0/architecture/storage/remote_file_system_architecture_design/index.html +++ b/zh-CN/docs/0.11.0/architecture/storage/remote_file_system_architecture_design/index.html @@ -7,7 +7,7 @@ 访问远程文件系统架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/storage/resultset_file/index.html b/zh-CN/docs/0.11.0/architecture/storage/resultset_file/index.html index f1caa45cebc..a53b1f105dd 100644 --- a/zh-CN/docs/0.11.0/architecture/storage/resultset_file/index.html +++ b/zh-CN/docs/0.11.0/architecture/storage/resultset_file/index.html @@ -7,7 +7,7 @@ 结果集文件存储 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/ujes/asynchronous_thread_pool/index.html b/zh-CN/docs/0.11.0/architecture/ujes/asynchronous_thread_pool/index.html index 6ffb2113b87..cc259b93c6b 100644 --- a/zh-CN/docs/0.11.0/architecture/ujes/asynchronous_thread_pool/index.html +++ b/zh-CN/docs/0.11.0/architecture/ujes/asynchronous_thread_pool/index.html @@ -7,7 +7,7 @@ 全异步线程池调用 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/ujes/file_import_and_export_structure/index.html b/zh-CN/docs/0.11.0/architecture/ujes/file_import_and_export_structure/index.html index 204fcbd5d49..2c708756094 100644 --- a/zh-CN/docs/0.11.0/architecture/ujes/file_import_and_export_structure/index.html +++ b/zh-CN/docs/0.11.0/architecture/ujes/file_import_and_export_structure/index.html @@ -7,7 +7,7 @@ Spark引擎的文件导入导出 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/ujes/ujes_design/index.html b/zh-CN/docs/0.11.0/architecture/ujes/ujes_design/index.html index a6d0ef1bdf9..c90b4a1b350 100644 --- a/zh-CN/docs/0.11.0/architecture/ujes/ujes_design/index.html +++ b/zh-CN/docs/0.11.0/architecture/ujes/ujes_design/index.html @@ -7,7 +7,7 @@ UJES设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/websocket/index.html b/zh-CN/docs/0.11.0/architecture/websocket/index.html index 4abfed708c7..9eb641c988d 100644 --- a/zh-CN/docs/0.11.0/architecture/websocket/index.html +++ b/zh-CN/docs/0.11.0/architecture/websocket/index.html @@ -7,7 +7,7 @@ WebSocket请求转发实现 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/deployment/engine_conn_plugin_installation/index.html b/zh-CN/docs/0.11.0/deployment/engine_conn_plugin_installation/index.html index f29ac5b7c31..b5941aef62d 100644 --- a/zh-CN/docs/0.11.0/deployment/engine_conn_plugin_installation/index.html +++ b/zh-CN/docs/0.11.0/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ 安装 EngineConnPlugin 引擎 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/deployment/production_deployment _guide/index.html b/zh-CN/docs/0.11.0/deployment/production_deployment _guide/index.html index 66e73d9471c..8e87f06870a 100644 --- a/zh-CN/docs/0.11.0/deployment/production_deployment _guide/index.html +++ b/zh-CN/docs/0.11.0/deployment/production_deployment _guide/index.html @@ -7,7 +7,7 @@ 生产部署参考指南 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/deployment/quick_deploy/index.html b/zh-CN/docs/0.11.0/deployment/quick_deploy/index.html index f2f1b647708..aa9a187bd11 100644 --- a/zh-CN/docs/0.11.0/deployment/quick_deploy/index.html +++ b/zh-CN/docs/0.11.0/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ 快速部署 | Apache Linkis - + @@ -34,7 +34,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/deployment/quick_start/index.html b/zh-CN/docs/0.11.0/deployment/quick_start/index.html index 859734449c4..53ef760421e 100644 --- a/zh-CN/docs/0.11.0/deployment/quick_start/index.html +++ b/zh-CN/docs/0.11.0/deployment/quick_start/index.html @@ -7,7 +7,7 @@ 快速启动 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/deployment/sourcecode_hierarchical_structure/index.html b/zh-CN/docs/0.11.0/deployment/sourcecode_hierarchical_structure/index.html index 9d12dbdaf56..135b2350eba 100644 --- a/zh-CN/docs/0.11.0/deployment/sourcecode_hierarchical_structure/index.html +++ b/zh-CN/docs/0.11.0/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 源码目录结构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/development/compile_and_package/index.html b/zh-CN/docs/0.11.0/development/compile_and_package/index.html index 36273eadda4..a48c42d477d 100644 --- a/zh-CN/docs/0.11.0/development/compile_and_package/index.html +++ b/zh-CN/docs/0.11.0/development/compile_and_package/index.html @@ -7,7 +7,7 @@ Linkis 编译打包 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/development/install-server/index.html b/zh-CN/docs/0.11.0/development/install-server/index.html index e5d5d1e2283..39a788d270e 100644 --- a/zh-CN/docs/0.11.0/development/install-server/index.html +++ b/zh-CN/docs/0.11.0/development/install-server/index.html @@ -7,7 +7,7 @@ 单个服务的安装 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/development/new_engine_conn/index.html b/zh-CN/docs/0.11.0/development/new_engine_conn/index.html index c3a713ea0ad..c8d735d829c 100644 --- a/zh-CN/docs/0.11.0/development/new_engine_conn/index.html +++ b/zh-CN/docs/0.11.0/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ 如何实现一个新引擎 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/development/start-server/index.html b/zh-CN/docs/0.11.0/development/start-server/index.html index 482dafb395f..a3d453000bc 100644 --- a/zh-CN/docs/0.11.0/development/start-server/index.html +++ b/zh-CN/docs/0.11.0/development/start-server/index.html @@ -7,7 +7,7 @@ 单个服务的启动 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/engine_usage/hive/index.html b/zh-CN/docs/0.11.0/engine_usage/hive/index.html index e3edf6c7a2d..01c9e591d03 100644 --- a/zh-CN/docs/0.11.0/engine_usage/hive/index.html +++ b/zh-CN/docs/0.11.0/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive 引擎 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/engine_usage/python/index.html b/zh-CN/docs/0.11.0/engine_usage/python/index.html index 191e69fd4fc..953573a4032 100644 --- a/zh-CN/docs/0.11.0/engine_usage/python/index.html +++ b/zh-CN/docs/0.11.0/engine_usage/python/index.html @@ -7,7 +7,7 @@ Python引擎 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/engine_usage/spark/index.html b/zh-CN/docs/0.11.0/engine_usage/spark/index.html index 491f895e19b..990cfca0c2c 100644 --- a/zh-CN/docs/0.11.0/engine_usage/spark/index.html +++ b/zh-CN/docs/0.11.0/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark引擎 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/introduction/index.html b/zh-CN/docs/0.11.0/introduction/index.html index 02fef94b428..c89d2a5dbf4 100644 --- a/zh-CN/docs/0.11.0/introduction/index.html +++ b/zh-CN/docs/0.11.0/introduction/index.html @@ -7,7 +7,7 @@ Linkis 简述 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/tags/index.html b/zh-CN/docs/0.11.0/tags/index.html index e02b641a883..cf78a168cf9 100644 --- a/zh-CN/docs/0.11.0/tags/index.html +++ b/zh-CN/docs/0.11.0/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/upgrade/upgrade_from_0.9.0_to_0.9.1_guide/index.html b/zh-CN/docs/0.11.0/upgrade/upgrade_from_0.9.0_to_0.9.1_guide/index.html index c18a5f6030b..f3490b4b178 100644 --- a/zh-CN/docs/0.11.0/upgrade/upgrade_from_0.9.0_to_0.9.1_guide/index.html +++ b/zh-CN/docs/0.11.0/upgrade/upgrade_from_0.9.0_to_0.9.1_guide/index.html @@ -7,7 +7,7 @@ 0.9.0 升级 0.9.1 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/user_guide/1.0_sdk_manual/index.html b/zh-CN/docs/0.11.0/user_guide/1.0_sdk_manual/index.html index cb51271589e..e3af5c422ef 100644 --- a/zh-CN/docs/0.11.0/user_guide/1.0_sdk_manual/index.html +++ b/zh-CN/docs/0.11.0/user_guide/1.0_sdk_manual/index.html @@ -7,7 +7,7 @@ 1.0 SDK的使用 | Apache Linkis - + @@ -60,7 +60,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/user_guide/X_sdk_manual/index.html b/zh-CN/docs/0.11.0/user_guide/X_sdk_manual/index.html index f8594acb92d..23a264964f6 100644 --- a/zh-CN/docs/0.11.0/user_guide/X_sdk_manual/index.html +++ b/zh-CN/docs/0.11.0/user_guide/X_sdk_manual/index.html @@ -7,7 +7,7 @@ 0.X SDK的使用 | Apache Linkis - + @@ -36,7 +36,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/api/jdbc_api/index.html b/zh-CN/docs/1.0.2/api/jdbc_api/index.html index 47db69eff23..74ac7c4825a 100644 --- a/zh-CN/docs/1.0.2/api/jdbc_api/index.html +++ b/zh-CN/docs/1.0.2/api/jdbc_api/index.html @@ -7,7 +7,7 @@ 任务提交执行JDBC API文档 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/api/linkis_task_operator/index.html b/zh-CN/docs/1.0.2/api/linkis_task_operator/index.html index 2611c4735d2..07e0815bdf2 100644 --- a/zh-CN/docs/1.0.2/api/linkis_task_operator/index.html +++ b/zh-CN/docs/1.0.2/api/linkis_task_operator/index.html @@ -7,7 +7,7 @@ 任务提交执行 Rest API 文档 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/api/login_api/index.html b/zh-CN/docs/1.0.2/api/login_api/index.html index 4cd08347a99..ed6d7729d55 100644 --- a/zh-CN/docs/1.0.2/api/login_api/index.html +++ b/zh-CN/docs/1.0.2/api/login_api/index.html @@ -7,7 +7,7 @@ 登录文档 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/api/overview/index.html b/zh-CN/docs/1.0.2/api/overview/index.html index 865634ef5a3..616969cd6e2 100644 --- a/zh-CN/docs/1.0.2/api/overview/index.html +++ b/zh-CN/docs/1.0.2/api/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/add_an_engine_conn/index.html b/zh-CN/docs/1.0.2/architecture/add_an_engine_conn/index.html index 3c667da7430..85e9755b7fd 100644 --- a/zh-CN/docs/1.0.2/architecture/add_an_engine_conn/index.html +++ b/zh-CN/docs/1.0.2/architecture/add_an_engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn 新增流程 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/commons/message_scheduler/index.html b/zh-CN/docs/1.0.2/architecture/commons/message_scheduler/index.html index 01ca86d6323..a768dfb8eb6 100644 --- a/zh-CN/docs/1.0.2/architecture/commons/message_scheduler/index.html +++ b/zh-CN/docs/1.0.2/architecture/commons/message_scheduler/index.html @@ -7,7 +7,7 @@ Message Scheduler 模块 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/commons/rpc/index.html b/zh-CN/docs/1.0.2/architecture/commons/rpc/index.html index eb5bf2627d1..ceb5cc2e750 100644 --- a/zh-CN/docs/1.0.2/architecture/commons/rpc/index.html +++ b/zh-CN/docs/1.0.2/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC 模块 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn/index.html b/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn/index.html index 953a8b4c0d6..65e0b66a738 100644 --- a/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn/index.html +++ b/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_manager/index.html b/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_manager/index.html index bfeb02ffc87..f27e874d76c 100644 --- a/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_manager/index.html +++ b/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_manager/index.html @@ -7,7 +7,7 @@ EngineConnManager架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_plugin/index.html b/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_plugin/index.html index 4b27a441a66..fe8f988049b 100644 --- a/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_plugin/index.html +++ b/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_plugin/index.html @@ -7,7 +7,7 @@ EngineConnPlugin(ECP)架构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/computation_governance_services/entrance/index.html b/zh-CN/docs/1.0.2/architecture/computation_governance_services/entrance/index.html index 518ae405d1e..45648631456 100644 --- a/zh-CN/docs/1.0.2/architecture/computation_governance_services/entrance/index.html +++ b/zh-CN/docs/1.0.2/architecture/computation_governance_services/entrance/index.html @@ -7,7 +7,7 @@ Entrance 架构设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis-cli/index.html b/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis-cli/index.html index df6131cd33c..bf9fc5426ff 100644 --- a/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis-cli/index.html +++ b/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis-cli/index.html @@ -7,7 +7,7 @@ Linkis Client 架构设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/app_manager/index.html b/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/app_manager/index.html index d7e8d7da95b..fe69d0664b8 100644 --- a/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/app_manager/index.html +++ b/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/app_manager/index.html @@ -7,7 +7,7 @@ AppManager 架构 | Apache Linkis - + @@ -29,7 +29,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/label_manager/index.html b/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/label_manager/index.html index 0e9381ce88f..760926f122e 100644 --- a/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/label_manager/index.html +++ b/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/label_manager/index.html @@ -7,7 +7,7 @@ LabelManager 架构 | Apache Linkis - + @@ -26,7 +26,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/overview/index.html b/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/overview/index.html index 9e879256c40..2de5c1d3cf0 100644 --- a/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/overview/index.html +++ b/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/resource_manager/index.html b/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/resource_manager/index.html index 1e2daf19aff..63de5220bdc 100644 --- a/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/resource_manager/index.html +++ b/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/resource_manager/index.html @@ -7,7 +7,7 @@ ResourceManager 架构 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/computation_governance_services/overview/index.html b/zh-CN/docs/1.0.2/architecture/computation_governance_services/overview/index.html index 0d1548d4685..0f595bc8410 100644 --- a/zh-CN/docs/1.0.2/architecture/computation_governance_services/overview/index.html +++ b/zh-CN/docs/1.0.2/architecture/computation_governance_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/difference_between_1.0_and_0.x/index.html b/zh-CN/docs/1.0.2/architecture/difference_between_1.0_and_0.x/index.html index b4a1d367fdc..5ed765a7e64 100644 --- a/zh-CN/docs/1.0.2/architecture/difference_between_1.0_and_0.x/index.html +++ b/zh-CN/docs/1.0.2/architecture/difference_between_1.0_and_0.x/index.html @@ -7,7 +7,7 @@ Linkis1.0 与 Linkis0.X 的区别简述 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/job_submission_preparation_and_execution_process/index.html b/zh-CN/docs/1.0.2/architecture/job_submission_preparation_and_execution_process/index.html index f203aa28dfd..23cc0c808bd 100644 --- a/zh-CN/docs/1.0.2/architecture/job_submission_preparation_and_execution_process/index.html +++ b/zh-CN/docs/1.0.2/architecture/job_submission_preparation_and_execution_process/index.html @@ -7,7 +7,7 @@ Job 提交准备执行流程 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/microservice_governance_services/gateway/index.html b/zh-CN/docs/1.0.2/architecture/microservice_governance_services/gateway/index.html index 9ca1cd64948..7ed7b00201b 100644 --- a/zh-CN/docs/1.0.2/architecture/microservice_governance_services/gateway/index.html +++ b/zh-CN/docs/1.0.2/architecture/microservice_governance_services/gateway/index.html @@ -7,7 +7,7 @@ 网关 Gateway 架构 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/microservice_governance_services/overview/index.html b/zh-CN/docs/1.0.2/architecture/microservice_governance_services/overview/index.html index 6dedb365f0e..ebe742b6600 100644 --- a/zh-CN/docs/1.0.2/architecture/microservice_governance_services/overview/index.html +++ b/zh-CN/docs/1.0.2/architecture/microservice_governance_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/overview/index.html b/zh-CN/docs/1.0.2/architecture/overview/index.html index 656cf483745..5819d8bfd7e 100644 --- a/zh-CN/docs/1.0.2/architecture/overview/index.html +++ b/zh-CN/docs/1.0.2/architecture/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/bml/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/bml/index.html index 75bfed141f0..5a29dcd2d73 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/bml/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/bml/index.html @@ -7,7 +7,7 @@ BML 物料库架构 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service/index.html index ac05d7a4b06..64fcedbe252 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service/index.html @@ -7,7 +7,7 @@ CS 架构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_cache/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_cache/index.html index fa08ef5107b..dbc3001d860 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_cache/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_cache/index.html @@ -7,7 +7,7 @@ CS Cache 架构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_client/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_client/index.html index c161378226c..8d871751c07 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_client/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_client/index.html @@ -7,7 +7,7 @@ CS Client | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html index abb27d81183..8042c2f485e 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html @@ -7,7 +7,7 @@ CS HA 架构设计 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_listener/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_listener/index.html index cd7f3931e84..579d3bd06c5 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_listener/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_listener/index.html @@ -7,7 +7,7 @@ CS Listener 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_persistence/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_persistence/index.html index dd508fe212b..b7d42fedc29 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_persistence/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_persistence/index.html @@ -7,7 +7,7 @@ CS Persistence 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_search/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_search/index.html index 8ed27692631..02b9edfc220 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_search/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_search/index.html @@ -7,7 +7,7 @@ CS Search 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/overview/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/overview/index.html index 993f99ab54c..971190b89bb 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/overview/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -25,7 +25,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/overview/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/overview/index.html index fdc188e00a2..3ae626f7486 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/overview/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/public_service/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/public_service/index.html index 1bfa1c59357..7d5240a400d 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/public_service/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/public_service/index.html @@ -7,7 +7,7 @@ PublicService 公共服务架构 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/contact/index.html b/zh-CN/docs/1.0.2/contact/index.html index 0547216c7c9..b6295612e68 100644 --- a/zh-CN/docs/1.0.2/contact/index.html +++ b/zh-CN/docs/1.0.2/contact/index.html @@ -7,7 +7,7 @@ 联系我们 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/deployment/cluster_deployment/index.html b/zh-CN/docs/1.0.2/deployment/cluster_deployment/index.html index 89db32b65d8..32b7937087d 100644 --- a/zh-CN/docs/1.0.2/deployment/cluster_deployment/index.html +++ b/zh-CN/docs/1.0.2/deployment/cluster_deployment/index.html @@ -7,7 +7,7 @@ 分布式部署 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/deployment/engine_conn_plugin_installation/index.html b/zh-CN/docs/1.0.2/deployment/engine_conn_plugin_installation/index.html index 06d6470d771..f4ecc4dc41e 100644 --- a/zh-CN/docs/1.0.2/deployment/engine_conn_plugin_installation/index.html +++ b/zh-CN/docs/1.0.2/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ 安装 EngineConnPlugin 引擎 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/deployment/installation_hierarchical_structure/index.html b/zh-CN/docs/1.0.2/deployment/installation_hierarchical_structure/index.html index 13fa1703c83..38409f80aac 100644 --- a/zh-CN/docs/1.0.2/deployment/installation_hierarchical_structure/index.html +++ b/zh-CN/docs/1.0.2/deployment/installation_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 安装包目录结构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/deployment/quick_deploy/index.html b/zh-CN/docs/1.0.2/deployment/quick_deploy/index.html index 3da93ddc563..a801b711e28 100644 --- a/zh-CN/docs/1.0.2/deployment/quick_deploy/index.html +++ b/zh-CN/docs/1.0.2/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ 快速部署 | Apache Linkis - + @@ -26,7 +26,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/deployment/sourcecode_hierarchical_structure/index.html b/zh-CN/docs/1.0.2/deployment/sourcecode_hierarchical_structure/index.html index 861b054ce5a..94b979f170a 100644 --- a/zh-CN/docs/1.0.2/deployment/sourcecode_hierarchical_structure/index.html +++ b/zh-CN/docs/1.0.2/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 源码目录结构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/deployment/web_install/index.html b/zh-CN/docs/1.0.2/deployment/web_install/index.html index 5c5ceaaffb7..b664a6fcdf4 100644 --- a/zh-CN/docs/1.0.2/deployment/web_install/index.html +++ b/zh-CN/docs/1.0.2/deployment/web_install/index.html @@ -7,7 +7,7 @@ 前端管理台部署 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/development/linkis_compile_and_package/index.html b/zh-CN/docs/1.0.2/development/linkis_compile_and_package/index.html index 9d0e2646766..efb8e6d06e2 100644 --- a/zh-CN/docs/1.0.2/development/linkis_compile_and_package/index.html +++ b/zh-CN/docs/1.0.2/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Linkis 编译打包 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/development/linkis_debug/index.html b/zh-CN/docs/1.0.2/development/linkis_debug/index.html index 6fdd29f2fbf..f057e71decb 100644 --- a/zh-CN/docs/1.0.2/development/linkis_debug/index.html +++ b/zh-CN/docs/1.0.2/development/linkis_debug/index.html @@ -7,7 +7,7 @@ 调试指引 | Apache Linkis - + @@ -49,7 +49,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/development/new_engine_conn/index.html b/zh-CN/docs/1.0.2/development/new_engine_conn/index.html index b90ba2e8ab2..a507c41bd80 100644 --- a/zh-CN/docs/1.0.2/development/new_engine_conn/index.html +++ b/zh-CN/docs/1.0.2/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ 如何实现一个新引擎 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/development/web_build/index.html b/zh-CN/docs/1.0.2/development/web_build/index.html index 7381897129b..612c7422e0c 100644 --- a/zh-CN/docs/1.0.2/development/web_build/index.html +++ b/zh-CN/docs/1.0.2/development/web_build/index.html @@ -7,7 +7,7 @@ 前端管理台编译 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/engine_usage/hive/index.html b/zh-CN/docs/1.0.2/engine_usage/hive/index.html index 16240c650e6..03ebfdb012d 100644 --- a/zh-CN/docs/1.0.2/engine_usage/hive/index.html +++ b/zh-CN/docs/1.0.2/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive 引擎 | Apache Linkis - + @@ -28,7 +28,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/engine_usage/jdbc/index.html b/zh-CN/docs/1.0.2/engine_usage/jdbc/index.html index 055663d33c2..fb66d1f8c6b 100644 --- a/zh-CN/docs/1.0.2/engine_usage/jdbc/index.html +++ b/zh-CN/docs/1.0.2/engine_usage/jdbc/index.html @@ -7,7 +7,7 @@ JDBC 引擎 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/engine_usage/overview/index.html b/zh-CN/docs/1.0.2/engine_usage/overview/index.html index 565ebb97454..a237ec7b0c1 100644 --- a/zh-CN/docs/1.0.2/engine_usage/overview/index.html +++ b/zh-CN/docs/1.0.2/engine_usage/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/engine_usage/python/index.html b/zh-CN/docs/1.0.2/engine_usage/python/index.html index 2f8f4f95af7..c9d72951f7f 100644 --- a/zh-CN/docs/1.0.2/engine_usage/python/index.html +++ b/zh-CN/docs/1.0.2/engine_usage/python/index.html @@ -7,7 +7,7 @@ Python 引擎 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/engine_usage/shell/index.html b/zh-CN/docs/1.0.2/engine_usage/shell/index.html index f79f4ef6c5a..e3c87f54fba 100644 --- a/zh-CN/docs/1.0.2/engine_usage/shell/index.html +++ b/zh-CN/docs/1.0.2/engine_usage/shell/index.html @@ -7,7 +7,7 @@ Shell 引擎 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/engine_usage/spark/index.html b/zh-CN/docs/1.0.2/engine_usage/spark/index.html index 19473bd0639..43b6bee1ebe 100644 --- a/zh-CN/docs/1.0.2/engine_usage/spark/index.html +++ b/zh-CN/docs/1.0.2/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark 引擎 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/introduction/index.html b/zh-CN/docs/1.0.2/introduction/index.html index 1581cb5cca4..9a7c3411354 100644 --- a/zh-CN/docs/1.0.2/introduction/index.html +++ b/zh-CN/docs/1.0.2/introduction/index.html @@ -7,7 +7,7 @@ Linkis 简述 | Apache Linkis - + @@ -26,7 +26,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/tags/index.html b/zh-CN/docs/1.0.2/tags/index.html index cdf5adc687d..52cd3310a3d 100644 --- a/zh-CN/docs/1.0.2/tags/index.html +++ b/zh-CN/docs/1.0.2/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/tuning_and_troubleshooting/configuration/index.html b/zh-CN/docs/1.0.2/tuning_and_troubleshooting/configuration/index.html index e78345b0618..baf616e3454 100644 --- a/zh-CN/docs/1.0.2/tuning_and_troubleshooting/configuration/index.html +++ b/zh-CN/docs/1.0.2/tuning_and_troubleshooting/configuration/index.html @@ -7,7 +7,7 @@ 参数列表 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/tuning_and_troubleshooting/overview/index.html b/zh-CN/docs/1.0.2/tuning_and_troubleshooting/overview/index.html index 8fab19064b5..cf278524d7b 100644 --- a/zh-CN/docs/1.0.2/tuning_and_troubleshooting/overview/index.html +++ b/zh-CN/docs/1.0.2/tuning_and_troubleshooting/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -33,7 +33,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/tuning_and_troubleshooting/tuning/index.html b/zh-CN/docs/1.0.2/tuning_and_troubleshooting/tuning/index.html index 3a68c2f69cc..ef8d8d34d08 100644 --- a/zh-CN/docs/1.0.2/tuning_and_troubleshooting/tuning/index.html +++ b/zh-CN/docs/1.0.2/tuning_and_troubleshooting/tuning/index.html @@ -7,7 +7,7 @@ 调优手册 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/upgrade/overview/index.html b/zh-CN/docs/1.0.2/upgrade/overview/index.html index 9d756da722a..a486b937d83 100644 --- a/zh-CN/docs/1.0.2/upgrade/overview/index.html +++ b/zh-CN/docs/1.0.2/upgrade/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/upgrade/upgrade_from_0.X_to_1.0_guide/index.html b/zh-CN/docs/1.0.2/upgrade/upgrade_from_0.X_to_1.0_guide/index.html index 628d75cdb56..5093d203448 100644 --- a/zh-CN/docs/1.0.2/upgrade/upgrade_from_0.X_to_1.0_guide/index.html +++ b/zh-CN/docs/1.0.2/upgrade/upgrade_from_0.X_to_1.0_guide/index.html @@ -7,7 +7,7 @@ 1.0升级指南 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/user_guide/console_manual/index.html b/zh-CN/docs/1.0.2/user_guide/console_manual/index.html index cbfedb7eb51..ec702caa90c 100644 --- a/zh-CN/docs/1.0.2/user_guide/console_manual/index.html +++ b/zh-CN/docs/1.0.2/user_guide/console_manual/index.html @@ -7,7 +7,7 @@ Linkis 管理台的使用 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/user_guide/how_to_use/index.html b/zh-CN/docs/1.0.2/user_guide/how_to_use/index.html index b7a10c85c20..7f73c600d27 100644 --- a/zh-CN/docs/1.0.2/user_guide/how_to_use/index.html +++ b/zh-CN/docs/1.0.2/user_guide/how_to_use/index.html @@ -7,7 +7,7 @@ 如何使用 Linkis1.0 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/user_guide/linkiscli_manual/index.html b/zh-CN/docs/1.0.2/user_guide/linkiscli_manual/index.html index bc809445b8a..a8fa171781d 100644 --- a/zh-CN/docs/1.0.2/user_guide/linkiscli_manual/index.html +++ b/zh-CN/docs/1.0.2/user_guide/linkiscli_manual/index.html @@ -7,7 +7,7 @@ Linkis-Cli 方式使用 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/user_guide/overview/index.html b/zh-CN/docs/1.0.2/user_guide/overview/index.html index 7dd5e5f1199..1d1be413c83 100644 --- a/zh-CN/docs/1.0.2/user_guide/overview/index.html +++ b/zh-CN/docs/1.0.2/user_guide/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/user_guide/sdk_manual/index.html b/zh-CN/docs/1.0.2/user_guide/sdk_manual/index.html index 32d182072bf..2c0a7cb06b3 100644 --- a/zh-CN/docs/1.0.2/user_guide/sdk_manual/index.html +++ b/zh-CN/docs/1.0.2/user_guide/sdk_manual/index.html @@ -7,7 +7,7 @@ JAVA SDK 方式使用 | Apache Linkis - + @@ -49,7 +49,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/api/jdbc_api/index.html b/zh-CN/docs/1.0.3/api/jdbc_api/index.html index 2de3cafe2db..a3c0ec8c6df 100644 --- a/zh-CN/docs/1.0.3/api/jdbc_api/index.html +++ b/zh-CN/docs/1.0.3/api/jdbc_api/index.html @@ -7,7 +7,7 @@ 任务提交执行 JDBC API 文档 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/api/linkis_task_operator/index.html b/zh-CN/docs/1.0.3/api/linkis_task_operator/index.html index 811d131abdb..240ee49837c 100644 --- a/zh-CN/docs/1.0.3/api/linkis_task_operator/index.html +++ b/zh-CN/docs/1.0.3/api/linkis_task_operator/index.html @@ -7,7 +7,7 @@ 任务提交执行 Rest API 文档 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/api/login_api/index.html b/zh-CN/docs/1.0.3/api/login_api/index.html index c45d9f0217e..d98b245a6dc 100644 --- a/zh-CN/docs/1.0.3/api/login_api/index.html +++ b/zh-CN/docs/1.0.3/api/login_api/index.html @@ -7,7 +7,7 @@ 登录文档 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/api/overview/index.html b/zh-CN/docs/1.0.3/api/overview/index.html index ed15acd67a0..eb1bb85237a 100644 --- a/zh-CN/docs/1.0.3/api/overview/index.html +++ b/zh-CN/docs/1.0.3/api/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/add_an_engine_conn/index.html b/zh-CN/docs/1.0.3/architecture/add_an_engine_conn/index.html index 9b3fb867ad5..56b1e55275b 100644 --- a/zh-CN/docs/1.0.3/architecture/add_an_engine_conn/index.html +++ b/zh-CN/docs/1.0.3/architecture/add_an_engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn 新增流程 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/commons/message_scheduler/index.html b/zh-CN/docs/1.0.3/architecture/commons/message_scheduler/index.html index 8ff6411e7a5..dc47cd5f744 100644 --- a/zh-CN/docs/1.0.3/architecture/commons/message_scheduler/index.html +++ b/zh-CN/docs/1.0.3/architecture/commons/message_scheduler/index.html @@ -7,7 +7,7 @@ Message Scheduler 模块 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/commons/rpc/index.html b/zh-CN/docs/1.0.3/architecture/commons/rpc/index.html index 752a3600e30..e98162e2a88 100644 --- a/zh-CN/docs/1.0.3/architecture/commons/rpc/index.html +++ b/zh-CN/docs/1.0.3/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC 模块 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn/index.html b/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn/index.html index e1cf0a7d54f..e91de45f597 100644 --- a/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn/index.html +++ b/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html b/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html index cec10414af2..98633f87709 100644 --- a/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html +++ b/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html @@ -7,7 +7,7 @@ EngineConnManager架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html b/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html index fabba3b1568..eae298ec54f 100644 --- a/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html +++ b/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html @@ -7,7 +7,7 @@ EngineConnPlugin(ECP)架构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/computation_governance_services/entrance/index.html b/zh-CN/docs/1.0.3/architecture/computation_governance_services/entrance/index.html index ecef68bef91..a38faf6765b 100644 --- a/zh-CN/docs/1.0.3/architecture/computation_governance_services/entrance/index.html +++ b/zh-CN/docs/1.0.3/architecture/computation_governance_services/entrance/index.html @@ -7,7 +7,7 @@ Entrance 架构设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis-cli/index.html b/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis-cli/index.html index 3b90bacfae7..c8c7af47858 100644 --- a/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis-cli/index.html +++ b/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis-cli/index.html @@ -7,7 +7,7 @@ Linkis Client 架构设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html b/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html index dcc8a745b7e..77469c8abe4 100644 --- a/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html +++ b/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html @@ -7,7 +7,7 @@ AppManager 架构 | Apache Linkis - + @@ -29,7 +29,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html b/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html index 9285dd31431..9b4f92db572 100644 --- a/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html +++ b/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html @@ -7,7 +7,7 @@ LabelManager 架构 | Apache Linkis - + @@ -26,7 +26,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/overview/index.html b/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/overview/index.html index 145ff78e161..a02040d1439 100644 --- a/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/overview/index.html +++ b/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html b/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html index e9f4e5e4f8d..872d1b071fc 100644 --- a/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html +++ b/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html @@ -7,7 +7,7 @@ ResourceManager 架构 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/computation_governance_services/overview/index.html b/zh-CN/docs/1.0.3/architecture/computation_governance_services/overview/index.html index 83af59f8d5b..cec256daa1a 100644 --- a/zh-CN/docs/1.0.3/architecture/computation_governance_services/overview/index.html +++ b/zh-CN/docs/1.0.3/architecture/computation_governance_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/difference_between_1.0_and_0.x/index.html b/zh-CN/docs/1.0.3/architecture/difference_between_1.0_and_0.x/index.html index b03434c70dd..46a939b6adc 100644 --- a/zh-CN/docs/1.0.3/architecture/difference_between_1.0_and_0.x/index.html +++ b/zh-CN/docs/1.0.3/architecture/difference_between_1.0_and_0.x/index.html @@ -7,7 +7,7 @@ Linkis1.0 与 Linkis0.X 的区别简述 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/job_submission_preparation_and_execution_process/index.html b/zh-CN/docs/1.0.3/architecture/job_submission_preparation_and_execution_process/index.html index 8294df8c6b5..fe6c087b8a2 100644 --- a/zh-CN/docs/1.0.3/architecture/job_submission_preparation_and_execution_process/index.html +++ b/zh-CN/docs/1.0.3/architecture/job_submission_preparation_and_execution_process/index.html @@ -7,7 +7,7 @@ Job 提交准备执行流程 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/microservice_governance_services/gateway/index.html b/zh-CN/docs/1.0.3/architecture/microservice_governance_services/gateway/index.html index 9398efecb6d..9235d9dde93 100644 --- a/zh-CN/docs/1.0.3/architecture/microservice_governance_services/gateway/index.html +++ b/zh-CN/docs/1.0.3/architecture/microservice_governance_services/gateway/index.html @@ -7,7 +7,7 @@ 网关 Gateway 架构 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/microservice_governance_services/overview/index.html b/zh-CN/docs/1.0.3/architecture/microservice_governance_services/overview/index.html index 40e76d3701f..25be295d0b6 100644 --- a/zh-CN/docs/1.0.3/architecture/microservice_governance_services/overview/index.html +++ b/zh-CN/docs/1.0.3/architecture/microservice_governance_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/overview/index.html b/zh-CN/docs/1.0.3/architecture/overview/index.html index e661b06349e..10e5fb42437 100644 --- a/zh-CN/docs/1.0.3/architecture/overview/index.html +++ b/zh-CN/docs/1.0.3/architecture/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/bml/index.html b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/bml/index.html index cf89e711937..550c1febef1 100644 --- a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/bml/index.html +++ b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/bml/index.html @@ -7,7 +7,7 @@ BML 物料库架构 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service/index.html b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service/index.html index ab82cadbe74..e8bfc622d4c 100644 --- a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service/index.html +++ b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service/index.html @@ -7,7 +7,7 @@ CS 架构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html index 06c872a2244..3a1f8765ce8 100644 --- a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html +++ b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html @@ -7,7 +7,7 @@ CS Cache 架构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_client/index.html b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_client/index.html index 60652b64639..86c5626c783 100644 --- a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_client/index.html +++ b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_client/index.html @@ -7,7 +7,7 @@ CS Client | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html index a919014e8a5..3b32c6f4e3a 100644 --- a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html +++ b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html @@ -7,7 +7,7 @@ CS HA 架构设计 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html index 3d2d329500a..2df3894420c 100644 --- a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html +++ b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html @@ -7,7 +7,7 @@ CS Listener 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html index f3cbd88111c..b7048fbc6b3 100644 --- a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html +++ b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html @@ -7,7 +7,7 @@ CS Persistence 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
[Repeated generated-site hunks elided: every remaining diff in this span makes the same one-line change — refreshing the content-hashed <link>/<script> asset reference — in the rebuilt zh-CN/docs pages for versions 1.0.3, 1.1.0, and 1.1.1 (api, architecture, deployment, development, engine_usage, tuning_and_troubleshooting, upgrade, and user_guide index.html files); the surrounding "Copyright © 2019-2020 The Apache Software Foundation" footer markup is unchanged in each page.]
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/api/http/metadatamanager-api/index.html b/zh-CN/docs/1.1.1/api/http/metadatamanager-api/index.html index d196d79754b..6a685ef032f 100644 --- a/zh-CN/docs/1.1.1/api/http/metadatamanager-api/index.html +++ b/zh-CN/docs/1.1.1/api/http/metadatamanager-api/index.html @@ -7,7 +7,7 @@ 元数据查询接口 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/api/http/udf-api/index.html b/zh-CN/docs/1.1.1/api/http/udf-api/index.html index 41872674200..dd74be0968e 100644 --- a/zh-CN/docs/1.1.1/api/http/udf-api/index.html +++ b/zh-CN/docs/1.1.1/api/http/udf-api/index.html @@ -7,7 +7,7 @@ UDF接口 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/api/jdbc_api/index.html b/zh-CN/docs/1.1.1/api/jdbc_api/index.html index 5fdbdafb00f..a4f91942929 100644 --- a/zh-CN/docs/1.1.1/api/jdbc_api/index.html +++ b/zh-CN/docs/1.1.1/api/jdbc_api/index.html @@ -7,7 +7,7 @@ 任务提交执行 JDBC API 文档 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/api/linkis_task_operator/index.html b/zh-CN/docs/1.1.1/api/linkis_task_operator/index.html index f66f0f69d0e..37e2b90fcc1 100644 --- a/zh-CN/docs/1.1.1/api/linkis_task_operator/index.html +++ b/zh-CN/docs/1.1.1/api/linkis_task_operator/index.html @@ -7,7 +7,7 @@ 任务提交执行 Rest API 文档 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/api/login_api/index.html b/zh-CN/docs/1.1.1/api/login_api/index.html index 889153136fd..43d464d0408 100644 --- a/zh-CN/docs/1.1.1/api/login_api/index.html +++ b/zh-CN/docs/1.1.1/api/login_api/index.html @@ -7,7 +7,7 @@ 登录文档 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/api/overview/index.html b/zh-CN/docs/1.1.1/api/overview/index.html index e49c6cd8b7d..9160f131faf 100644 --- a/zh-CN/docs/1.1.1/api/overview/index.html +++ b/zh-CN/docs/1.1.1/api/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/add_an_engine_conn/index.html b/zh-CN/docs/1.1.1/architecture/add_an_engine_conn/index.html index f24367a43f3..1eb6bf57b33 100644 --- a/zh-CN/docs/1.1.1/architecture/add_an_engine_conn/index.html +++ b/zh-CN/docs/1.1.1/architecture/add_an_engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn 新增流程 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/commons/message_scheduler/index.html b/zh-CN/docs/1.1.1/architecture/commons/message_scheduler/index.html index 9033524a65a..80e270f38a7 100644 --- a/zh-CN/docs/1.1.1/architecture/commons/message_scheduler/index.html +++ b/zh-CN/docs/1.1.1/architecture/commons/message_scheduler/index.html @@ -7,7 +7,7 @@ Message Scheduler 模块 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/commons/rpc/index.html b/zh-CN/docs/1.1.1/architecture/commons/rpc/index.html index 4d6ffb871ad..5933fad7be2 100644 --- a/zh-CN/docs/1.1.1/architecture/commons/rpc/index.html +++ b/zh-CN/docs/1.1.1/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC 模块 | Apache Linkis - + @@ -35,7 +35,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn/index.html b/zh-CN/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn/index.html index ba731dccb9f..2157639b0d3 100644 --- a/zh-CN/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn/index.html +++ b/zh-CN/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_manager/index.html b/zh-CN/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_manager/index.html index f92bf75af44..8d4c03a5c5d 100644 --- a/zh-CN/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_manager/index.html +++ b/zh-CN/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_manager/index.html @@ -7,7 +7,7 @@ EngineConnManager架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_plugin/index.html b/zh-CN/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_plugin/index.html index bc2bdd7aa92..d7edae19e19 100644 --- a/zh-CN/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_plugin/index.html +++ b/zh-CN/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_plugin/index.html @@ -7,7 +7,7 @@ EngineConnPlugin(ECP)架构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/computation_governance_services/entrance/index.html b/zh-CN/docs/1.1.1/architecture/computation_governance_services/entrance/index.html index e7758a539ac..b7d0ab7513c 100644 --- a/zh-CN/docs/1.1.1/architecture/computation_governance_services/entrance/index.html +++ b/zh-CN/docs/1.1.1/architecture/computation_governance_services/entrance/index.html @@ -7,7 +7,7 @@ Entrance 架构设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/computation_governance_services/linkis-cli/index.html b/zh-CN/docs/1.1.1/architecture/computation_governance_services/linkis-cli/index.html index 205d40aab33..9b56b554e88 100644 --- a/zh-CN/docs/1.1.1/architecture/computation_governance_services/linkis-cli/index.html +++ b/zh-CN/docs/1.1.1/architecture/computation_governance_services/linkis-cli/index.html @@ -7,7 +7,7 @@ Linkis Client 架构设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/computation_governance_services/linkis_manager/app_manager/index.html b/zh-CN/docs/1.1.1/architecture/computation_governance_services/linkis_manager/app_manager/index.html index 5bee21e7f75..1d1c2752817 100644 --- a/zh-CN/docs/1.1.1/architecture/computation_governance_services/linkis_manager/app_manager/index.html +++ b/zh-CN/docs/1.1.1/architecture/computation_governance_services/linkis_manager/app_manager/index.html @@ -7,7 +7,7 @@ AppManager 架构 | Apache Linkis - + @@ -29,7 +29,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/computation_governance_services/linkis_manager/label_manager/index.html b/zh-CN/docs/1.1.1/architecture/computation_governance_services/linkis_manager/label_manager/index.html index 4b0ce20b641..1e801123f3a 100644 --- a/zh-CN/docs/1.1.1/architecture/computation_governance_services/linkis_manager/label_manager/index.html +++ b/zh-CN/docs/1.1.1/architecture/computation_governance_services/linkis_manager/label_manager/index.html @@ -7,7 +7,7 @@ LabelManager 架构 | Apache Linkis - + @@ -26,7 +26,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/computation_governance_services/linkis_manager/overview/index.html b/zh-CN/docs/1.1.1/architecture/computation_governance_services/linkis_manager/overview/index.html index 4cec652ded9..6d825822a54 100644 --- a/zh-CN/docs/1.1.1/architecture/computation_governance_services/linkis_manager/overview/index.html +++ b/zh-CN/docs/1.1.1/architecture/computation_governance_services/linkis_manager/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/computation_governance_services/linkis_manager/resource_manager/index.html b/zh-CN/docs/1.1.1/architecture/computation_governance_services/linkis_manager/resource_manager/index.html index 728c3e62a78..591000f4e95 100644 --- a/zh-CN/docs/1.1.1/architecture/computation_governance_services/linkis_manager/resource_manager/index.html +++ b/zh-CN/docs/1.1.1/architecture/computation_governance_services/linkis_manager/resource_manager/index.html @@ -7,7 +7,7 @@ ResourceManager 架构 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/computation_governance_services/overview/index.html b/zh-CN/docs/1.1.1/architecture/computation_governance_services/overview/index.html index bf2ecbdd6bf..a1b340e1c04 100644 --- a/zh-CN/docs/1.1.1/architecture/computation_governance_services/overview/index.html +++ b/zh-CN/docs/1.1.1/architecture/computation_governance_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/difference_between_1.0_and_0.x/index.html b/zh-CN/docs/1.1.1/architecture/difference_between_1.0_and_0.x/index.html index a7205403351..042c06aed5d 100644 --- a/zh-CN/docs/1.1.1/architecture/difference_between_1.0_and_0.x/index.html +++ b/zh-CN/docs/1.1.1/architecture/difference_between_1.0_and_0.x/index.html @@ -7,7 +7,7 @@ Linkis1.0 与 Linkis0.X 的区别简述 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/job_submission_preparation_and_execution_process/index.html b/zh-CN/docs/1.1.1/architecture/job_submission_preparation_and_execution_process/index.html index ae155d83bdc..d1b4ed724a2 100644 --- a/zh-CN/docs/1.1.1/architecture/job_submission_preparation_and_execution_process/index.html +++ b/zh-CN/docs/1.1.1/architecture/job_submission_preparation_and_execution_process/index.html @@ -7,7 +7,7 @@ Job 提交准备执行流程 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/microservice_governance_services/gateway/index.html b/zh-CN/docs/1.1.1/architecture/microservice_governance_services/gateway/index.html index d91fa0c41a8..9b41f2fc9aa 100644 --- a/zh-CN/docs/1.1.1/architecture/microservice_governance_services/gateway/index.html +++ b/zh-CN/docs/1.1.1/architecture/microservice_governance_services/gateway/index.html @@ -7,7 +7,7 @@ 网关 Gateway 架构 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/microservice_governance_services/overview/index.html b/zh-CN/docs/1.1.1/architecture/microservice_governance_services/overview/index.html index f2902770b34..516b1176143 100644 --- a/zh-CN/docs/1.1.1/architecture/microservice_governance_services/overview/index.html +++ b/zh-CN/docs/1.1.1/architecture/microservice_governance_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/overview/index.html b/zh-CN/docs/1.1.1/architecture/overview/index.html index 7fc4fc225ea..2f6d3b201c6 100644 --- a/zh-CN/docs/1.1.1/architecture/overview/index.html +++ b/zh-CN/docs/1.1.1/architecture/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/proxy_user/index.html b/zh-CN/docs/1.1.1/architecture/proxy_user/index.html index d53dfbd1fba..b06b81a3804 100644 --- a/zh-CN/docs/1.1.1/architecture/proxy_user/index.html +++ b/zh-CN/docs/1.1.1/architecture/proxy_user/index.html @@ -7,7 +7,7 @@ 代理用户模式 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html index dffc7c3284c..3d8fef14ecd 100644 --- a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html +++ b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html @@ -7,7 +7,7 @@ BML 引擎物料管理功能剖析 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/bml/overview/index.html b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/bml/overview/index.html index c989bad8bc2..6ea1182ad04 100644 --- a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/bml/overview/index.html +++ b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/bml/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service/index.html b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service/index.html index 9fd4eb450a0..1f9e395f096 100644 --- a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service/index.html +++ b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service/index.html @@ -7,7 +7,7 @@ CS 架构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_cache/index.html b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_cache/index.html index 9f2edcc1989..438dfbae283 100644 --- a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_cache/index.html +++ b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_cache/index.html @@ -7,7 +7,7 @@ CS Cache 架构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_client/index.html b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_client/index.html index 8d52417efa3..43480f80364 100644 --- a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_client/index.html +++ b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_client/index.html @@ -7,7 +7,7 @@ CS Client | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html index 33dc4ae0065..4a2b4bc9b8b 100644 --- a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html +++ b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html @@ -7,7 +7,7 @@ CS HA 架构设计 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_listener/index.html b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_listener/index.html index 45b53f3259f..86e101a15cd 100644 --- a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_listener/index.html +++ b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_listener/index.html @@ -7,7 +7,7 @@ CS Listener 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_persistence/index.html b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_persistence/index.html index 60f5685eca1..400ca8fd1a0 100644 --- a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_persistence/index.html +++ b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_persistence/index.html @@ -7,7 +7,7 @@ CS Persistence 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_search/index.html b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_search/index.html index ed1ef126352..f1eaee5aa9e 100644 --- a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_search/index.html +++ b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_search/index.html @@ -7,7 +7,7 @@ CS Search 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/overview/index.html b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/overview/index.html index 85e4702ab36..901aa125ee8 100644 --- a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/overview/index.html +++ b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/context_service/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -25,7 +25,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/datasource_manager/index.html b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/datasource_manager/index.html index 56333e6bde5..cb8a8d85e81 100644 --- a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/datasource_manager/index.html +++ b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/datasource_manager/index.html @@ -7,7 +7,7 @@ DataSource Manager Server 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/metadata_manager/index.html b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/metadata_manager/index.html index 8c432c26231..0c6ea94aad6 100644 --- a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/metadata_manager/index.html +++ b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/metadata_manager/index.html @@ -7,7 +7,7 @@ MetaData Manager Server 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/overview/index.html b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/overview/index.html index 2aff2990f6c..faa9d9e8d40 100644 --- a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/overview/index.html +++ b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/public_service/index.html b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/public_service/index.html index d6bac79c081..26153844cd9 100644 --- a/zh-CN/docs/1.1.1/architecture/public_enhancement_services/public_service/index.html +++ b/zh-CN/docs/1.1.1/architecture/public_enhancement_services/public_service/index.html @@ -7,7 +7,7 @@ PublicService 公共服务架构 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/deployment/cluster_deployment/index.html b/zh-CN/docs/1.1.1/deployment/cluster_deployment/index.html index 70389e7fa32..ed6cdd2a02c 100644 --- a/zh-CN/docs/1.1.1/deployment/cluster_deployment/index.html +++ b/zh-CN/docs/1.1.1/deployment/cluster_deployment/index.html @@ -7,7 +7,7 @@ 分布式部署 | Apache Linkis - + @@ -38,7 +38,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/deployment/engine_conn_plugin_installation/index.html b/zh-CN/docs/1.1.1/deployment/engine_conn_plugin_installation/index.html index e692ac4bbcc..ecf346b23b8 100644 --- a/zh-CN/docs/1.1.1/deployment/engine_conn_plugin_installation/index.html +++ b/zh-CN/docs/1.1.1/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ 引擎的安装 | Apache Linkis - + @@ -38,7 +38,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/deployment/installation_hierarchical_structure/index.html b/zh-CN/docs/1.1.1/deployment/installation_hierarchical_structure/index.html index ad53c54b486..d04f3a83b8d 100644 --- a/zh-CN/docs/1.1.1/deployment/installation_hierarchical_structure/index.html +++ b/zh-CN/docs/1.1.1/deployment/installation_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 部署后的目录结构 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/deployment/involve_skywalking_into_linkis/index.html b/zh-CN/docs/1.1.1/deployment/involve_skywalking_into_linkis/index.html index c1d8ace4474..97ef7599731 100644 --- a/zh-CN/docs/1.1.1/deployment/involve_skywalking_into_linkis/index.html +++ b/zh-CN/docs/1.1.1/deployment/involve_skywalking_into_linkis/index.html @@ -7,7 +7,7 @@ 开启 SkyWalking | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/deployment/linkis_scriptis_install/index.html b/zh-CN/docs/1.1.1/deployment/linkis_scriptis_install/index.html index 14d51bee2de..2162ffd99f1 100644 --- a/zh-CN/docs/1.1.1/deployment/linkis_scriptis_install/index.html +++ b/zh-CN/docs/1.1.1/deployment/linkis_scriptis_install/index.html @@ -7,7 +7,7 @@ 工具Scriptis的安装部署 | Apache Linkis - + @@ -29,7 +29,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/deployment/quick_deploy/index.html b/zh-CN/docs/1.1.1/deployment/quick_deploy/index.html index 48d5ab35495..d8093a52fc4 100644 --- a/zh-CN/docs/1.1.1/deployment/quick_deploy/index.html +++ b/zh-CN/docs/1.1.1/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ 快速单机部署 | Apache Linkis - + @@ -76,7 +76,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/deployment/sourcecode_hierarchical_structure/index.html b/zh-CN/docs/1.1.1/deployment/sourcecode_hierarchical_structure/index.html index 9fbe57e6a38..01cd2918220 100644 --- a/zh-CN/docs/1.1.1/deployment/sourcecode_hierarchical_structure/index.html +++ b/zh-CN/docs/1.1.1/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 源码目录结构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/deployment/start_metadatasource/index.html b/zh-CN/docs/1.1.1/deployment/start_metadatasource/index.html index 33c04af217b..fe9bd92105b 100644 --- a/zh-CN/docs/1.1.1/deployment/start_metadatasource/index.html +++ b/zh-CN/docs/1.1.1/deployment/start_metadatasource/index.html @@ -7,7 +7,7 @@ 数据源功能使用 | Apache Linkis - + @@ -75,7 +75,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/deployment/unpack_hierarchical_structure/index.html b/zh-CN/docs/1.1.1/deployment/unpack_hierarchical_structure/index.html index 2e826677087..d53fc136ecd 100644 --- a/zh-CN/docs/1.1.1/deployment/unpack_hierarchical_structure/index.html +++ b/zh-CN/docs/1.1.1/deployment/unpack_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 安装包目录结构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/deployment/web_install/index.html b/zh-CN/docs/1.1.1/deployment/web_install/index.html index 982980a20e2..86bd8d643b6 100644 --- a/zh-CN/docs/1.1.1/deployment/web_install/index.html +++ b/zh-CN/docs/1.1.1/deployment/web_install/index.html @@ -7,7 +7,7 @@ 管理台部署 | Apache Linkis - + @@ -28,7 +28,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/development/linkis_compile_and_package/index.html b/zh-CN/docs/1.1.1/development/linkis_compile_and_package/index.html index 7f5159fea96..f9570aa8a2a 100644 --- a/zh-CN/docs/1.1.1/development/linkis_compile_and_package/index.html +++ b/zh-CN/docs/1.1.1/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Linkis 后端编译打包 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/development/linkis_config/index.html b/zh-CN/docs/1.1.1/development/linkis_config/index.html index efb68e3ad03..6952ee9d46a 100644 --- a/zh-CN/docs/1.1.1/development/linkis_config/index.html +++ b/zh-CN/docs/1.1.1/development/linkis_config/index.html @@ -7,7 +7,7 @@ Linkis 配置参数介绍 | Apache Linkis - + @@ -31,7 +31,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/development/linkis_debug/index.html b/zh-CN/docs/1.1.1/development/linkis_debug/index.html index 2830a108a62..eab6d683f16 100644 --- a/zh-CN/docs/1.1.1/development/linkis_debug/index.html +++ b/zh-CN/docs/1.1.1/development/linkis_debug/index.html @@ -7,7 +7,7 @@ 调试指引 | Apache Linkis - + @@ -51,7 +51,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/development/linkis_debug_in_mac/index.html b/zh-CN/docs/1.1.1/development/linkis_debug_in_mac/index.html index 26f01e7215d..060cc934680 100644 --- a/zh-CN/docs/1.1.1/development/linkis_debug_in_mac/index.html +++ b/zh-CN/docs/1.1.1/development/linkis_debug_in_mac/index.html @@ -7,7 +7,7 @@ 在Mac上调试Linkis | Apache Linkis - + @@ -54,7 +54,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/development/new_engine_conn/index.html b/zh-CN/docs/1.1.1/development/new_engine_conn/index.html index a89623acdba..5de8d0cb2b6 100644 --- a/zh-CN/docs/1.1.1/development/new_engine_conn/index.html +++ b/zh-CN/docs/1.1.1/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ 如何实现一个新引擎 | Apache Linkis - + @@ -56,7 +56,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/development/web_build/index.html b/zh-CN/docs/1.1.1/development/web_build/index.html index c9ba0375fee..25166b009d0 100644 --- a/zh-CN/docs/1.1.1/development/web_build/index.html +++ b/zh-CN/docs/1.1.1/development/web_build/index.html @@ -7,7 +7,7 @@ Linkis 管理台编译 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/engine_usage/flink/index.html b/zh-CN/docs/1.1.1/engine_usage/flink/index.html index be14dfacc9c..e347181a32a 100644 --- a/zh-CN/docs/1.1.1/engine_usage/flink/index.html +++ b/zh-CN/docs/1.1.1/engine_usage/flink/index.html @@ -7,7 +7,7 @@ Flink 引擎 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/engine_usage/hive/index.html b/zh-CN/docs/1.1.1/engine_usage/hive/index.html index 23e67bc2adb..dfd1da9a98d 100644 --- a/zh-CN/docs/1.1.1/engine_usage/hive/index.html +++ b/zh-CN/docs/1.1.1/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive 引擎 | Apache Linkis - + @@ -28,7 +28,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/engine_usage/jdbc/index.html b/zh-CN/docs/1.1.1/engine_usage/jdbc/index.html index d2971429b40..c4b0fdbfbdb 100644 --- a/zh-CN/docs/1.1.1/engine_usage/jdbc/index.html +++ b/zh-CN/docs/1.1.1/engine_usage/jdbc/index.html @@ -7,7 +7,7 @@ JDBC 引擎 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/engine_usage/openlookeng/index.html b/zh-CN/docs/1.1.1/engine_usage/openlookeng/index.html index e82203830ed..4d77c0c8570 100644 --- a/zh-CN/docs/1.1.1/engine_usage/openlookeng/index.html +++ b/zh-CN/docs/1.1.1/engine_usage/openlookeng/index.html @@ -7,7 +7,7 @@ openLooKeng 引擎 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/engine_usage/overview/index.html b/zh-CN/docs/1.1.1/engine_usage/overview/index.html index 48da76c00c9..5b5ca129191 100644 --- a/zh-CN/docs/1.1.1/engine_usage/overview/index.html +++ b/zh-CN/docs/1.1.1/engine_usage/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/engine_usage/pipeline/index.html b/zh-CN/docs/1.1.1/engine_usage/pipeline/index.html index 811790865d1..f0b744a8f90 100644 --- a/zh-CN/docs/1.1.1/engine_usage/pipeline/index.html +++ b/zh-CN/docs/1.1.1/engine_usage/pipeline/index.html @@ -7,7 +7,7 @@ Pipeline 引擎 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/engine_usage/python/index.html b/zh-CN/docs/1.1.1/engine_usage/python/index.html index f3c0154b47c..5cd553067b5 100644 --- a/zh-CN/docs/1.1.1/engine_usage/python/index.html +++ b/zh-CN/docs/1.1.1/engine_usage/python/index.html @@ -7,7 +7,7 @@ Python 引擎 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/engine_usage/shell/index.html b/zh-CN/docs/1.1.1/engine_usage/shell/index.html index 9946f18b25a..59ebed20f8b 100644 --- a/zh-CN/docs/1.1.1/engine_usage/shell/index.html +++ b/zh-CN/docs/1.1.1/engine_usage/shell/index.html @@ -7,7 +7,7 @@ Shell 引擎 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/engine_usage/spark/index.html b/zh-CN/docs/1.1.1/engine_usage/spark/index.html index f79c63b00fe..a94e39ed256 100644 --- a/zh-CN/docs/1.1.1/engine_usage/spark/index.html +++ b/zh-CN/docs/1.1.1/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark 引擎 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/introduction/index.html b/zh-CN/docs/1.1.1/introduction/index.html index 75a9c7119d7..ec8bbbbcf19 100644 --- a/zh-CN/docs/1.1.1/introduction/index.html +++ b/zh-CN/docs/1.1.1/introduction/index.html @@ -7,7 +7,7 @@ Linkis 简述 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/release/index.html b/zh-CN/docs/1.1.1/release/index.html index 863404d1f53..7e8b7f61d6a 100644 --- a/zh-CN/docs/1.1.1/release/index.html +++ b/zh-CN/docs/1.1.1/release/index.html @@ -7,7 +7,7 @@ 版本总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/table/udf-table/index.html b/zh-CN/docs/1.1.1/table/udf-table/index.html index cd6b0a3c1ce..07ddf97e8f7 100644 --- a/zh-CN/docs/1.1.1/table/udf-table/index.html +++ b/zh-CN/docs/1.1.1/table/udf-table/index.html @@ -7,7 +7,7 @@ UDF 的表结构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/tags/index.html b/zh-CN/docs/1.1.1/tags/index.html index 714b816a02c..083e59380cd 100644 --- a/zh-CN/docs/1.1.1/tags/index.html +++ b/zh-CN/docs/1.1.1/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/tuning_and_troubleshooting/configuration/index.html b/zh-CN/docs/1.1.1/tuning_and_troubleshooting/configuration/index.html index f584515d3e9..ef191806434 100644 --- a/zh-CN/docs/1.1.1/tuning_and_troubleshooting/configuration/index.html +++ b/zh-CN/docs/1.1.1/tuning_and_troubleshooting/configuration/index.html @@ -7,7 +7,7 @@ 参数列表 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/tuning_and_troubleshooting/error_guide/error_code/index.html b/zh-CN/docs/1.1.1/tuning_and_troubleshooting/error_guide/error_code/index.html index 60e3b7eeb15..d7510733c43 100644 --- a/zh-CN/docs/1.1.1/tuning_and_troubleshooting/error_guide/error_code/index.html +++ b/zh-CN/docs/1.1.1/tuning_and_troubleshooting/error_guide/error_code/index.html @@ -7,7 +7,7 @@ 常见错误码及处理方法 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/tuning_and_troubleshooting/error_guide/interface/index.html b/zh-CN/docs/1.1.1/tuning_and_troubleshooting/error_guide/interface/index.html index 1fd33d3533b..059c82beadb 100644 --- a/zh-CN/docs/1.1.1/tuning_and_troubleshooting/error_guide/interface/index.html +++ b/zh-CN/docs/1.1.1/tuning_and_troubleshooting/error_guide/interface/index.html @@ -7,7 +7,7 @@ 接口错误排查 | Apache Linkis - + @@ -25,7 +25,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/tuning_and_troubleshooting/overview/index.html b/zh-CN/docs/1.1.1/tuning_and_troubleshooting/overview/index.html index a0b7130389a..23508c6c16f 100644 --- a/zh-CN/docs/1.1.1/tuning_and_troubleshooting/overview/index.html +++ b/zh-CN/docs/1.1.1/tuning_and_troubleshooting/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -33,7 +33,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/tuning_and_troubleshooting/tuning/index.html b/zh-CN/docs/1.1.1/tuning_and_troubleshooting/tuning/index.html index 84d08aea386..876bebf63c1 100644 --- a/zh-CN/docs/1.1.1/tuning_and_troubleshooting/tuning/index.html +++ b/zh-CN/docs/1.1.1/tuning_and_troubleshooting/tuning/index.html @@ -7,7 +7,7 @@ 调优手册 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/upgrade/upgrade_from_0.X_to_1.0_guide/index.html b/zh-CN/docs/1.1.1/upgrade/upgrade_from_0.X_to_1.0_guide/index.html index a4deb1dde7c..5414b7e44c3 100644 --- a/zh-CN/docs/1.1.1/upgrade/upgrade_from_0.X_to_1.0_guide/index.html +++ b/zh-CN/docs/1.1.1/upgrade/upgrade_from_0.X_to_1.0_guide/index.html @@ -7,7 +7,7 @@ 0.x到1.0的升级指南 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/upgrade/upgrade_guide/index.html b/zh-CN/docs/1.1.1/upgrade/upgrade_guide/index.html index c5e7689d9d2..0a680e49178 100644 --- a/zh-CN/docs/1.1.1/upgrade/upgrade_guide/index.html +++ b/zh-CN/docs/1.1.1/upgrade/upgrade_guide/index.html @@ -7,7 +7,7 @@ 1.0.3以上的版本升级 | Apache Linkis - + @@ -37,7 +37,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/user_guide/console_manual/index.html b/zh-CN/docs/1.1.1/user_guide/console_manual/index.html index bca081401ef..0a43dcd7732 100644 --- a/zh-CN/docs/1.1.1/user_guide/console_manual/index.html +++ b/zh-CN/docs/1.1.1/user_guide/console_manual/index.html @@ -7,7 +7,7 @@ Linkis 管理台的使用 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/user_guide/how_to_use/index.html b/zh-CN/docs/1.1.1/user_guide/how_to_use/index.html index b87157b5a84..6f1b31e8514 100644 --- a/zh-CN/docs/1.1.1/user_guide/how_to_use/index.html +++ b/zh-CN/docs/1.1.1/user_guide/how_to_use/index.html @@ -7,7 +7,7 @@ 如何使用 Linkis1.0 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/user_guide/linkis-datasource-client/index.html b/zh-CN/docs/1.1.1/user_guide/linkis-datasource-client/index.html index 21d154e90b7..fbadf3defa1 100644 --- a/zh-CN/docs/1.1.1/user_guide/linkis-datasource-client/index.html +++ b/zh-CN/docs/1.1.1/user_guide/linkis-datasource-client/index.html @@ -7,7 +7,7 @@ DataSource Client SDK 的使用 | Apache Linkis - + @@ -34,7 +34,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/user_guide/linkiscli_manual/index.html b/zh-CN/docs/1.1.1/user_guide/linkiscli_manual/index.html index 6d9dcf78d4b..e825eafbc56 100644 --- a/zh-CN/docs/1.1.1/user_guide/linkiscli_manual/index.html +++ b/zh-CN/docs/1.1.1/user_guide/linkiscli_manual/index.html @@ -7,7 +7,7 @@ Linkis-Cli 方式使用 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/user_guide/overview/index.html b/zh-CN/docs/1.1.1/user_guide/overview/index.html index 7af9e1409e7..8df4aa670cd 100644 --- a/zh-CN/docs/1.1.1/user_guide/overview/index.html +++ b/zh-CN/docs/1.1.1/user_guide/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/user_guide/sdk_manual/index.html b/zh-CN/docs/1.1.1/user_guide/sdk_manual/index.html index 16daccea07d..4d2163f9bd0 100644 --- a/zh-CN/docs/1.1.1/user_guide/sdk_manual/index.html +++ b/zh-CN/docs/1.1.1/user_guide/sdk_manual/index.html @@ -7,7 +7,7 @@ JAVA SDK 方式使用 | Apache Linkis - + @@ -45,7 +45,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/user_guide/udf/index.html b/zh-CN/docs/1.1.1/user_guide/udf/index.html index 302f233c4b8..ff84ebc7558 100644 --- a/zh-CN/docs/1.1.1/user_guide/udf/index.html +++ b/zh-CN/docs/1.1.1/user_guide/udf/index.html @@ -7,7 +7,7 @@ UDF 的使用 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html index d05edb9d2c9..ab6eb8f5822 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html @@ -7,7 +7,7 @@ 引擎插件API | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html b/zh-CN/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html index b8f94bfa988..aa474fce24e 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html @@ -7,7 +7,7 @@ 引擎物料刷新API | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-cg-entrance-api/task-management-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-cg-entrance-api/task-management-api/index.html index aa5d82500e7..3cf5b95fccf 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-cg-entrance-api/task-management-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-cg-entrance-api/task-management-api/index.html @@ -7,7 +7,7 @@ 任务管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-cg-entrance-api/task-operation-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-cg-entrance-api/task-operation-api/index.html index 7de01fe0931..c32a8d83f43 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-cg-entrance-api/task-operation-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-cg-entrance-api/task-operation-api/index.html @@ -7,7 +7,7 @@ 任务操作 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html index 77d2967720b..238abca308b 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html @@ -7,7 +7,7 @@ EC资源信息管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html index a8d4f52a36d..86eec682282 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html @@ -7,7 +7,7 @@ ECM资源管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html index 5ba9d3b25d8..b68ffa3b558 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html @@ -7,7 +7,7 @@ 引擎管理 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html index 0871d3c91a3..02bed01e6f1 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html @@ -7,7 +7,7 @@ 资源管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-history-service-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-history-service-api/index.html index afac9f590ba..a5318665bc8 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-history-service-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-history-service-api/index.html @@ -7,7 +7,7 @@ 上下文历史记录服务 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-listening-service-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-listening-service-api/index.html index c521741d7d5..e5fff38c9d6 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-listening-service-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-listening-service-api/index.html @@ -7,7 +7,7 @@ 上下文监听服务 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-logging-service-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-logging-service-api/index.html index d9da3291fca..8984e7d5460 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-logging-service-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-logging-service-api/index.html @@ -7,7 +7,7 @@ 上下文记录服务 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-service-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-service-api/index.html index 3d36632f3cf..e7888443745 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-service-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-service-api/index.html @@ -7,7 +7,7 @@ 上下文API | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html index aa4e987d7cb..86eeca8a843 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html @@ -7,7 +7,7 @@ BM项目操作管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html index 951e3212173..1ab9cff39cd 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html @@ -7,7 +7,7 @@ BML资源管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html index bbde0256cf8..df4f4bcc4d3 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html @@ -7,7 +7,7 @@ BMLFS管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/currency-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/currency-api/index.html index 3ec4915cb30..468a89bcdcf 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/currency-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/currency-api/index.html @@ -7,7 +7,7 @@ 通用API | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html index b769f2c66d1..22021113b41 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html @@ -7,7 +7,7 @@ 数据源API | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/file-system-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/file-system-api/index.html index 6d103bef4fe..d4d4aa90cb4 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/file-system-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/file-system-api/index.html @@ -7,7 +7,7 @@ 文件系统 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/global-variable-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/global-variable-api/index.html index 5f5c4d8daad..4074507ea31 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/global-variable-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/global-variable-api/index.html @@ -7,7 +7,7 @@ 添加全局变量 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html index f1cc8adfabe..325d51de518 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html @@ -7,7 +7,7 @@ 管理台首页API | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/instance-management-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/instance-management-api/index.html index 40ce6525f93..9bfe82488a5 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/instance-management-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/instance-management-api/index.html @@ -7,7 +7,7 @@ 实例管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html index 07aae975d23..39e9f060089 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html @@ -7,7 +7,7 @@ 历史作业API | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/link-error-code/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/link-error-code/index.html index e39f5ffa24f..05516ff0188 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/link-error-code/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/link-error-code/index.html @@ -7,7 +7,7 @@ Linkis错误代码 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html index 0e4f42aa682..b205b57283b 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html @@ -7,7 +7,7 @@ Mdq表API | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html index 92b9f49a171..add0f7213db 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html @@ -7,7 +7,7 @@ 元数据查询API | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html index 78dc483e36b..42a2be100b6 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html @@ -7,7 +7,7 @@ 参数配置 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/udf-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/udf-api/index.html index d3461505e90..1e869c9749d 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/udf-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/udf-api/index.html @@ -7,7 +7,7 @@ UDF操作管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/jdbc_api/index.html b/zh-CN/docs/1.1.3/api/jdbc_api/index.html index ce291bf5aa8..e6ab8a7aa33 100644 --- a/zh-CN/docs/1.1.3/api/jdbc_api/index.html +++ b/zh-CN/docs/1.1.3/api/jdbc_api/index.html @@ -7,7 +7,7 @@ 任务提交执行 JDBC API 文档 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/linkis_task_operator/index.html b/zh-CN/docs/1.1.3/api/linkis_task_operator/index.html index 63134082f2e..35cf35c931e 100644 --- a/zh-CN/docs/1.1.3/api/linkis_task_operator/index.html +++ b/zh-CN/docs/1.1.3/api/linkis_task_operator/index.html @@ -7,7 +7,7 @@ 任务提交执行 Rest API 文档 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/login_api/index.html b/zh-CN/docs/1.1.3/api/login_api/index.html index 25f2d5e56ed..d41ab22b8c2 100644 --- a/zh-CN/docs/1.1.3/api/login_api/index.html +++ b/zh-CN/docs/1.1.3/api/login_api/index.html @@ -7,7 +7,7 @@ 登录文档 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/overview/index.html b/zh-CN/docs/1.1.3/api/overview/index.html index 2789b790b42..cd6499a49c3 100644 --- a/zh-CN/docs/1.1.3/api/overview/index.html +++ b/zh-CN/docs/1.1.3/api/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/commons/rpc/index.html b/zh-CN/docs/1.1.3/architecture/commons/rpc/index.html index 8137ce66e47..29b6c91f9ac 100644 --- a/zh-CN/docs/1.1.3/architecture/commons/rpc/index.html +++ b/zh-CN/docs/1.1.3/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC 模块 | Apache Linkis - + @@ -35,7 +35,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/commons/variable/index.html b/zh-CN/docs/1.1.3/architecture/commons/variable/index.html index 8edb36b51a4..969ae31e9b4 100644 --- a/zh-CN/docs/1.1.3/architecture/commons/variable/index.html +++ b/zh-CN/docs/1.1.3/architecture/commons/variable/index.html @@ -7,7 +7,7 @@ 自定义变量设计 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/add_an_engine_conn/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/add_an_engine_conn/index.html index aa1abe19eb3..73f5f8e975d 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/add_an_engine_conn/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/add_an_engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn 启动流程 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn/index.html index 421ce065be6..4d23d2cc21e 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_history/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_history/index.html index 151fa22fc7a..c2c13dd4b40 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_history/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_history/index.html @@ -7,7 +7,7 @@ EngineConn 历史信息记录特性 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html index e80e62d7815..50f5cf3f612 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html @@ -7,7 +7,7 @@ EngineConnManager架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_metrics/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_metrics/index.html index e769dad320e..b4bd5b9cc2a 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_metrics/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_metrics/index.html @@ -7,7 +7,7 @@ EngineConn Metrics 上报特性 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html index 225ac32732c..6fc91be9c93 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html @@ -7,7 +7,7 @@ EngineConnPlugin(ECP)架构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/entrance/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/entrance/index.html index 55415f69377..12321501e2c 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/entrance/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/entrance/index.html @@ -7,7 +7,7 @@ Entrance 架构设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html index f89525b2510..d2ed43be66f 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html @@ -7,7 +7,7 @@ Linkis任务执行流程 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis-cli/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis-cli/index.html index f2ea3478b3a..949212eaf78 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis-cli/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis-cli/index.html @@ -7,7 +7,7 @@ Linkis Client 架构设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html index 4c3ab5810d9..4e083c1e87c 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html @@ -7,7 +7,7 @@ AppManager 架构 | Apache Linkis - + @@ -29,7 +29,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html index aef60794ba5..50d70b93a0e 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html @@ -7,7 +7,7 @@ LabelManager 架构 | Apache Linkis - + @@ -26,7 +26,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/overview/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/overview/index.html index 66ae162cf00..ebea203afad 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/overview/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html index 94cdc4eeea4..62f3a6d7ed0 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html @@ -7,7 +7,7 @@ ResourceManager 架构 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/overview/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/overview/index.html index 2131d46141f..d96f8e5d614 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/overview/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/proxy_user/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/proxy_user/index.html index e0a90710c23..ee07dea5ee8 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/proxy_user/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/proxy_user/index.html @@ -7,7 +7,7 @@ Linkis支持代理用户提交架构涉及 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/difference_between_1.0_and_0.x/index.html b/zh-CN/docs/1.1.3/architecture/difference_between_1.0_and_0.x/index.html index b1d90df3435..69eca254b55 100644 --- a/zh-CN/docs/1.1.3/architecture/difference_between_1.0_and_0.x/index.html +++ b/zh-CN/docs/1.1.3/architecture/difference_between_1.0_and_0.x/index.html @@ -7,7 +7,7 @@ Linkis1.0 与 Linkis0.X 的区别简述 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/microservice_governance_services/gateway/index.html b/zh-CN/docs/1.1.3/architecture/microservice_governance_services/gateway/index.html index da3b0d0d637..267f3fe0406 100644 --- a/zh-CN/docs/1.1.3/architecture/microservice_governance_services/gateway/index.html +++ b/zh-CN/docs/1.1.3/architecture/microservice_governance_services/gateway/index.html @@ -7,7 +7,7 @@ 网关 Gateway 架构 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/microservice_governance_services/overview/index.html b/zh-CN/docs/1.1.3/architecture/microservice_governance_services/overview/index.html index 04e686d3861..f856b31cd55 100644 --- a/zh-CN/docs/1.1.3/architecture/microservice_governance_services/overview/index.html +++ b/zh-CN/docs/1.1.3/architecture/microservice_governance_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/overview/index.html b/zh-CN/docs/1.1.3/architecture/overview/index.html index 5606d941a78..d10dd795026 100644 --- a/zh-CN/docs/1.1.3/architecture/overview/index.html +++ b/zh-CN/docs/1.1.3/architecture/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html index 6140ace14b0..f72429079f1 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html @@ -7,7 +7,7 @@ BML 引擎物料管理功能剖析 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/bml/overview/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/bml/overview/index.html index 69c4f490bac..df56c1578b6 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/bml/overview/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/bml/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/content_service_cleanup/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/content_service_cleanup/index.html index e45c31e5fba..419d8dbccf2 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/content_service_cleanup/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/content_service_cleanup/index.html @@ -7,7 +7,7 @@ CS 清理接口特性 | Apache Linkis - + @@ -36,7 +36,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service/index.html index 19306b532c3..5aa150b9a6d 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service/index.html @@ -7,7 +7,7 @@ CS 架构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html index 766304f037c..beab8e6df02 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html @@ -7,7 +7,7 @@ CS Cache 架构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_client/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_client/index.html index d7434c7bebd..844e65c88f8 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_client/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_client/index.html @@ -7,7 +7,7 @@ CS Client | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html index 2c986230c8a..8961ab00b44 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html @@ -7,7 +7,7 @@ CS HA 架构设计 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html index 34a662eb19d..310b893162d 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html @@ -7,7 +7,7 @@ CS Listener 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html index ef6f31a17f3..2ff56777fab 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html @@ -7,7 +7,7 @@ CS Persistence 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_search/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_search/index.html index a93da13f189..dbebadca4cc 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_search/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_search/index.html @@ -7,7 +7,7 @@ CS Search 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/overview/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/overview/index.html index cc7aeeaab49..39e085ea9ba 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/overview/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -25,7 +25,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/datasource_manager/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/datasource_manager/index.html index 3be265d9611..7251010cf4f 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/datasource_manager/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/datasource_manager/index.html @@ -7,7 +7,7 @@ DataSource Manager Server 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/metadata_manager/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/metadata_manager/index.html index fcee42570f7..c08f851d9ce 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/metadata_manager/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/metadata_manager/index.html @@ -7,7 +7,7 @@ MetaData Manager Server 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/overview/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/overview/index.html index 58ae5de16f7..c712b693390 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/overview/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/public_service/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/public_service/index.html index 8af5ec796da..04d77b98b85 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/public_service/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/public_service/index.html @@ -7,7 +7,7 @@ PublicService 公共服务架构 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/cluster_deployment/index.html b/zh-CN/docs/1.1.3/deployment/cluster_deployment/index.html index e3fc0b13715..5afd0d0648b 100644 --- a/zh-CN/docs/1.1.3/deployment/cluster_deployment/index.html +++ b/zh-CN/docs/1.1.3/deployment/cluster_deployment/index.html @@ -7,7 +7,7 @@ 分布式部署 | Apache Linkis - + @@ -38,7 +38,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/deploy_linkis_without_hdfs/index.html b/zh-CN/docs/1.1.3/deployment/deploy_linkis_without_hdfs/index.html index 4c568875b5d..ad30d8ae4aa 100644 --- a/zh-CN/docs/1.1.3/deployment/deploy_linkis_without_hdfs/index.html +++ b/zh-CN/docs/1.1.3/deployment/deploy_linkis_without_hdfs/index.html @@ -7,7 +7,7 @@ Linkis 去 HDFS 部署 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/engine_conn_plugin_installation/index.html b/zh-CN/docs/1.1.3/deployment/engine_conn_plugin_installation/index.html index 72fa1d34759..e319e70c92e 100644 --- a/zh-CN/docs/1.1.3/deployment/engine_conn_plugin_installation/index.html +++ b/zh-CN/docs/1.1.3/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ 引擎的安装 | Apache Linkis - + @@ -38,7 +38,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/installation_hierarchical_structure/index.html b/zh-CN/docs/1.1.3/deployment/installation_hierarchical_structure/index.html index a9bff998e39..b306f1e252e 100644 --- a/zh-CN/docs/1.1.3/deployment/installation_hierarchical_structure/index.html +++ b/zh-CN/docs/1.1.3/deployment/installation_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 部署后的目录结构 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/involve_knife4j_into_linkis/index.html b/zh-CN/docs/1.1.3/deployment/involve_knife4j_into_linkis/index.html index 8969d83f6f6..05ae59092e6 100644 --- a/zh-CN/docs/1.1.3/deployment/involve_knife4j_into_linkis/index.html +++ b/zh-CN/docs/1.1.3/deployment/involve_knife4j_into_linkis/index.html @@ -7,7 +7,7 @@ 启用 Knife4j | Apache Linkis - + @@ -25,7 +25,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/involve_prometheus_into_linkis/index.html b/zh-CN/docs/1.1.3/deployment/involve_prometheus_into_linkis/index.html index 840f3997e8e..8e404a904b5 100644 --- a/zh-CN/docs/1.1.3/deployment/involve_prometheus_into_linkis/index.html +++ b/zh-CN/docs/1.1.3/deployment/involve_prometheus_into_linkis/index.html @@ -7,7 +7,7 @@ 开启Prometheus监控 | Apache Linkis - + @@ -30,7 +30,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/involve_skywalking_into_linkis/index.html b/zh-CN/docs/1.1.3/deployment/involve_skywalking_into_linkis/index.html index 3ef55cbe98a..88fa75ff763 100644 --- a/zh-CN/docs/1.1.3/deployment/involve_skywalking_into_linkis/index.html +++ b/zh-CN/docs/1.1.3/deployment/involve_skywalking_into_linkis/index.html @@ -7,7 +7,7 @@ 开启 SkyWalking | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/linkis_scriptis_install/index.html b/zh-CN/docs/1.1.3/deployment/linkis_scriptis_install/index.html index 238639ddba5..28743c45fc7 100644 --- a/zh-CN/docs/1.1.3/deployment/linkis_scriptis_install/index.html +++ b/zh-CN/docs/1.1.3/deployment/linkis_scriptis_install/index.html @@ -7,7 +7,7 @@ 工具 scriptis 的安装部署 | Apache Linkis - + @@ -29,7 +29,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/quick_deploy/index.html b/zh-CN/docs/1.1.3/deployment/quick_deploy/index.html index 6e8f5ce26ad..d1c590c3641 100644 --- a/zh-CN/docs/1.1.3/deployment/quick_deploy/index.html +++ b/zh-CN/docs/1.1.3/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ 快速单机部署 | Apache Linkis - + @@ -75,7 +75,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/sourcecode_hierarchical_structure/index.html b/zh-CN/docs/1.1.3/deployment/sourcecode_hierarchical_structure/index.html index e1279abbf84..ce72f1fd2fa 100644 --- a/zh-CN/docs/1.1.3/deployment/sourcecode_hierarchical_structure/index.html +++ b/zh-CN/docs/1.1.3/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 源码目录结构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/start_metadatasource/index.html b/zh-CN/docs/1.1.3/deployment/start_metadatasource/index.html index 7feb88aa09d..4a66afb473e 100644 --- a/zh-CN/docs/1.1.3/deployment/start_metadatasource/index.html +++ b/zh-CN/docs/1.1.3/deployment/start_metadatasource/index.html @@ -7,7 +7,7 @@ 数据源功能使用 | Apache Linkis - + @@ -75,7 +75,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/unpack_hierarchical_structure/index.html b/zh-CN/docs/1.1.3/deployment/unpack_hierarchical_structure/index.html index 51dda580a96..e76cf07bb21 100644 --- a/zh-CN/docs/1.1.3/deployment/unpack_hierarchical_structure/index.html +++ b/zh-CN/docs/1.1.3/deployment/unpack_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 安装包目录结构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/web_install/index.html b/zh-CN/docs/1.1.3/deployment/web_install/index.html index 6b8e24c2ccb..2b0b2477ffa 100644 --- a/zh-CN/docs/1.1.3/deployment/web_install/index.html +++ b/zh-CN/docs/1.1.3/deployment/web_install/index.html @@ -7,7 +7,7 @@ 管理台部署 | Apache Linkis - + @@ -28,7 +28,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/development/linkis_compile_and_package/index.html b/zh-CN/docs/1.1.3/development/linkis_compile_and_package/index.html index db0e97bdefc..9128dcdc7b9 100644 --- a/zh-CN/docs/1.1.3/development/linkis_compile_and_package/index.html +++ b/zh-CN/docs/1.1.3/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Linkis 后端编译打包 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/development/linkis_config/index.html b/zh-CN/docs/1.1.3/development/linkis_config/index.html index a3f72f618ac..01652048c15 100644 --- a/zh-CN/docs/1.1.3/development/linkis_config/index.html +++ b/zh-CN/docs/1.1.3/development/linkis_config/index.html @@ -7,7 +7,7 @@ Linkis 配置参数介绍 | Apache Linkis - + @@ -31,7 +31,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/development/linkis_debug/index.html b/zh-CN/docs/1.1.3/development/linkis_debug/index.html index 5a48c1c6f43..2b981b269eb 100644 --- a/zh-CN/docs/1.1.3/development/linkis_debug/index.html +++ b/zh-CN/docs/1.1.3/development/linkis_debug/index.html @@ -7,7 +7,7 @@ 服务调试指引 | Apache Linkis - + @@ -52,7 +52,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/development/linkis_debug_in_mac/index.html b/zh-CN/docs/1.1.3/development/linkis_debug_in_mac/index.html index 483a41393dd..66907a40753 100644 --- a/zh-CN/docs/1.1.3/development/linkis_debug_in_mac/index.html +++ b/zh-CN/docs/1.1.3/development/linkis_debug_in_mac/index.html @@ -7,7 +7,7 @@ 在Mac上调试Linkis | Apache Linkis - + @@ -54,7 +54,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/development/new_engine_conn/index.html b/zh-CN/docs/1.1.3/development/new_engine_conn/index.html index 56190d69570..4c99ceff9c3 100644 --- a/zh-CN/docs/1.1.3/development/new_engine_conn/index.html +++ b/zh-CN/docs/1.1.3/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ 如何实现一个新引擎 | Apache Linkis - + @@ -54,7 +54,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/development/web_build/index.html b/zh-CN/docs/1.1.3/development/web_build/index.html index 9377cbdb096..09b5d66c253 100644 --- a/zh-CN/docs/1.1.3/development/web_build/index.html +++ b/zh-CN/docs/1.1.3/development/web_build/index.html @@ -7,7 +7,7 @@ Linkis 管理台编译 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/engine_usage/flink/index.html b/zh-CN/docs/1.1.3/engine_usage/flink/index.html index 3db7242a38d..459b59fa02e 100644 --- a/zh-CN/docs/1.1.3/engine_usage/flink/index.html +++ b/zh-CN/docs/1.1.3/engine_usage/flink/index.html @@ -7,7 +7,7 @@ Flink 引擎 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/engine_usage/hive/index.html b/zh-CN/docs/1.1.3/engine_usage/hive/index.html index 32046acffe8..ebaafbbf3a6 100644 --- a/zh-CN/docs/1.1.3/engine_usage/hive/index.html +++ b/zh-CN/docs/1.1.3/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive 引擎 | Apache Linkis - + @@ -28,7 +28,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/engine_usage/jdbc/index.html b/zh-CN/docs/1.1.3/engine_usage/jdbc/index.html index d8e0acbcb12..623ade3c4ed 100644 --- a/zh-CN/docs/1.1.3/engine_usage/jdbc/index.html +++ b/zh-CN/docs/1.1.3/engine_usage/jdbc/index.html @@ -7,7 +7,7 @@ JDBC 引擎 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/engine_usage/openlookeng/index.html b/zh-CN/docs/1.1.3/engine_usage/openlookeng/index.html index 1e67c9dab01..bad3de8e045 100644 --- a/zh-CN/docs/1.1.3/engine_usage/openlookeng/index.html +++ b/zh-CN/docs/1.1.3/engine_usage/openlookeng/index.html @@ -7,7 +7,7 @@ openLooKeng 引擎 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/engine_usage/overview/index.html b/zh-CN/docs/1.1.3/engine_usage/overview/index.html index 0f7365a887b..34cd501f072 100644 --- a/zh-CN/docs/1.1.3/engine_usage/overview/index.html +++ b/zh-CN/docs/1.1.3/engine_usage/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/engine_usage/pipeline/index.html b/zh-CN/docs/1.1.3/engine_usage/pipeline/index.html index 047709553ff..8bf2b33f6b6 100644 --- a/zh-CN/docs/1.1.3/engine_usage/pipeline/index.html +++ b/zh-CN/docs/1.1.3/engine_usage/pipeline/index.html @@ -7,7 +7,7 @@ Pipeline 引擎 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/engine_usage/python/index.html b/zh-CN/docs/1.1.3/engine_usage/python/index.html index c346caebcb3..bea6f5d21e2 100644 --- a/zh-CN/docs/1.1.3/engine_usage/python/index.html +++ b/zh-CN/docs/1.1.3/engine_usage/python/index.html @@ -7,7 +7,7 @@ Python 引擎 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/engine_usage/shell/index.html b/zh-CN/docs/1.1.3/engine_usage/shell/index.html index a0635f92fcc..23f0dd7187a 100644 --- a/zh-CN/docs/1.1.3/engine_usage/shell/index.html +++ b/zh-CN/docs/1.1.3/engine_usage/shell/index.html @@ -7,7 +7,7 @@ Shell 引擎 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/engine_usage/spark/index.html b/zh-CN/docs/1.1.3/engine_usage/spark/index.html index be5058369d8..9a99d3ed6c8 100644 --- a/zh-CN/docs/1.1.3/engine_usage/spark/index.html +++ b/zh-CN/docs/1.1.3/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark 引擎 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/engine_usage/sqoop/index.html b/zh-CN/docs/1.1.3/engine_usage/sqoop/index.html index eb70df6f9bb..12b979f2d1a 100644 --- a/zh-CN/docs/1.1.3/engine_usage/sqoop/index.html +++ b/zh-CN/docs/1.1.3/engine_usage/sqoop/index.html @@ -7,7 +7,7 @@ Sqoop 引擎 | Apache Linkis - + @@ -32,7 +32,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/introduction/index.html b/zh-CN/docs/1.1.3/introduction/index.html index 341bd7807ad..ff0adc6ad72 100644 --- a/zh-CN/docs/1.1.3/introduction/index.html +++ b/zh-CN/docs/1.1.3/introduction/index.html @@ -7,7 +7,7 @@ Linkis 简述 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/release-notes-1.1.3/index.html b/zh-CN/docs/1.1.3/release-notes-1.1.3/index.html index 68c017e1cdf..dcda3e025a0 100644 --- a/zh-CN/docs/1.1.3/release-notes-1.1.3/index.html +++ b/zh-CN/docs/1.1.3/release-notes-1.1.3/index.html @@ -7,18 +7,18 @@ Release Notes 1.1.3-RC1 | Apache Linkis - +
    -
    Version: Next(1.1.3)

    Release Notes 1.1.3-RC1

Apache Linkis (incubating) 1.1.3 includes all changes tracked in Project Linkis-1.1.3.

This release mainly integrates Prometheus to provide basic monitoring for Linkis microservices; adds a retry-count parameter to task submission; records the association between each task and the EngineConn (EC) that executed it; enables the Flink engine to download Yarn logs to the EC log directory; adds watermark support to the front-end pages; upgrades several components with security vulnerabilities; and fixes known bugs reported by the community.

The main features are as follows:

• Integrate Prometheus to provide basic monitoring for Linkis microservices (a hedged probe sketch follows this list)
• Task submission supports a retry-count parameter
• The Flink engine supports downloading Yarn logs to the EC log directory
• Dependency upgrades and fixes for known community issues
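
The Prometheus integration exposes each microservice's metrics over HTTP. As a quick smoke test, the hedged Java sketch below polls a metrics endpoint and prints the first few lines of the exposition output; the gateway address (127.0.0.1:9001) and the /api/rest_j/v1/actuator/prometheus path are assumptions taken from this release's deployment docs and may differ in your environment.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class PrometheusProbe {
    public static void main(String[] args) throws Exception {
        // Assumption: gateway host/port and metrics path follow the 1.1.3 deployment docs.
        String metricsUrl = "http://127.0.0.1:9001/api/rest_j/v1/actuator/prometheus";
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder(URI.create(metricsUrl)).GET().build();
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        // A 200 status with Prometheus exposition-format text means scraping is enabled.
        System.out.println("status=" + response.statusCode());
        response.body().lines().limit(5).forEach(System.out::println);
    }
}
```

If the request returns 200 with `# HELP`/`# TYPE` lines, the service is scrapeable and can be registered as a Prometheus target.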

Abbreviations:

    • COMMON: Linkis Common
• EC: EngineConn
    • ECM: EngineConnManager
    • ECP: EngineConnPlugin
    • DMS: Data Source Manager Service
    • MDS: MetaData Manager Service
    • LM: Linkis Manager
    • PS: Linkis Public Service
    • PE: Linkis Public Enhancement
    • RPC: Linkis Common RPC
    • CG: Linkis Computation Governance

New Features

• [Common][Linkis-1656] Integrate Prometheus to provide basic monitoring capabilities for Linkis microservices (a configuration sketch follows this list)
• [EC-Flink][Linkis-2241] Add a Yarn Log Operator that supports downloading Yarn logs to the EC log directory
• [Web][Linkis-2235] Support watermarks on front-end pages
• [Entrance][Linkis-2164] Entrance supports a task retry-count parameter (see the submission sketch after this list)
• [EC][Linkis-2163] Record the association between a task and its executing EC; the EC information is recorded in the task's Metrics field
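For reference, the Prometheus integration is switched on through service configuration. Below is a minimal sketch; the property name and metrics path are taken from the Linkis deployment documentation for this feature and should be treated as assumptions to verify against your own version.

```properties
# linkis.properties -- minimal sketch for enabling the Prometheus
# integration (Linkis-1656); the property name is assumed from the
# Linkis deployment docs, so verify it against your version
wds.linkis.prometheus.enable=true
```

Once enabled, each microservice is expected to expose metrics through its actuator endpoint (for example http://<service-host>:<port>/api/rest_j/v1/actuator/prometheus), which a Prometheus server can then scrape.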

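The retry count travels with a normal task submission request. The sketch below uses the documented /api/rest_j/v1/entrance/submit body and the gateway's token authentication headers; the retry key wds.linkis.entrance.job.retry.number is a hypothetical placeholder, since the release note does not state the actual parameter name.

```bash
# Sketch: submit a task with a retry count (Linkis-2164).
# Endpoint and body layout follow the Linkis task-submission REST docs;
# "wds.linkis.entrance.job.retry.number" is a HYPOTHETICAL key name.
curl -X POST "http://${GATEWAY_HOST}:9001/api/rest_j/v1/entrance/submit" \
  -H "Content-Type: application/json" \
  -H "Token-Code: ${LINKIS_TOKEN}" -H "Token-User: hadoop" \
  -d '{
        "executionContent": {"code": "show tables", "runType": "sql"},
        "params": {"configuration": {"runtime": {"wds.linkis.entrance.job.retry.number": 3}}},
        "labels": {"engineType": "spark-2.4.3", "userCreator": "hadoop-IDE"}
      }'
```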
Enhancements#

• [ECM][Linkis-2243] Optimize newly registered ECM services and the service load-selection logic, reducing the possible impact of availability problems on new services
• [PS-Jobhistory][Linkis-2198] Optimize the task code cache file name by adding a time parameter, avoiding conflicts for long-running tasks
• [EC-Python][Linkis-2175] Add a py4j watchdog thread that monitors the Java process, preventing the Python process from lingering after the Java process exits abnormally
• [Common][Linkis-2150] The common and entrance modules both contained custom variable substitution logic; consolidate it in the common module
• [EC-JDBC][Linkis-2142] Fix the issue that JDBC engine console configuration changes did not take effect immediately (the cache time is now a configuration item)
• [Entrance][Linkis-2160] The task submission consumer queue supports configuring specific high-capacity users
• [PE][Linkis-2200] Optimize the label code and remove the persistence of label key-values
• [EC][Linkis-1749] Support restricting the service port range via parameters when an EC starts
• [Common-Storage][Linkis-2168] Support configuring the file types of FileSource via a variable
• [Common-Storage][Linkis-2161] Add support for an automatic formatting parameter when exporting result sets to Excel files
• [Gateway][Linkis-2249] Optimize the gateway's Parser logic code
• [Web][Linkis-2248] Sort the user resource display page by user and creator
• [Web][Linkis-2108] Adjust the front-end page layout, unify the basic styles, and optimize the secondary menu display
• [Install][Linkis-2319] Adjust the data source service deployment mode to be enabled by default; support configuring an initial login password during installation
• [Install][Linkis-2421] Support configuring Kerberos authentication information during installation and deployment
• [EC][Linkis-2159] EC logs support rolling by size and time (a generic log4j2 sketch follows this list)
• [Common-Scheduler][Linkis-2272] Optimize the code format and add LoopArray unit tests
• [PS-ContextService][Linkis-2234] Add a method to ContextService for cleaning up context values in batches
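For the log-rolling enhancement above (Linkis-2159): in log4j2, which Linkis uses for its services, rolling a file by both size and time is expressed by combining two triggering policies on one RollingFile appender. Everything in this sketch (appender name, paths, the 100MB limit) is illustrative rather than Linkis's actual configuration.

```xml
<!-- Generic log4j2 sketch: roll one log file by BOTH time and size.
     Names, paths, and limits are illustrative, not Linkis's own config. -->
<RollingFile name="ECLogFile"
             fileName="logs/linkis-engineconn.log"
             filePattern="logs/linkis-engineconn-%d{yyyy-MM-dd}-%i.log.gz">
  <PatternLayout pattern="%d{ISO8601} %-5level [%t] %logger{36} - %msg%n"/>
  <Policies>
    <TimeBasedTriggeringPolicy/>               <!-- roll when the date in filePattern changes -->
    <SizeBasedTriggeringPolicy size="100MB"/>  <!-- ...or when the file exceeds 100 MB -->
  </Policies>
  <DefaultRolloverStrategy max="10"/>          <!-- keep at most 10 rolled files per period -->
</RollingFile>
```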

Bug Fixes#

• [EC][Linkis-2275] Fix the issue that the EC engine heartbeat report failed to be stored in abnormal scenarios because the log field was too long
• [Web][Linkis-2239] Fix the incorrect ring chart showing the idle/busy usage ratio of yarn queue resources
• [PS-ContextService][Linkis-2226] Fix the issue that FileReader and BufferedReader resources were not released in the finally block
• [Install][Linkis-2159] Fix the failure to grant +x permission to shell scripts when compiling on different systems
• [Entrance][Linkis-2237] Refactor JobQueueLabel and JobRunningLabel, fixing bugs in the task queuing and task running labels
• [Build][Linkis-2354] Fix ERROR-level warnings when compiling and packaging the project on Windows
• [Gateway][Linkis-2329] Fix a configuration issue with LDAP integration
• [Entrance][Linkis-2238] Split result set paths by date to avoid too many subdirectories in a single folder; previously, result set paths for different dates shared the same folder, e.g. "/tmp/linkis/hadoop/linkis/20220516_210525/IDE/40099", which could lead to too many files in one directory
• [Entrance][Linkis-2162] Split result set paths by date to avoid too many subdirectories in a single folder
• [Common][Linkis-2332] Disable the Spring Cloud default configuration center to reduce unnecessary log noise
• [Web][Linkis-2295] Remove redundant code from the web installation script

Security#

• [PS-Jobhistory][Linkis-2248] Add parameter validation to the task query list interface to prevent SQL injection
• [PS-PublicService][Linkis-1949] Add a user permission check to the /api/rest_j/v1/datasource/columns interface

Dependency Changes#

• [Common][Linkis-2188] Upgrade poi from 5.2.1 to 5.2.2 to fix a possible memory allocation problem
• [Common][Linkis-2182] Upgrade gson from 2.8.5 to 2.8.9

Acknowledgements#

The release of Apache Linkis (incubating) 1.1.3 would not have been possible without the contributors of the Linkis community. Thanks to all community contributors, including but not limited to the following (in no particular order): Alexkun, CCweixiao, Davidhua1996, QuintinTao, casionone, det101, doroxinrui, huiyuanjjjjuice, husofskyzy, hzdhgf, jackxu2011, legendtkl, liuyou2, peacewong, pjfanning, ruY9527, saLeox, seayi, wForget, wallezhang, yyuser5201314

    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/table/udf-table/index.html b/zh-CN/docs/1.1.3/table/udf-table/index.html index fd1e9f85248..9826986decf 100644 --- a/zh-CN/docs/1.1.3/table/udf-table/index.html +++ b/zh-CN/docs/1.1.3/table/udf-table/index.html @@ -7,7 +7,7 @@ UDF 的表结构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/tags/feature/index.html b/zh-CN/docs/1.1.3/tags/feature/index.html index 5584449ecb2..a00d19cc769 100644 --- a/zh-CN/docs/1.1.3/tags/feature/index.html +++ b/zh-CN/docs/1.1.3/tags/feature/index.html @@ -7,7 +7,7 @@ One doc tagged with "Feature" | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/tags/index.html b/zh-CN/docs/1.1.3/tags/index.html index 69dd83f31c7..605e142b843 100644 --- a/zh-CN/docs/1.1.3/tags/index.html +++ b/zh-CN/docs/1.1.3/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/tuning_and_troubleshooting/configuration/index.html b/zh-CN/docs/1.1.3/tuning_and_troubleshooting/configuration/index.html index ecbe621c807..1d873fabac5 100644 --- a/zh-CN/docs/1.1.3/tuning_and_troubleshooting/configuration/index.html +++ b/zh-CN/docs/1.1.3/tuning_and_troubleshooting/configuration/index.html @@ -7,7 +7,7 @@ 参数列表 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/tuning_and_troubleshooting/overview/index.html b/zh-CN/docs/1.1.3/tuning_and_troubleshooting/overview/index.html index 33dad0e268a..675a1ed90df 100644 --- a/zh-CN/docs/1.1.3/tuning_and_troubleshooting/overview/index.html +++ b/zh-CN/docs/1.1.3/tuning_and_troubleshooting/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -33,7 +33,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/tuning_and_troubleshooting/tuning/index.html b/zh-CN/docs/1.1.3/tuning_and_troubleshooting/tuning/index.html index 1281b373e60..d0e24c4de10 100644 --- a/zh-CN/docs/1.1.3/tuning_and_troubleshooting/tuning/index.html +++ b/zh-CN/docs/1.1.3/tuning_and_troubleshooting/tuning/index.html @@ -7,7 +7,7 @@ 调优手册 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html b/zh-CN/docs/1.1.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html index 03fe747f24c..7c3f50d8733 100644 --- a/zh-CN/docs/1.1.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html +++ b/zh-CN/docs/1.1.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html @@ -7,7 +7,7 @@ 0.x到1.0的升级指南 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/upgrade/upgrade_guide/index.html b/zh-CN/docs/1.1.3/upgrade/upgrade_guide/index.html index 75c5aa55bbb..e24697005f9 100644 --- a/zh-CN/docs/1.1.3/upgrade/upgrade_guide/index.html +++ b/zh-CN/docs/1.1.3/upgrade/upgrade_guide/index.html @@ -7,7 +7,7 @@ 1.0.3以上的版本升级 | Apache Linkis - + @@ -36,7 +36,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/user_guide/console_manual/index.html b/zh-CN/docs/1.1.3/user_guide/console_manual/index.html index b0e81abd63a..852d19c9bba 100644 --- a/zh-CN/docs/1.1.3/user_guide/console_manual/index.html +++ b/zh-CN/docs/1.1.3/user_guide/console_manual/index.html @@ -7,7 +7,7 @@ Linkis 管理台的使用 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/user_guide/how_to_use/index.html b/zh-CN/docs/1.1.3/user_guide/how_to_use/index.html index b938121d8b4..9d7780f4388 100644 --- a/zh-CN/docs/1.1.3/user_guide/how_to_use/index.html +++ b/zh-CN/docs/1.1.3/user_guide/how_to_use/index.html @@ -7,7 +7,7 @@ 如何使用 Linkis1.0 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/user_guide/linkis-datasource-client/index.html b/zh-CN/docs/1.1.3/user_guide/linkis-datasource-client/index.html index 7ab9a89259e..3b7799ac82a 100644 --- a/zh-CN/docs/1.1.3/user_guide/linkis-datasource-client/index.html +++ b/zh-CN/docs/1.1.3/user_guide/linkis-datasource-client/index.html @@ -7,7 +7,7 @@ DataSource Client SDK 的使用 | Apache Linkis - + @@ -34,7 +34,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/user_guide/linkiscli_manual/index.html b/zh-CN/docs/1.1.3/user_guide/linkiscli_manual/index.html index da1a581b4d7..28bd2fff34e 100644 --- a/zh-CN/docs/1.1.3/user_guide/linkiscli_manual/index.html +++ b/zh-CN/docs/1.1.3/user_guide/linkiscli_manual/index.html @@ -7,7 +7,7 @@ Linkis-Cli 方式使用 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/user_guide/overview/index.html b/zh-CN/docs/1.1.3/user_guide/overview/index.html index dbc25736b69..bbe7ef8e149 100644 --- a/zh-CN/docs/1.1.3/user_guide/overview/index.html +++ b/zh-CN/docs/1.1.3/user_guide/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/user_guide/sdk_manual/index.html b/zh-CN/docs/1.1.3/user_guide/sdk_manual/index.html index c4cede34392..15cc47d27ef 100644 --- a/zh-CN/docs/1.1.3/user_guide/sdk_manual/index.html +++ b/zh-CN/docs/1.1.3/user_guide/sdk_manual/index.html @@ -7,7 +7,7 @@ JAVA SDK 方式使用 | Apache Linkis - + @@ -45,7 +45,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/user_guide/udf/index.html b/zh-CN/docs/1.1.3/user_guide/udf/index.html index 15adea1bf60..ad9dd282059 100644 --- a/zh-CN/docs/1.1.3/user_guide/udf/index.html +++ b/zh-CN/docs/1.1.3/user_guide/udf/index.html @@ -7,7 +7,7 @@ UDF 的使用 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html b/zh-CN/docs/latest/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html index 88d4a8bcea6..27bf181de68 100644 --- a/zh-CN/docs/latest/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html @@ -7,7 +7,7 @@ 引擎插件API | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html b/zh-CN/docs/latest/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html index 68f5880ecc7..5a314ba91d1 100644 --- a/zh-CN/docs/latest/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html +++ b/zh-CN/docs/latest/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html @@ -7,7 +7,7 @@ 引擎物料刷新API | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-cg-entrance-api/task-management-api/index.html b/zh-CN/docs/latest/api/http/linkis-cg-entrance-api/task-management-api/index.html index 71d424cf220..ae30b1c30d9 100644 --- a/zh-CN/docs/latest/api/http/linkis-cg-entrance-api/task-management-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-cg-entrance-api/task-management-api/index.html @@ -7,7 +7,7 @@ 任务管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-cg-entrance-api/task-operation-api/index.html b/zh-CN/docs/latest/api/http/linkis-cg-entrance-api/task-operation-api/index.html index c9dc5ebcc65..c677e44d96b 100644 --- a/zh-CN/docs/latest/api/http/linkis-cg-entrance-api/task-operation-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-cg-entrance-api/task-operation-api/index.html @@ -7,7 +7,7 @@ 任务操作 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html b/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html index 0fbf2f61197..8a050cc4d64 100644 --- a/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html @@ -7,7 +7,7 @@ EC资源信息管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html b/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html index 1e4d0d406c2..b45ea5d5120 100644 --- a/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html @@ -7,7 +7,7 @@ ECM资源管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html b/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html index de2c70b156d..3f9df1864e7 100644 --- a/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html @@ -7,7 +7,7 @@ 引擎管理 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html b/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html index b7d26fbc3e8..0b285ad4a44 100644 --- a/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html @@ -7,7 +7,7 @@ 资源管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-history-service-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-history-service-api/index.html index 04311434521..c235715a06f 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-history-service-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-history-service-api/index.html @@ -7,7 +7,7 @@ 上下文历史记录服务 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-listening-service-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-listening-service-api/index.html index a1b7e83ca5b..9ffeaf41926 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-listening-service-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-listening-service-api/index.html @@ -7,7 +7,7 @@ 上下文监听服务 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-logging-service-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-logging-service-api/index.html index 62aa535ded8..e871117b143 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-logging-service-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-logging-service-api/index.html @@ -7,7 +7,7 @@ 上下文记录服务 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-service-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-service-api/index.html index 0ef7fb6ecf2..61f70214b72 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-service-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-service-api/index.html @@ -7,7 +7,7 @@ 上下文API | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html index 844044c7dca..639a5c29de4 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html @@ -7,7 +7,7 @@ BM项目操作管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html index 80f4e2b1b2b..b5f6d006e2b 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html @@ -7,7 +7,7 @@ BML资源管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html index b23d47e7ea3..bd77bf7438a 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html @@ -7,7 +7,7 @@ BMLFS管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/currency-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/currency-api/index.html index a74987e8e5f..57d7e4ae5cc 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/currency-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/currency-api/index.html @@ -7,7 +7,7 @@ 通用API | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html index 5cdceb704ab..167f0519bc4 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html @@ -7,7 +7,7 @@ 数据源API | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/file-system-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/file-system-api/index.html index abc5dd82466..a4cade7d300 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/file-system-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/file-system-api/index.html @@ -7,7 +7,7 @@ 文件系统 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/global-variable-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/global-variable-api/index.html index 0a012a58797..4ae10053902 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/global-variable-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/global-variable-api/index.html @@ -7,7 +7,7 @@ 添加全局变量 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html index ee834985fe3..962f2a5ba45 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html @@ -7,7 +7,7 @@ 管理台首页API | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/instance-management-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/instance-management-api/index.html index 101528f1f89..46468415a4d 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/instance-management-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/instance-management-api/index.html @@ -7,7 +7,7 @@ 实例管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html index b9c05be8bf7..10d4b56ba6c 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html @@ -7,7 +7,7 @@ 历史作业API | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/link-error-code/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/link-error-code/index.html index 51b28973992..34edd25f395 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/link-error-code/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/link-error-code/index.html @@ -7,7 +7,7 @@ Linkis错误代码 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html index 144cf80ebb6..60323af09dd 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html @@ -7,7 +7,7 @@ Mdq表API | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html index 7116b85f476..be3ce00c1ac 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html @@ -7,7 +7,7 @@ 元数据查询API | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html index bd9abc33038..0a0fe126d8f 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html @@ -7,7 +7,7 @@ 参数配置 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/udf-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/udf-api/index.html index 3c2b413e802..f448b64c798 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/udf-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/udf-api/index.html @@ -7,7 +7,7 @@ UDF操作管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/jdbc_api/index.html b/zh-CN/docs/latest/api/jdbc_api/index.html index 4c77134c65f..1960ac971c7 100644 --- a/zh-CN/docs/latest/api/jdbc_api/index.html +++ b/zh-CN/docs/latest/api/jdbc_api/index.html @@ -7,7 +7,7 @@ 任务提交执行 JDBC API 文档 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/linkis_task_operator/index.html b/zh-CN/docs/latest/api/linkis_task_operator/index.html index 1f76f21b17a..d85ee06303c 100644 --- a/zh-CN/docs/latest/api/linkis_task_operator/index.html +++ b/zh-CN/docs/latest/api/linkis_task_operator/index.html @@ -7,7 +7,7 @@ 任务提交执行 Rest API 文档 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/login_api/index.html b/zh-CN/docs/latest/api/login_api/index.html index eea82149205..86d04b33004 100644 --- a/zh-CN/docs/latest/api/login_api/index.html +++ b/zh-CN/docs/latest/api/login_api/index.html @@ -7,7 +7,7 @@ 登录文档 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/overview/index.html b/zh-CN/docs/latest/api/overview/index.html index 0efc3e2dab6..10c86384d7c 100644 --- a/zh-CN/docs/latest/api/overview/index.html +++ b/zh-CN/docs/latest/api/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/commons/message_scheduler/index.html b/zh-CN/docs/latest/architecture/commons/message_scheduler/index.html index 637c602cbc0..1bf99400e06 100644 --- a/zh-CN/docs/latest/architecture/commons/message_scheduler/index.html +++ b/zh-CN/docs/latest/architecture/commons/message_scheduler/index.html @@ -7,7 +7,7 @@ Message Scheduler 模块 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/commons/rpc/index.html b/zh-CN/docs/latest/architecture/commons/rpc/index.html index bc9f398f662..c92c24a831b 100644 --- a/zh-CN/docs/latest/architecture/commons/rpc/index.html +++ b/zh-CN/docs/latest/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC 模块 | Apache Linkis - + @@ -35,7 +35,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/engine/add_an_engine_conn/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/engine/add_an_engine_conn/index.html index fe53cf7895a..0e0669c5b21 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/engine/add_an_engine_conn/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/engine/add_an_engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn 启动流程 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn/index.html index 503154d93f4..5b1cd325afa 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn_manager/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn_manager/index.html index 3b8ab122c41..ba95ccc50fd 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn_manager/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn_manager/index.html @@ -7,7 +7,7 @@ EngineConnManager架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn_plugin/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn_plugin/index.html index 519bd4595a0..b8635d64349 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn_plugin/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn_plugin/index.html @@ -7,7 +7,7 @@ EngineConnPlugin(ECP)架构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/entrance/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/entrance/index.html index 0b2749dd9f6..f5e1d020714 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/entrance/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/entrance/index.html @@ -7,7 +7,7 @@ Entrance 架构设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html index 90591731f0e..a7b1dd4868b 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html @@ -7,7 +7,7 @@ Linkis任务执行流程 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/linkis-cli/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/linkis-cli/index.html index fa58e8bfdd4..476b079a466 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/linkis-cli/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/linkis-cli/index.html @@ -7,7 +7,7 @@ Linkis Client 架构设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/app_manager/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/app_manager/index.html index e9a99b7e056..97ab581f8a8 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/app_manager/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/app_manager/index.html @@ -7,7 +7,7 @@ AppManager 架构 | Apache Linkis - + @@ -29,7 +29,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/label_manager/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/label_manager/index.html index 3b298623a28..571e97428f6 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/label_manager/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/label_manager/index.html @@ -7,7 +7,7 @@ LabelManager 架构 | Apache Linkis - + @@ -26,7 +26,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/overview/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/overview/index.html index 5253e89ca66..0b9583caafb 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/overview/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/resource_manager/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/resource_manager/index.html index 153f217f2de..4e15e95b733 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/resource_manager/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/resource_manager/index.html @@ -7,7 +7,7 @@ ResourceManager 架构 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/overview/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/overview/index.html index 843db70f152..07fba373b36 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/overview/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/proxy_user/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/proxy_user/index.html index 00ddc3cc186..19e8ee9d957 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/proxy_user/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/proxy_user/index.html @@ -7,7 +7,7 @@ Proxy User Mode | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/difference_between_1.0_and_0.x/index.html b/zh-CN/docs/latest/architecture/difference_between_1.0_and_0.x/index.html index b2ba4b7a844..b1a1ad9808d 100644 --- a/zh-CN/docs/latest/architecture/difference_between_1.0_and_0.x/index.html +++ b/zh-CN/docs/latest/architecture/difference_between_1.0_and_0.x/index.html @@ -7,7 +7,7 @@ Linkis1.0 与 Linkis0.X 的区别简述 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/microservice_governance_services/gateway/index.html b/zh-CN/docs/latest/architecture/microservice_governance_services/gateway/index.html index abcdb3a4200..1096160090e 100644 --- a/zh-CN/docs/latest/architecture/microservice_governance_services/gateway/index.html +++ b/zh-CN/docs/latest/architecture/microservice_governance_services/gateway/index.html @@ -7,7 +7,7 @@ 网关 Gateway 架构 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/microservice_governance_services/overview/index.html b/zh-CN/docs/latest/architecture/microservice_governance_services/overview/index.html index 26732bac1f2..b7f5f172e99 100644 --- a/zh-CN/docs/latest/architecture/microservice_governance_services/overview/index.html +++ b/zh-CN/docs/latest/architecture/microservice_governance_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/overview/index.html b/zh-CN/docs/latest/architecture/overview/index.html index bfcfaa5786b..684042d0935 100644 --- a/zh-CN/docs/latest/architecture/overview/index.html +++ b/zh-CN/docs/latest/architecture/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html index b2ccbef48f2..3429367a1e1 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html @@ -7,7 +7,7 @@ BML 引擎物料管理功能剖析 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/bml/overview/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/bml/overview/index.html index 9095bd8146a..e067c05827c 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/bml/overview/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/bml/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service/index.html index d8386f1c369..867f4445df9 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service/index.html @@ -7,7 +7,7 @@ CS 架构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_cache/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_cache/index.html index bf37325a878..4722b66da5e 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_cache/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_cache/index.html @@ -7,7 +7,7 @@ CS Cache 架构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_client/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_client/index.html index 6ce70ab5511..d9f6147c441 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_client/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_client/index.html @@ -7,7 +7,7 @@ CS Client | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html index e9c2b02027c..d9ec5640995 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html @@ -7,7 +7,7 @@ CS HA 架构设计 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_listener/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_listener/index.html index ae9f2401b66..9a293ae86f0 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_listener/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_listener/index.html @@ -7,7 +7,7 @@ CS Listener 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_persistence/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_persistence/index.html index 20860d02ef4..f67fd54a7bb 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_persistence/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_persistence/index.html @@ -7,7 +7,7 @@ CS Persistence 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_search/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_search/index.html index 801ec567243..49980dc314d 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_search/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_search/index.html @@ -7,7 +7,7 @@ CS Search 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/overview/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/overview/index.html index 74b226f0b87..a95ec02de5e 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/overview/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -25,7 +25,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/datasource_manager/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/datasource_manager/index.html index 08fabd0cd33..1646866abce 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/datasource_manager/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/datasource_manager/index.html @@ -7,7 +7,7 @@ DataSource Manager Server 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/metadata_manager/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/metadata_manager/index.html index c08c8c46cff..ec1daeb37b4 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/metadata_manager/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/metadata_manager/index.html @@ -7,7 +7,7 @@ MetaData Manager Server 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/overview/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/overview/index.html index 3b4bb8faae8..bf7f5898f4d 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/overview/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/public_service/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/public_service/index.html index 4ef0aec1ea1..51c150b09fa 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/public_service/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/public_service/index.html @@ -7,7 +7,7 @@ PublicService 公共服务架构 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/cluster_deployment/index.html b/zh-CN/docs/latest/deployment/cluster_deployment/index.html index a26265e8c42..31a14768ead 100644 --- a/zh-CN/docs/latest/deployment/cluster_deployment/index.html +++ b/zh-CN/docs/latest/deployment/cluster_deployment/index.html @@ -7,7 +7,7 @@ 分布式部署 | Apache Linkis - + @@ -38,7 +38,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/deploy_linkis_without_hdfs/index.html b/zh-CN/docs/latest/deployment/deploy_linkis_without_hdfs/index.html index 189f9e23c09..9f386d9ff97 100644 --- a/zh-CN/docs/latest/deployment/deploy_linkis_without_hdfs/index.html +++ b/zh-CN/docs/latest/deployment/deploy_linkis_without_hdfs/index.html @@ -7,7 +7,7 @@ Linkis 去 HDFS 部署 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/engine_conn_plugin_installation/index.html b/zh-CN/docs/latest/deployment/engine_conn_plugin_installation/index.html index 36ece2ddb0e..804b4d27d26 100644 --- a/zh-CN/docs/latest/deployment/engine_conn_plugin_installation/index.html +++ b/zh-CN/docs/latest/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ 引擎的安装 | Apache Linkis - + @@ -38,7 +38,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/installation_hierarchical_structure/index.html b/zh-CN/docs/latest/deployment/installation_hierarchical_structure/index.html index 56859629241..68dcad363ef 100644 --- a/zh-CN/docs/latest/deployment/installation_hierarchical_structure/index.html +++ b/zh-CN/docs/latest/deployment/installation_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 部署后的目录结构 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/involve_skywalking_into_linkis/index.html b/zh-CN/docs/latest/deployment/involve_skywalking_into_linkis/index.html index 6b9360d2af0..b59e6dcca9e 100644 --- a/zh-CN/docs/latest/deployment/involve_skywalking_into_linkis/index.html +++ b/zh-CN/docs/latest/deployment/involve_skywalking_into_linkis/index.html @@ -7,7 +7,7 @@ 开启 SkyWalking | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/linkis_scriptis_install/index.html b/zh-CN/docs/latest/deployment/linkis_scriptis_install/index.html index 7db50554e52..2abe6c044ca 100644 --- a/zh-CN/docs/latest/deployment/linkis_scriptis_install/index.html +++ b/zh-CN/docs/latest/deployment/linkis_scriptis_install/index.html @@ -7,7 +7,7 @@ 工具Scriptis的安装部署 | Apache Linkis - + @@ -29,7 +29,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/quick_deploy/index.html b/zh-CN/docs/latest/deployment/quick_deploy/index.html index 0a6fee40558..d369c57d477 100644 --- a/zh-CN/docs/latest/deployment/quick_deploy/index.html +++ b/zh-CN/docs/latest/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ 快速单机部署 | Apache Linkis - + @@ -75,7 +75,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/sourcecode_hierarchical_structure/index.html b/zh-CN/docs/latest/deployment/sourcecode_hierarchical_structure/index.html index 8ed71ad9da6..3f3335ba6ed 100644 --- a/zh-CN/docs/latest/deployment/sourcecode_hierarchical_structure/index.html +++ b/zh-CN/docs/latest/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 源码目录结构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/start_metadatasource/index.html b/zh-CN/docs/latest/deployment/start_metadatasource/index.html index 2687c8721e3..ce439de4af0 100644 --- a/zh-CN/docs/latest/deployment/start_metadatasource/index.html +++ b/zh-CN/docs/latest/deployment/start_metadatasource/index.html @@ -7,7 +7,7 @@ 数据源功能使用 | Apache Linkis - + @@ -75,7 +75,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/unpack_hierarchical_structure/index.html b/zh-CN/docs/latest/deployment/unpack_hierarchical_structure/index.html index 21b57e1fb02..dc5ff594304 100644 --- a/zh-CN/docs/latest/deployment/unpack_hierarchical_structure/index.html +++ b/zh-CN/docs/latest/deployment/unpack_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 安装包目录结构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/web_install/index.html b/zh-CN/docs/latest/deployment/web_install/index.html index 2a46145993e..df4b8d203d6 100644 --- a/zh-CN/docs/latest/deployment/web_install/index.html +++ b/zh-CN/docs/latest/deployment/web_install/index.html @@ -7,7 +7,7 @@ 管理台部署 | Apache Linkis - + @@ -28,7 +28,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/development/linkis_compile_and_package/index.html b/zh-CN/docs/latest/development/linkis_compile_and_package/index.html index 3e970981757..718158e3153 100644 --- a/zh-CN/docs/latest/development/linkis_compile_and_package/index.html +++ b/zh-CN/docs/latest/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Linkis 后端编译打包 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/development/linkis_config/index.html b/zh-CN/docs/latest/development/linkis_config/index.html index 633e9f37797..df26c673fdb 100644 --- a/zh-CN/docs/latest/development/linkis_config/index.html +++ b/zh-CN/docs/latest/development/linkis_config/index.html @@ -7,7 +7,7 @@ Linkis 配置参数介绍 | Apache Linkis - + @@ -31,7 +31,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/development/linkis_debug/index.html b/zh-CN/docs/latest/development/linkis_debug/index.html index dc6e951d41f..22a62c6261a 100644 --- a/zh-CN/docs/latest/development/linkis_debug/index.html +++ b/zh-CN/docs/latest/development/linkis_debug/index.html @@ -7,7 +7,7 @@ 服务调试指引 | Apache Linkis - + @@ -52,7 +52,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/development/linkis_debug_in_mac/index.html b/zh-CN/docs/latest/development/linkis_debug_in_mac/index.html index 17e7f620a31..e041afb18f9 100644 --- a/zh-CN/docs/latest/development/linkis_debug_in_mac/index.html +++ b/zh-CN/docs/latest/development/linkis_debug_in_mac/index.html @@ -7,7 +7,7 @@ 在Mac上调试Linkis | Apache Linkis - + @@ -54,7 +54,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/development/new_engine_conn/index.html b/zh-CN/docs/latest/development/new_engine_conn/index.html index 181790af668..eefcb6382dc 100644 --- a/zh-CN/docs/latest/development/new_engine_conn/index.html +++ b/zh-CN/docs/latest/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ 如何实现一个新引擎 | Apache Linkis - + @@ -54,7 +54,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/development/web_build/index.html b/zh-CN/docs/latest/development/web_build/index.html index e1d06c823cd..f04d2cb16dd 100644 --- a/zh-CN/docs/latest/development/web_build/index.html +++ b/zh-CN/docs/latest/development/web_build/index.html @@ -7,7 +7,7 @@ Linkis 管理台编译 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/engine_usage/flink/index.html b/zh-CN/docs/latest/engine_usage/flink/index.html index 8496033e4e8..b5d777bb9b8 100644 --- a/zh-CN/docs/latest/engine_usage/flink/index.html +++ b/zh-CN/docs/latest/engine_usage/flink/index.html @@ -7,7 +7,7 @@ Flink 引擎 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/engine_usage/hive/index.html b/zh-CN/docs/latest/engine_usage/hive/index.html index 165b80ae906..78dd1d8e593 100644 --- a/zh-CN/docs/latest/engine_usage/hive/index.html +++ b/zh-CN/docs/latest/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive 引擎 | Apache Linkis - + @@ -28,7 +28,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/engine_usage/jdbc/index.html b/zh-CN/docs/latest/engine_usage/jdbc/index.html index b41bf24933e..758af08d8a4 100644 --- a/zh-CN/docs/latest/engine_usage/jdbc/index.html +++ b/zh-CN/docs/latest/engine_usage/jdbc/index.html @@ -7,7 +7,7 @@ JDBC 引擎 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/engine_usage/openlookeng/index.html b/zh-CN/docs/latest/engine_usage/openlookeng/index.html index b8a0b710c1b..f994e37fcff 100644 --- a/zh-CN/docs/latest/engine_usage/openlookeng/index.html +++ b/zh-CN/docs/latest/engine_usage/openlookeng/index.html @@ -7,7 +7,7 @@ openLooKeng 引擎 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/engine_usage/overview/index.html b/zh-CN/docs/latest/engine_usage/overview/index.html index 19cca400c85..a7166cb7a00 100644 --- a/zh-CN/docs/latest/engine_usage/overview/index.html +++ b/zh-CN/docs/latest/engine_usage/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/engine_usage/pipeline/index.html b/zh-CN/docs/latest/engine_usage/pipeline/index.html index 26fb6af5bae..4d3777ff6ad 100644 --- a/zh-CN/docs/latest/engine_usage/pipeline/index.html +++ b/zh-CN/docs/latest/engine_usage/pipeline/index.html @@ -7,7 +7,7 @@ Pipeline 引擎 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/engine_usage/python/index.html b/zh-CN/docs/latest/engine_usage/python/index.html index f0ef91dcda2..fc4b8095750 100644 --- a/zh-CN/docs/latest/engine_usage/python/index.html +++ b/zh-CN/docs/latest/engine_usage/python/index.html @@ -7,7 +7,7 @@ Python 引擎 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/engine_usage/shell/index.html b/zh-CN/docs/latest/engine_usage/shell/index.html index fffd6ae2cd2..90b30d03cd8 100644 --- a/zh-CN/docs/latest/engine_usage/shell/index.html +++ b/zh-CN/docs/latest/engine_usage/shell/index.html @@ -7,7 +7,7 @@ Shell 引擎 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/engine_usage/spark/index.html b/zh-CN/docs/latest/engine_usage/spark/index.html index afeb34ba6a8..3698df7768a 100644 --- a/zh-CN/docs/latest/engine_usage/spark/index.html +++ b/zh-CN/docs/latest/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark 引擎 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/engine_usage/sqoop/index.html b/zh-CN/docs/latest/engine_usage/sqoop/index.html index 8f03168ed19..2c02b6059d4 100644 --- a/zh-CN/docs/latest/engine_usage/sqoop/index.html +++ b/zh-CN/docs/latest/engine_usage/sqoop/index.html @@ -7,7 +7,7 @@ Sqoop 引擎 | Apache Linkis - + @@ -32,7 +32,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/introduction/index.html b/zh-CN/docs/latest/introduction/index.html index a36150d7cdc..832bc8a1136 100644 --- a/zh-CN/docs/latest/introduction/index.html +++ b/zh-CN/docs/latest/introduction/index.html @@ -7,7 +7,7 @@ Linkis 简述 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/release/index.html b/zh-CN/docs/latest/release/index.html index 73459cf8555..9d1da21103b 100644 --- a/zh-CN/docs/latest/release/index.html +++ b/zh-CN/docs/latest/release/index.html @@ -7,7 +7,7 @@ 版本总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/table/udf-table/index.html b/zh-CN/docs/latest/table/udf-table/index.html index 0f39a9c0ea8..2f66e358bc4 100644 --- a/zh-CN/docs/latest/table/udf-table/index.html +++ b/zh-CN/docs/latest/table/udf-table/index.html @@ -7,7 +7,7 @@ UDF 的表结构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/tags/index.html b/zh-CN/docs/latest/tags/index.html index a3d29f773c2..453839c87e2 100644 --- a/zh-CN/docs/latest/tags/index.html +++ b/zh-CN/docs/latest/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/tuning_and_troubleshooting/configuration/index.html b/zh-CN/docs/latest/tuning_and_troubleshooting/configuration/index.html index c1c229f99e8..f20568d32bb 100644 --- a/zh-CN/docs/latest/tuning_and_troubleshooting/configuration/index.html +++ b/zh-CN/docs/latest/tuning_and_troubleshooting/configuration/index.html @@ -7,7 +7,7 @@ 参数列表 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/tuning_and_troubleshooting/overview/index.html b/zh-CN/docs/latest/tuning_and_troubleshooting/overview/index.html index 478bdabd1e8..5e5cae87022 100644 --- a/zh-CN/docs/latest/tuning_and_troubleshooting/overview/index.html +++ b/zh-CN/docs/latest/tuning_and_troubleshooting/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -33,7 +33,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/tuning_and_troubleshooting/tuning/index.html b/zh-CN/docs/latest/tuning_and_troubleshooting/tuning/index.html index 88ff545ed61..7cd40f12b8f 100644 --- a/zh-CN/docs/latest/tuning_and_troubleshooting/tuning/index.html +++ b/zh-CN/docs/latest/tuning_and_troubleshooting/tuning/index.html @@ -7,7 +7,7 @@ 调优手册 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/upgrade/upgrade_from_0.X_to_1.0_guide/index.html b/zh-CN/docs/latest/upgrade/upgrade_from_0.X_to_1.0_guide/index.html index 2365607fe2e..4e4bd609a3d 100644 --- a/zh-CN/docs/latest/upgrade/upgrade_from_0.X_to_1.0_guide/index.html +++ b/zh-CN/docs/latest/upgrade/upgrade_from_0.X_to_1.0_guide/index.html @@ -7,7 +7,7 @@ 0.x到1.0的升级指南 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/upgrade/upgrade_guide/index.html b/zh-CN/docs/latest/upgrade/upgrade_guide/index.html index f1d45249242..5ed5c289eed 100644 --- a/zh-CN/docs/latest/upgrade/upgrade_guide/index.html +++ b/zh-CN/docs/latest/upgrade/upgrade_guide/index.html @@ -7,7 +7,7 @@ 1.0.3以上的版本升级 | Apache Linkis - + @@ -36,7 +36,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/user_guide/console_manual/index.html b/zh-CN/docs/latest/user_guide/console_manual/index.html index 4cce5bcdd8f..18c96ddc385 100644 --- a/zh-CN/docs/latest/user_guide/console_manual/index.html +++ b/zh-CN/docs/latest/user_guide/console_manual/index.html @@ -7,7 +7,7 @@ Linkis 管理台的使用 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/user_guide/how_to_use/index.html b/zh-CN/docs/latest/user_guide/how_to_use/index.html index f222e5473f1..7108afcb418 100644 --- a/zh-CN/docs/latest/user_guide/how_to_use/index.html +++ b/zh-CN/docs/latest/user_guide/how_to_use/index.html @@ -7,7 +7,7 @@ 如何使用 Linkis1.0 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/user_guide/linkis-datasource-client/index.html b/zh-CN/docs/latest/user_guide/linkis-datasource-client/index.html index c2caf2b4f74..1bfbf625018 100644 --- a/zh-CN/docs/latest/user_guide/linkis-datasource-client/index.html +++ b/zh-CN/docs/latest/user_guide/linkis-datasource-client/index.html @@ -7,7 +7,7 @@ DataSource Client SDK 的使用 | Apache Linkis - + @@ -34,7 +34,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/user_guide/linkiscli_manual/index.html b/zh-CN/docs/latest/user_guide/linkiscli_manual/index.html index 5827172e73d..a9e7948d0b8 100644 --- a/zh-CN/docs/latest/user_guide/linkiscli_manual/index.html +++ b/zh-CN/docs/latest/user_guide/linkiscli_manual/index.html @@ -7,7 +7,7 @@ Linkis-Cli 方式使用 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/user_guide/overview/index.html b/zh-CN/docs/latest/user_guide/overview/index.html index d4fddf86681..31be9b2a141 100644 --- a/zh-CN/docs/latest/user_guide/overview/index.html +++ b/zh-CN/docs/latest/user_guide/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/user_guide/sdk_manual/index.html b/zh-CN/docs/latest/user_guide/sdk_manual/index.html index 98b14bc86f4..4e572f748f9 100644 --- a/zh-CN/docs/latest/user_guide/sdk_manual/index.html +++ b/zh-CN/docs/latest/user_guide/sdk_manual/index.html @@ -7,7 +7,7 @@ JAVA SDK 方式使用 | Apache Linkis - + @@ -45,7 +45,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/user_guide/udf/index.html b/zh-CN/docs/latest/user_guide/udf/index.html index 17a66127a81..e253cb7eb98 100644 --- a/zh-CN/docs/latest/user_guide/udf/index.html +++ b/zh-CN/docs/latest/user_guide/udf/index.html @@ -7,7 +7,7 @@ UDF 的使用 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/download/download-logo/index.html b/zh-CN/download/download-logo/index.html index 645eecdc9d0..ae043447dde 100644 --- a/zh-CN/download/download-logo/index.html +++ b/zh-CN/download/download-logo/index.html @@ -7,7 +7,7 @@ 下载Logo | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/download/main/index.html b/zh-CN/download/main/index.html index bb9e4930ea8..e94983140d8 100644 --- a/zh-CN/download/main/index.html +++ b/zh-CN/download/main/index.html @@ -7,7 +7,7 @@ 版本列表 | Apache Linkis - + @@ -25,7 +25,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/download/release-notes-1.0.2/index.html b/zh-CN/download/release-notes-1.0.2/index.html index ee3f8b79d7e..949ccdf0e6d 100644 --- a/zh-CN/download/release-notes-1.0.2/index.html +++ b/zh-CN/download/release-notes-1.0.2/index.html @@ -7,7 +7,7 @@ Release Notes 1.0.2 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/download/release-notes-1.0.3/index.html b/zh-CN/download/release-notes-1.0.3/index.html index b46ad488329..a7b67059816 100644 --- a/zh-CN/download/release-notes-1.0.3/index.html +++ b/zh-CN/download/release-notes-1.0.3/index.html @@ -7,7 +7,7 @@ Release Notes 1.0.3 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/download/release-notes-1.1.0/index.html b/zh-CN/download/release-notes-1.1.0/index.html index 2354f24cc56..66b55cbfba1 100644 --- a/zh-CN/download/release-notes-1.1.0/index.html +++ b/zh-CN/download/release-notes-1.1.0/index.html @@ -7,7 +7,7 @@ Release Notes 1.1.0 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/download/release-notes-1.1.1/index.html b/zh-CN/download/release-notes-1.1.1/index.html index 97c841d8820..8ba7804e968 100644 --- a/zh-CN/download/release-notes-1.1.1/index.html +++ b/zh-CN/download/release-notes-1.1.1/index.html @@ -7,7 +7,7 @@ Release Notes 1.1.1 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/download/release-notes-1.1.2/index.html b/zh-CN/download/release-notes-1.1.2/index.html index db6f62d6d56..05b6cac1af0 100644 --- a/zh-CN/download/release-notes-1.1.2/index.html +++ b/zh-CN/download/release-notes-1.1.2/index.html @@ -7,7 +7,7 @@ Release Notes 1.1.2 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/faq/main/index.html b/zh-CN/faq/main/index.html index 589e1ed82c5..904a074ebfd 100644 --- a/zh-CN/faq/main/index.html +++ b/zh-CN/faq/main/index.html @@ -7,7 +7,7 @@ Q&A | Apache Linkis - + @@ -75,7 +75,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/home/index.html b/zh-CN/home/index.html index 132de2ef80b..1a8d859a3b6 100644 --- a/zh-CN/home/index.html +++ b/zh-CN/home/index.html @@ -7,14 +7,14 @@ - +

    Computation Middleware

    Before

Each upper application directly connects to and accesses various underlying engines in a tightly coupled way, which makes the big data platform a complex network architecture.

    before

    After

Build a common layer of "computation middleware" between the numerous upper-layer applications and the countless underlying engines to resolve these complex connection problems in a standardized, reusable way.

    before

    Description

    Standardized Interfaces

Linkis provides standardized interfaces (REST, JDBC, WebSocket, etc.) to easily connect to various underlying engines (Spark, Presto, Flink, etc.), and acts as a proxy between the upper application layer and the underlying engine layer.
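To make the proxy role above concrete, here is a minimal sketch of submitting a task through the REST interface of a Linkis 1.x entrance service. The gateway address, auth headers, and label values are illustrative assumptions, not a complete reference; consult the Linkis REST API docs for the authoritative payload.

```python
import requests

# Hypothetical gateway address; replace with your own deployment.
LINKIS_GATEWAY = "http://127.0.0.1:9001"

# Task-submission payload in the general shape documented for Linkis 1.x;
# the concrete label values below are examples.
payload = {
    "executionContent": {"code": "show tables", "runType": "sql"},
    "params": {"variable": {}, "configuration": {}},
    "labels": {
        "engineType": "hive-2.3.3",   # target engine and version (example)
        "userCreator": "hadoop-IDE",  # executing user and creating app (example)
    },
}

resp = requests.post(
    f"{LINKIS_GATEWAY}/api/rest_j/v1/entrance/submit",
    json=payload,
    headers={"Token-Code": "...", "Token-User": "hadoop"},  # example token auth
)
# A successful submission returns a task/exec ID that can then be polled
# for status, logs, and result sets through further REST calls.
print(resp.json())
```

The point of the example is the decoupling: the caller names an engine through labels rather than connecting to Spark, Hive, or Presto directly, and Linkis routes, schedules, and governs the execution behind the single standardized entrance.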

    description

    Computation Governance

Linkis facilitates the connectivity, governance, and orchestration capabilities of different kinds of engines, such as OLAP, OLTP (under development), and Streaming, and handles all these "computation governance" affairs in a standardized, reusable way.

    Core Features

    Connectivity

Simplifies the operating environment; decouples the upper and lower layers, so that the upper layer is insensitive to changes in the bottom layers

    Scalability

Distributed microservice architecture with great scalability and extensibility; quickly integrates new underlying engines

    Controllability

Converges engine entrances; unifies identity verification, high-risk prevention and control, and audit records; provides label-based, multi-level, fine-grained resource control and recovery capabilities

    Orchestration

Computing strategy design based on active-active, mixed computing, and transaction Orchestrator Services

    Reusability

Greatly reduces the back-end development workload of upper-level applications; enables swift and efficient construction of a data platform tool suite based on Linkis

    - + \ No newline at end of file diff --git a/zh-CN/index.html b/zh-CN/index.html index e066675f9d8..d763071501a 100644 --- a/zh-CN/index.html +++ b/zh-CN/index.html @@ -7,7 +7,7 @@ Apache Linkis | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/search/index.html b/zh-CN/search/index.html index c762f8988d9..87da2ea0f01 100644 --- a/zh-CN/search/index.html +++ b/zh-CN/search/index.html @@ -7,7 +7,7 @@ Search the documentation | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/team/index.html b/zh-CN/team/index.html index 3d41ed41926..e6b85a80c45 100644 --- a/zh-CN/team/index.html +++ b/zh-CN/team/index.html @@ -7,7 +7,7 @@ Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/user/index.html b/zh-CN/user/index.html index ef63d209391..6031c506f30 100644 --- a/zh-CN/user/index.html +++ b/zh-CN/user/index.html @@ -7,7 +7,7 @@ Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/versions/index.html b/zh-CN/versions/index.html index 6875ded2e5f..c29f9c0c1b2 100644 --- a/zh-CN/versions/index.html +++ b/zh-CN/versions/index.html @@ -7,7 +7,7 @@ Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file